Merge tag 'mm-nonmm-stable-2024-03-14-09-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-MM updates from Andrew Morton:

- Kuan-Wei Chiu has developed the well-named series "lib min_heap: Min
  heap optimizations".

- Kuan-Wei Chiu has also sped up the library sorting code in the series
  "lib/sort: Optimize the number of swaps and comparisons".

- Alexey Gladkov has added the ability for code running within an IPC
  namespace to alter its IPC and MQ limits. The series is "Allow to
  change ipc/mq sysctls inside ipc namespace".

- Geert Uytterhoeven has contributed some dhrystone maintenance work in
  the series "lib: dhry: miscellaneous cleanups".

- Ryusuke Konishi continues nilfs2 maintenance work in the series
  "nilfs2: eliminate kmap and kmap_atomic calls" and
  "nilfs2: fix kernel bug at submit_bh_wbc()".

- Nathan Chancellor has updated our build tools requirements in the
  series "Bump the minimum supported version of LLVM to 13.0.1".

- Muhammad Usama Anjum continues with the selftests maintenance work in
  the series "selftests/mm: Improve run_vmtests.sh".

- Oleg Nesterov has done some maintenance work against the signal code
  in the series "get_signal: minor cleanups and fix".

Plus the usual shower of singleton patches in various parts of the tree.
Please see the individual changelogs for details.
* tag 'mm-nonmm-stable-2024-03-14-09-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (77 commits)
  nilfs2: prevent kernel bug at submit_bh_wbc()
  nilfs2: fix failure to detect DAT corruption in btree and direct mappings
  ocfs2: enable ocfs2_listxattr for special files
  ocfs2: remove SLAB_MEM_SPREAD flag usage
  assoc_array: fix the return value in assoc_array_insert_mid_shortcut()
  buildid: use kmap_local_page()
  watchdog/core: remove sysctl handlers from public header
  nilfs2: use div64_ul() instead of do_div()
  mul_u64_u64_div_u64: increase precision by conditionally swapping a and b
  kexec: copy only happens before uchunk goes to zero
  get_signal: don't initialize ksig->info if SIGNAL_GROUP_EXIT/group_exec_task
  get_signal: hide_si_addr_tag_bits: fix the usage of uninitialized ksig
  get_signal: don't abuse ksig->info.si_signo and ksig->sig
  const_structs.checkpatch: add device_type
  Normalise "name (ad@dr)" MODULE_AUTHORs to "name <ad@dr>"
  dyndbg: replace kstrdup() + strchr() with kstrdup_and_replace()
  list: leverage list_is_head() for list_entry_is_head()
  nilfs2: MAINTAINERS: drop unreachable project mirror site
  smp: make __smp_processor_id() 0-argument macro
  fat: fix uninitialized field in nostale filehandles
  ...
commit e5eb28f6d1
@@ -4211,6 +4211,7 @@
bit 4: print ftrace buffer
bit 5: print all printk messages in buffer
bit 6: print all CPUs backtrace (if available in the arch)
bit 7: print only tasks in uninterruptible (blocked) state
*Be aware* that this option may print a _lot_ of lines,
so there are risks of losing older messages in the log.
Use this option carefully, maybe worth to setup a
@@ -594,6 +594,9 @@ default (``MSGMNB``).
``msgmni`` is the maximum number of IPC queues. 32000 by default
(``MSGMNI``).

All of these parameters are set per ipc namespace. The maximum number of bytes
in POSIX message queues is limited by ``RLIMIT_MSGQUEUE``. This limit is
respected hierarchically in the each user namespace.

msg_next_id, sem_next_id, and shm_next_id (System V IPC)
========================================================
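For illustration only (not part of this series), a minimal userspace C sketch
that reads the per-process ``RLIMIT_MSGQUEUE`` cap mentioned above, assuming a
Linux/glibc build environment::

	/* Print the soft and hard RLIMIT_MSGQUEUE limits for this process. */
	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_MSGQUEUE, &rl) != 0) {
			perror("getrlimit");
			return 1;
		}
		printf("RLIMIT_MSGQUEUE: soft=%llu hard=%llu bytes\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
		return 0;
	}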
@@ -850,6 +853,7 @@ bit 3 print locks info if ``CONFIG_LOCKDEP`` is on
bit 4 print ftrace buffer
bit 5 print all printk messages in buffer
bit 6 print all CPUs backtrace (if available in the arch)
bit 7 print only tasks in uninterruptible (blocked) state
===== ============================================

So for example to print tasks and memory info on panic, user can::
@@ -1274,15 +1278,20 @@ are doing anyway :)
shmall
======

This parameter sets the total amount of shared memory pages that
can be used system wide. Hence, ``shmall`` should always be at least
``ceil(shmmax/PAGE_SIZE)``.
This parameter sets the total amount of shared memory pages that can be used
inside ipc namespace. The shared memory pages counting occurs for each ipc
namespace separately and is not inherited. Hence, ``shmall`` should always be at
least ``ceil(shmmax/PAGE_SIZE)``.

If you are not sure what the default ``PAGE_SIZE`` is on your Linux
system, you can run the following command::

	# getconf PAGE_SIZE

To reduce or disable the ability to allocate shared memory, you must create a
new ipc namespace, set this parameter to the required value and prohibit the
creation of a new ipc namespace in the current user namespace or cgroups can
be used.

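For illustration only (not part of this series), a small C sketch of the
``ceil(shmmax/PAGE_SIZE)`` calculation above, using an assumed example value
for ``shmmax``::

	/* Compute the smallest shmall (in pages) that still covers shmmax. */
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long long shmmax = 4294967296ULL;  /* example value only */
		long page_size = sysconf(_SC_PAGESIZE);     /* what `getconf PAGE_SIZE` prints */
		unsigned long long min_shmall =
			(shmmax + (unsigned long long)page_size - 1) /
			(unsigned long long)page_size;

		printf("PAGE_SIZE=%ld, minimum shmall=%llu pages\n",
		       page_size, min_shmall);
		return 0;
	}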
shmmax
======
@@ -30,7 +30,7 @@ you probably needn't concern yourself with pcmciautils.
Program                Minimal version Command to check the version
====================== =============== ========================================
GNU C                  5.1             gcc --version
Clang/LLVM (optional)  11.0.0          clang --version
Clang/LLVM (optional)  13.0.1          clang --version
Rust (optional)        1.76.0          rustc --version
bindgen (optional)     0.65.1          bindgen --version
GNU make               3.82            make --version
@@ -15501,7 +15501,6 @@ M: Ryusuke Konishi <konishi.ryusuke@gmail.com>
L: linux-nilfs@vger.kernel.org
S: Supported
W: https://nilfs.sourceforge.io/
W: https://nilfs.osdn.jp/
T: git https://github.com/konis/nilfs2.git
F: Documentation/filesystems/nilfs2.rst
F: fs/nilfs2/
Makefile
@@ -950,14 +950,6 @@ CC_FLAGS_LTO += -fvisibility=hidden

# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5

# Check for frame size exceeding threshold during prolog/epilog insertion
# when using lld < 13.0.0.
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 130000),y)
KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
endif
endif
endif

ifdef CONFIG_LTO
@@ -18,18 +18,12 @@ static __always_inline __attribute_const__ struct task_struct *get_current(void)
{
	struct task_struct *cur;

#if __has_builtin(__builtin_thread_pointer) && \
    defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
    !(defined(CONFIG_THUMB2_KERNEL) && \
      defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
#if __has_builtin(__builtin_thread_pointer) && defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
	/*
	 * Use the __builtin helper when available - this results in better
	 * code, especially when using GCC in combination with the per-task
	 * stack protector, as the compiler will recognize that it needs to
	 * load the TLS register only once in every function.
	 *
	 * Clang < 13.0.1 gets this wrong for Thumb2 builds:
	 * https://github.com/ClangBuiltLinux/linux/issues/1485
	 */
	cur = __builtin_thread_pointer();
#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
@@ -379,8 +379,8 @@ config BROKEN_GAS_INST
config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
	bool
	# Clang's __builtin_return_adddress() strips the PAC since 12.0.0
	# https://reviews.llvm.org/D75044
	default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
	# https://github.com/llvm/llvm-project/commit/2a96f47c5ffca84cd774ad402cacd137f4bf45e2
	default y if CC_IS_CLANG
	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
	# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
@@ -1387,7 +1387,6 @@ choice

config CPU_BIG_ENDIAN
	bool "Build big-endian kernel"
	depends on !LD_IS_LLD || LLD_VERSION >= 130000
	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
	depends on AS_IS_GNU || AS_VERSION >= 150000
	help
@@ -2018,8 +2017,6 @@ config ARM64_BTI_KERNEL
	depends on !CC_IS_GCC || GCC_VERSION >= 100100
	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
	depends on !CC_IS_GCC
	# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
	depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_ARGS)
	help
	  Build the kernel with Branch Target Identification annotations
@@ -2222,7 +2219,7 @@ config STACKPROTECTOR_PER_TASK

config UNWIND_PATCH_PAC_INTO_SCS
	bool "Enable shadow call stack dynamically using code patching"
	# needs Clang with https://reviews.llvm.org/D111780 incorporated
	# needs Clang with https://github.com/llvm/llvm-project/commit/de07cde67b5d205d58690be012106022aea6d2b3 incorporated
	depends on CC_IS_CLANG && CLANG_VERSION >= 150000
	depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET
	depends on SHADOW_CALL_STACK
@@ -333,7 +333,6 @@ config PANIC_TIMEOUT
config COMPAT
	bool "Enable support for 32bit binaries"
	depends on PPC64
	depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
	default y if !CPU_LITTLE_ENDIAN
	select ARCH_WANT_OLD_COMPAT_IPC
	select COMPAT_OLD_SIGACTION
@@ -144,11 +144,11 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mlong-double-128)

# Clang unconditionally reserves r2 on ppc32 and does not support the flag
# https://bugs.llvm.org/show_bug.cgi?id=39555
# https://llvm.org/pr39555
CFLAGS-$(CONFIG_PPC32) := $(call cc-option, -ffixed-r2)

# Clang doesn't support -mmultiple / -mno-multiple
# https://bugs.llvm.org/show_bug.cgi?id=39556
# https://llvm.org/pr39556
CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD))

CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
@@ -55,7 +55,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
	hr->dawrx1 = vcpu->arch.dawrx1;
}

/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
/* Use noinline_for_stack due to https://llvm.org/pr49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;
@@ -175,8 +175,6 @@ config RISCV

config CLANG_SUPPORTS_DYNAMIC_FTRACE
	def_bool CC_IS_CLANG
	# https://github.com/llvm/llvm-project/commit/6ab8927931851bb42b2c93a00801dc499d7d9b1e
	depends on CLANG_VERSION >= 130000
	# https://github.com/ClangBuiltLinux/linux/issues/1817
	depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))

@@ -313,7 +311,7 @@ config AS_HAS_INSN
	def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)

config AS_HAS_OPTION_ARCH
	# https://reviews.llvm.org/D123515
	# https://github.com/llvm/llvm-project/commit/9e8ed3403c191ab9c4903e8eeb8f732ff8a43cb4
	def_bool y
	depends on $(as-instr, .option arch$(comma) +m)

@@ -13,16 +13,6 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

/*
 * Clang prior to 13 had "mcount" instead of "_mcount":
 * https://reviews.llvm.org/D98881
 */
#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
#define MCOUNT_NAME _mcount
#else
#define MCOUNT_NAME mcount
#endif

#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__

@@ -30,7 +20,7 @@ extern void *return_address(unsigned int level);

#define ftrace_return_address(n) return_address(n)

void MCOUNT_NAME(void);
void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
@@ -80,7 +70,7 @@ struct dyn_arch_ftrace {
 * both auipc and jalr at the same time.
 */

#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
@@ -50,8 +50,8 @@

SYM_TYPED_FUNC_START(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
	.global MCOUNT_NAME
	.set MCOUNT_NAME, ftrace_stub
	.global _mcount
	.set _mcount, ftrace_stub
#endif
	ret
SYM_FUNC_END(ftrace_stub)
@@ -80,7 +80,7 @@ SYM_FUNC_END(return_to_handler)
#endif

#ifndef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(MCOUNT_NAME)
SYM_FUNC_START(_mcount)
	la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ SYM_FUNC_START(MCOUNT_NAME)
	jalr t5
	RESTORE_ABI_STATE
	ret
SYM_FUNC_END(MCOUNT_NAME)
SYM_FUNC_END(_mcount)
#endif
EXPORT_SYMBOL(MCOUNT_NAME)
EXPORT_SYMBOL(_mcount)
@@ -9,7 +9,7 @@
#ifndef __ASSEMBLY__

#ifdef CONFIG_CC_IS_CLANG
/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
/* https://llvm.org/pr41424 */
#define ftrace_return_address(n) 0UL
#else
#define ftrace_return_address(n) __builtin_return_address(n)
@@ -30,7 +30,7 @@
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -33,7 +33,7 @@

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -221,12 +221,6 @@ endif

KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)

ifdef CONFIG_LTO_CLANG
ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 130000),y)
KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
endif

ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
else
@@ -5,7 +5,7 @@
CFLAGS_cpu.o := -fno-stack-protector

# Clang may incorrectly inline functions with stack protector enabled into
# __restore_processor_state(): https://bugs.llvm.org/show_bug.cgi?id=47479
# __restore_processor_state(): https://llvm.org/pr47479
CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO)

obj-$(CONFIG_PM_SLEEP) += cpu.o
@@ -102,7 +102,7 @@ static void blake2b_compress_one_generic(struct blake2b_state *S,
	ROUND(10);
	ROUND(11);
#ifdef CONFIG_CC_IS_CLANG
#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */
#pragma nounroll /* https://llvm.org/pr45803 */
#endif
	for (i = 0; i < 8; ++i)
		S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
@@ -6086,9 +6086,7 @@ static void print_binder_node_nilocked(struct seq_file *m,
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;
	count = hlist_count_nodes(&node->refs);

	seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
@ -28,7 +28,7 @@
|
||||
|
||||
static char version[] =
|
||||
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -29,7 +29,7 @@
|
||||
static char version[] =
|
||||
DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Niagara2 RNG driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -167,7 +167,7 @@ static struct i2c_driver st33zp24_i2c_driver = {
|
||||
|
||||
module_i2c_driver(st33zp24_i2c_driver);
|
||||
|
||||
MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
|
||||
MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
|
||||
MODULE_DESCRIPTION("STM TPM 1.2 I2C ST33 Driver");
|
||||
MODULE_VERSION("1.3.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -284,7 +284,7 @@ static struct spi_driver st33zp24_spi_driver = {
|
||||
|
||||
module_spi_driver(st33zp24_spi_driver);
|
||||
|
||||
MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
|
||||
MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
|
||||
MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
|
||||
MODULE_VERSION("1.3.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -582,7 +582,7 @@ int st33zp24_pm_resume(struct device *dev)
|
||||
EXPORT_SYMBOL(st33zp24_pm_resume);
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
|
||||
MODULE_AUTHOR("TPM support <TPMsupport@list.st.com>");
|
||||
MODULE_DESCRIPTION("ST33ZP24 TPM 1.2 driver");
|
||||
MODULE_VERSION("1.3.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -524,7 +524,7 @@ static void __exit tpm_exit(void)
|
||||
subsys_initcall(tpm_init);
|
||||
module_exit(tpm_exit);
|
||||
|
||||
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
|
||||
MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
|
||||
MODULE_DESCRIPTION("TPM Driver");
|
||||
MODULE_VERSION("2.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -229,7 +229,7 @@ static void __exit cleanup_atmel(void)
|
||||
module_init(init_atmel);
|
||||
module_exit(cleanup_atmel);
|
||||
|
||||
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
|
||||
MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
|
||||
MODULE_DESCRIPTION("TPM Driver");
|
||||
MODULE_VERSION("2.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -654,6 +654,6 @@ static struct i2c_driver i2c_nuvoton_driver = {
|
||||
|
||||
module_i2c_driver(i2c_nuvoton_driver);
|
||||
|
||||
MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)");
|
||||
MODULE_AUTHOR("Dan Morav <dan.morav@nuvoton.com>");
|
||||
MODULE_DESCRIPTION("Nuvoton TPM I2C Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -410,7 +410,7 @@ static void __exit cleanup_nsc(void)
|
||||
module_init(init_nsc);
|
||||
module_exit(cleanup_nsc);
|
||||
|
||||
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
|
||||
MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
|
||||
MODULE_DESCRIPTION("TPM Driver");
|
||||
MODULE_VERSION("2.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -429,7 +429,7 @@ static void __exit cleanup_tis(void)
|
||||
|
||||
module_init(init_tis);
|
||||
module_exit(cleanup_tis);
|
||||
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
|
||||
MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
|
||||
MODULE_DESCRIPTION("TPM Driver");
|
||||
MODULE_VERSION("2.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -1360,7 +1360,7 @@ int tpm_tis_resume(struct device *dev)
|
||||
EXPORT_SYMBOL_GPL(tpm_tis_resume);
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
|
||||
MODULE_AUTHOR("Leendert van Doorn <leendert@watson.ibm.com>");
|
||||
MODULE_DESCRIPTION("TPM Driver");
|
||||
MODULE_VERSION("2.0");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -711,7 +711,7 @@ static void __exit vtpm_module_exit(void)
|
||||
module_init(vtpm_module_init);
|
||||
module_exit(vtpm_module_exit);
|
||||
|
||||
MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
|
||||
MODULE_AUTHOR("Stefan Berger <stefanb@us.ibm.com>");
|
||||
MODULE_DESCRIPTION("vTPM Driver");
|
||||
MODULE_VERSION("0.1");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -41,7 +41,7 @@
|
||||
static const char version[] =
|
||||
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Niagara2 Crypto driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -105,7 +105,7 @@ lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
|
||||
# Even when -mbranch-protection=none is set, Clang will generate a
|
||||
# .note.gnu.property for code-less object files (like lib/ctype.c),
|
||||
# so work around this by explicitly removing the unwanted section.
|
||||
# https://bugs.llvm.org/show_bug.cgi?id=46480
|
||||
# https://llvm.org/pr46480
|
||||
STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
|
||||
|
||||
STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
|
||||
|
@ -612,7 +612,7 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
|
||||
/* Set ring buffer size in dwords */
|
||||
uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
|
||||
|
||||
barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
|
||||
barrier(); /* work around https://llvm.org/pr42576 */
|
||||
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
|
||||
#ifdef __BIG_ENDIAN
|
||||
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
|
||||
|
@ -108,7 +108,7 @@ struct dell_smm_cooling_data {
|
||||
struct dell_smm_data *data;
|
||||
};
|
||||
|
||||
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
|
||||
MODULE_AUTHOR("Massimo Dal Zotto <dz@debian.org>");
|
||||
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
|
||||
MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -18,7 +18,7 @@
|
||||
|
||||
#define DRV_MODULE_VERSION "0.1"
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Ultra45 environmental monitor driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -187,7 +187,7 @@ static struct platform_driver mlxcpld_mux_driver = {
|
||||
|
||||
module_platform_driver(mlxcpld_mux_driver);
|
||||
|
||||
MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
|
||||
MODULE_AUTHOR("Michael Shych <michaels@mellanox.com>");
|
||||
MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_ALIAS("platform:i2c-mux-mlxcpld");
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include <asm/fhc.h>
|
||||
#include <asm/upa.h>
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Sun Fire LED driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -702,13 +702,7 @@ static unsigned int bch_cache_max_chain(struct cache_set *c)
|
||||
for (h = c->bucket_hash;
|
||||
h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
|
||||
h++) {
|
||||
unsigned int i = 0;
|
||||
struct hlist_node *p;
|
||||
|
||||
hlist_for_each(p, h)
|
||||
i++;
|
||||
|
||||
ret = max(ret, i);
|
||||
ret = max(ret, hlist_count_nodes(h));
|
||||
}
|
||||
|
||||
mutex_unlock(&c->bucket_lock);
|
||||
|
@ -2155,7 +2155,7 @@ module_init(smscore_module_init);
|
||||
module_exit(smscore_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Siano MDTV Core module");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* This should match what's defined at smscoreapi.h */
|
||||
|
@ -1267,5 +1267,5 @@ module_init(smsdvb_module_init);
|
||||
module_exit(smsdvb_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("SMS DVB subsystem adaptation module");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -1647,7 +1647,7 @@ static const struct dvb_frontend_ops cx24117_ops = {
|
||||
|
||||
|
||||
MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24117/cx24132 hardware");
|
||||
MODULE_AUTHOR("Luis Alves (ljalvs@gmail.com)");
|
||||
MODULE_AUTHOR("Luis Alves <ljalvs@gmail.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.1");
|
||||
MODULE_FIRMWARE(CX24117_DEFAULT_FIRMWARE);
|
||||
|
@ -49,7 +49,7 @@ static const uint8_t zigzag[64] = {
|
||||
|
||||
/*
|
||||
* noinline_for_stack to work around
|
||||
* https://bugs.llvm.org/show_bug.cgi?id=38809
|
||||
* https://llvm.org/pr38809
|
||||
*/
|
||||
static int noinline_for_stack
|
||||
rlc(const s16 *in, __be16 *output, int blocktype)
|
||||
|
@ -724,5 +724,5 @@ static struct usb_driver smsusb_driver = {
|
||||
module_usb_driver(smsusb_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
|
||||
MODULE_AUTHOR("Siano Mobile Silicon, Inc. <uris@siano-ms.com>");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -221,7 +221,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
|
||||
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
|
||||
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
|
||||
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_FIRMWARE(FIRMWARE_TG3);
|
||||
|
@ -176,7 +176,7 @@ static char version[] =
|
||||
static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
|
||||
static int link_mode;
|
||||
|
||||
MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
|
||||
MODULE_AUTHOR("Adrian Sun <asun@darksunrising.com>");
|
||||
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_FIRMWARE("sun/cassini.bin");
|
||||
|
@ -61,7 +61,7 @@ union niu_page {
|
||||
static char version[] =
|
||||
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("NIU ethernet driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -59,7 +59,7 @@
|
||||
|
||||
#define DRV_NAME "sunhme"
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -44,7 +44,7 @@
|
||||
|
||||
static char version[] =
|
||||
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_MODULE_VERSION);
|
||||
|
@ -39,7 +39,7 @@
|
||||
*/
|
||||
#define VNET_MAX_RETRIES 10
|
||||
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.1");
|
||||
|
@ -694,6 +694,6 @@ module_init(pptp_init_module);
|
||||
module_exit(pptp_exit_module);
|
||||
|
||||
MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
|
||||
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
|
||||
MODULE_AUTHOR("D. Kozlov <xeb@mail.ru>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
|
||||
|
@ -1107,7 +1107,7 @@ module_init(compal_init);
|
||||
module_exit(compal_cleanup);
|
||||
|
||||
MODULE_AUTHOR("Cezary Jackiewicz");
|
||||
MODULE_AUTHOR("Roald Frederickx (roald.frederickx@gmail.com)");
|
||||
MODULE_AUTHOR("Roald Frederickx <roald.frederickx@gmail.com>");
|
||||
MODULE_DESCRIPTION("Compal Laptop Support");
|
||||
MODULE_VERSION(DRIVER_VERSION);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -365,7 +365,7 @@ static void __exit oaktrail_cleanup(void)
|
||||
module_init(oaktrail_init);
|
||||
module_exit(oaktrail_cleanup);
|
||||
|
||||
MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
|
||||
MODULE_AUTHOR("Yin Kangkai <kangkai.yin@intel.com>");
|
||||
MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
|
||||
MODULE_VERSION(DRIVER_VERSION);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -6659,6 +6659,6 @@ static void __exit mlxplat_exit(void)
|
||||
}
|
||||
module_exit(mlxplat_exit);
|
||||
|
||||
MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
|
||||
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
|
||||
MODULE_DESCRIPTION("Mellanox platform driver");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
@ -288,7 +288,7 @@ config REGULATOR_CROS_EC
|
||||
config REGULATOR_DA903X
|
||||
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
|
||||
depends on PMIC_DA903X
|
||||
depends on !CC_IS_CLANG # https://bugs.llvm.org/show_bug.cgi?id=38789
|
||||
depends on !CC_IS_CLANG # https://llvm.org/pr38789
|
||||
help
|
||||
Say y here to support the BUCKs and LDOs regulators found on
|
||||
Dialog Semiconductor DA9030/DA9034 PMIC.
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/timer.h>
|
||||
|
||||
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
|
||||
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
|
||||
MODULE_DESCRIPTION("Finite state machine helper functions");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -33,7 +33,7 @@
|
||||
#include <linux/pci.h>
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR("Thomas K. Dyas (tdyas@noc.rutgers.edu) and Eddie C. Dost (ecd@skynet.be)");
|
||||
MODULE_AUTHOR("Thomas K. Dyas <tdyas@noc.rutgers.edu> and Eddie C. Dost <ecd@skynet.be>");
|
||||
MODULE_DESCRIPTION("OPENPROM Configuration Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.0");
|
||||
|
@ -2753,7 +2753,7 @@ static void __exit esp_exit(void)
|
||||
}
|
||||
|
||||
MODULE_DESCRIPTION("ESP SCSI driver core");
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_VERSION);
|
||||
|
||||
|
@ -204,6 +204,6 @@ static struct platform_driver esp_jazz_driver = {
|
||||
module_platform_driver(esp_jazz_driver);
|
||||
|
||||
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
|
||||
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
|
||||
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_VERSION);
|
||||
|
@ -54,7 +54,7 @@
|
||||
#define KERN_DEBUG KERN_WARNING
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)");
|
||||
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
|
||||
MODULE_DESCRIPTION("PowerMac MESH SCSI driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -1468,7 +1468,7 @@ static struct platform_driver qpti_sbus_driver = {
|
||||
module_platform_driver(qpti_sbus_driver);
|
||||
|
||||
MODULE_DESCRIPTION("QlogicISP SBUS driver");
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("2.1");
|
||||
MODULE_FIRMWARE("qlogic/isp1000.bin");
|
||||
|
@ -273,7 +273,7 @@ static struct platform_driver esp_sun3x_driver = {
|
||||
module_platform_driver(esp_sun3x_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
|
||||
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
|
||||
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_VERSION);
|
||||
MODULE_ALIAS("platform:sun3x_esp");
|
||||
|
@ -608,6 +608,6 @@ static struct platform_driver esp_sbus_driver = {
|
||||
module_platform_driver(esp_sbus_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Sun ESP SCSI driver");
|
||||
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
|
||||
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(DRV_VERSION);
|
||||
|
@ -670,7 +670,7 @@ static void __exit hgafb_exit(void)
|
||||
*
|
||||
* ------------------------------------------------------------------------- */
|
||||
|
||||
MODULE_AUTHOR("Ferenc Bakonyi (fero@drama.obuda.kando.hu)");
|
||||
MODULE_AUTHOR("Ferenc Bakonyi <fero@drama.obuda.kando.hu>");
|
||||
MODULE_DESCRIPTION("FBDev driver for Hercules Graphics Adaptor");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
|
||||
fid->parent_i_gen = parent->i_generation;
|
||||
type = FILEID_FAT_WITH_PARENT;
|
||||
*lenp = FAT_FID_SIZE_WITH_PARENT;
|
||||
} else {
|
||||
/*
|
||||
* We need to initialize this field because the fh is actually
|
||||
* 12 bytes long
|
||||
*/
|
||||
fid->parent_i_pos_hi = 0;
|
||||
}
|
||||
|
||||
return type;
|
||||
|
@ -525,54 +525,55 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
|
||||
ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
desc_kaddr = kmap(desc_bh->b_page);
|
||||
desc_kaddr = kmap_local_page(desc_bh->b_page);
|
||||
desc = nilfs_palloc_block_get_group_desc(
|
||||
inode, group, desc_bh, desc_kaddr);
|
||||
n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
|
||||
maxgroup);
|
||||
for (j = 0; j < n; j++, desc++, group++) {
|
||||
for (j = 0; j < n; j++, desc++, group++, group_offset = 0) {
|
||||
lock = nilfs_mdt_bgl_lock(inode, group);
|
||||
if (nilfs_palloc_group_desc_nfrees(desc, lock) > 0) {
|
||||
ret = nilfs_palloc_get_bitmap_block(
|
||||
inode, group, 1, &bitmap_bh);
|
||||
if (ret < 0)
|
||||
goto out_desc;
|
||||
bitmap_kaddr = kmap(bitmap_bh->b_page);
|
||||
bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
|
||||
pos = nilfs_palloc_find_available_slot(
|
||||
bitmap, group_offset,
|
||||
entries_per_group, lock);
|
||||
if (pos >= 0) {
|
||||
/* found a free entry */
|
||||
nilfs_palloc_group_desc_add_entries(
|
||||
desc, lock, -1);
|
||||
req->pr_entry_nr =
|
||||
entries_per_group * group + pos;
|
||||
kunmap(desc_bh->b_page);
|
||||
kunmap(bitmap_bh->b_page);
|
||||
if (nilfs_palloc_group_desc_nfrees(desc, lock) == 0)
|
||||
continue;
|
||||
|
||||
req->pr_desc_bh = desc_bh;
|
||||
req->pr_bitmap_bh = bitmap_bh;
|
||||
return 0;
|
||||
}
|
||||
kunmap(bitmap_bh->b_page);
|
||||
brelse(bitmap_bh);
|
||||
kunmap_local(desc_kaddr);
|
||||
ret = nilfs_palloc_get_bitmap_block(inode, group, 1,
|
||||
&bitmap_bh);
|
||||
if (unlikely(ret < 0)) {
|
||||
brelse(desc_bh);
|
||||
return ret;
|
||||
}
|
||||
|
||||
group_offset = 0;
|
||||
desc_kaddr = kmap_local_page(desc_bh->b_page);
|
||||
desc = nilfs_palloc_block_get_group_desc(
|
||||
inode, group, desc_bh, desc_kaddr);
|
||||
|
||||
bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
|
||||
bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
|
||||
pos = nilfs_palloc_find_available_slot(
|
||||
bitmap, group_offset, entries_per_group, lock);
|
||||
kunmap_local(bitmap_kaddr);
|
||||
if (pos >= 0)
|
||||
goto found;
|
||||
|
||||
brelse(bitmap_bh);
|
||||
}
|
||||
|
||||
kunmap(desc_bh->b_page);
|
||||
kunmap_local(desc_kaddr);
|
||||
brelse(desc_bh);
|
||||
}
|
||||
|
||||
/* no entries left */
|
||||
return -ENOSPC;
|
||||
|
||||
out_desc:
|
||||
kunmap(desc_bh->b_page);
|
||||
brelse(desc_bh);
|
||||
return ret;
|
||||
found:
|
||||
/* found a free entry */
|
||||
nilfs_palloc_group_desc_add_entries(desc, lock, -1);
|
||||
req->pr_entry_nr = entries_per_group * group + pos;
|
||||
kunmap_local(desc_kaddr);
|
||||
|
||||
req->pr_desc_bh = desc_bh;
|
||||
req->pr_bitmap_bh = bitmap_bh;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -606,10 +607,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
|
||||
spinlock_t *lock;
|
||||
|
||||
group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
|
||||
desc_kaddr = kmap(req->pr_desc_bh->b_page);
|
||||
desc_kaddr = kmap_local_page(req->pr_desc_bh->b_page);
|
||||
desc = nilfs_palloc_block_get_group_desc(inode, group,
|
||||
req->pr_desc_bh, desc_kaddr);
|
||||
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
|
||||
bitmap_kaddr = kmap_local_page(req->pr_bitmap_bh->b_page);
|
||||
bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
|
||||
lock = nilfs_mdt_bgl_lock(inode, group);
|
||||
|
||||
@ -621,8 +622,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
|
||||
else
|
||||
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
|
||||
|
||||
kunmap(req->pr_bitmap_bh->b_page);
|
||||
kunmap(req->pr_desc_bh->b_page);
|
||||
kunmap_local(bitmap_kaddr);
|
||||
kunmap_local(desc_kaddr);
|
||||
|
||||
mark_buffer_dirty(req->pr_desc_bh);
|
||||
mark_buffer_dirty(req->pr_bitmap_bh);
|
||||
@ -647,10 +648,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
|
||||
spinlock_t *lock;
|
||||
|
||||
group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
|
||||
desc_kaddr = kmap(req->pr_desc_bh->b_page);
|
||||
desc_kaddr = kmap_local_page(req->pr_desc_bh->b_page);
|
||||
desc = nilfs_palloc_block_get_group_desc(inode, group,
|
||||
req->pr_desc_bh, desc_kaddr);
|
||||
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
|
||||
bitmap_kaddr = kmap_local_page(req->pr_bitmap_bh->b_page);
|
||||
bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
|
||||
lock = nilfs_mdt_bgl_lock(inode, group);
|
||||
|
||||
@ -662,8 +663,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
|
||||
else
|
||||
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
|
||||
|
||||
kunmap(req->pr_bitmap_bh->b_page);
|
||||
kunmap(req->pr_desc_bh->b_page);
|
||||
kunmap_local(bitmap_kaddr);
|
||||
kunmap_local(desc_kaddr);
|
||||
|
||||
brelse(req->pr_bitmap_bh);
|
||||
brelse(req->pr_desc_bh);
|
||||
@ -755,7 +756,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
|
||||
/* Get the first entry number of the group */
|
||||
group_min_nr = (__u64)group * epg;
|
||||
|
||||
bitmap_kaddr = kmap(bitmap_bh->b_page);
|
||||
bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
|
||||
bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
|
||||
lock = nilfs_mdt_bgl_lock(inode, group);
|
||||
|
||||
@ -801,7 +802,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
|
||||
entry_start = rounddown(group_offset, epb);
|
||||
} while (true);
|
||||
|
||||
kunmap(bitmap_bh->b_page);
|
||||
kunmap_local(bitmap_kaddr);
|
||||
mark_buffer_dirty(bitmap_bh);
|
||||
brelse(bitmap_bh);
|
||||
|
||||
@ -815,11 +816,11 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
|
||||
inode->i_ino);
|
||||
}
|
||||
|
||||
desc_kaddr = kmap_atomic(desc_bh->b_page);
|
||||
desc_kaddr = kmap_local_page(desc_bh->b_page);
|
||||
desc = nilfs_palloc_block_get_group_desc(
|
||||
inode, group, desc_bh, desc_kaddr);
|
||||
nfree = nilfs_palloc_group_desc_add_entries(desc, lock, n);
|
||||
kunmap_atomic(desc_kaddr);
|
||||
kunmap_local(desc_kaddr);
|
||||
mark_buffer_dirty(desc_bh);
|
||||
nilfs_mdt_mark_dirty(inode);
|
||||
brelse(desc_bh);
|
||||
|
@ -548,13 +548,10 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
|
||||
*/
|
||||
void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
|
||||
{
|
||||
down_write(&bmap->b_sem);
|
||||
memcpy(raw_inode->i_bmap, bmap->b_u.u_data,
|
||||
NILFS_INODE_BMAP_SIZE * sizeof(__le64));
|
||||
if (bmap->b_inode->i_ino == NILFS_DAT_INO)
|
||||
bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
|
||||
|
||||
up_write(&bmap->b_sem);
|
||||
}
|
||||
|
||||
void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
|
||||
|
@ -724,7 +724,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
|
||||
dat = nilfs_bmap_get_dat(btree);
|
||||
ret = nilfs_dat_translate(dat, ptr, &blocknr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
goto dat_error;
|
||||
ptr = blocknr;
|
||||
}
|
||||
cnt = 1;
|
||||
@ -743,7 +743,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
|
||||
if (dat) {
|
||||
ret = nilfs_dat_translate(dat, ptr2, &blocknr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
goto dat_error;
|
||||
ptr2 = blocknr;
|
||||
}
|
||||
if (ptr2 != ptr + cnt || ++cnt == maxblocks)
|
||||
@ -781,6 +781,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
|
||||
out:
|
||||
nilfs_btree_free_path(path);
|
||||
return ret;
|
||||
|
||||
dat_error:
|
||||
if (ret == -ENOENT)
|
||||
ret = -EINVAL; /* Notify bmap layer of metadata corruption */
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
|
||||
|
@ -28,7 +28,7 @@ nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
|
||||
{
|
||||
__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
|
||||
|
||||
do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
|
||||
tcno = div64_ul(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
|
||||
return (unsigned long)tcno;
|
||||
}
|
||||
|
||||
@ -187,35 +187,90 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
|
||||
}
|
||||
|
||||
/**
|
||||
* nilfs_cpfile_get_checkpoint - get a checkpoint
|
||||
* @cpfile: inode of checkpoint file
|
||||
* @cno: checkpoint number
|
||||
* @create: create flag
|
||||
* @cpp: pointer to a checkpoint
|
||||
* @bhp: pointer to a buffer head
|
||||
* nilfs_cpfile_read_checkpoint - read a checkpoint entry in cpfile
|
||||
* @cpfile: checkpoint file inode
|
||||
* @cno: number of checkpoint entry to read
|
||||
* @root: nilfs root object
|
||||
* @ifile: ifile's inode to read and attach to @root
|
||||
*
|
||||
* Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
|
||||
* specified by @cno. A new checkpoint will be created if @cno is the current
|
||||
* checkpoint number and @create is nonzero.
|
||||
* This function imports checkpoint information from the checkpoint file and
|
||||
* stores it to the inode file given by @ifile and the nilfs root object
|
||||
* given by @root.
|
||||
*
|
||||
* Return Value: On success, 0 is returned, and the checkpoint and the
|
||||
* buffer head of the buffer on which the checkpoint is located are stored in
|
||||
* the place pointed by @cpp and @bhp, respectively. On error, one of the
|
||||
* following negative error codes is returned.
|
||||
*
|
||||
* %-EIO - I/O error.
|
||||
*
|
||||
* %-ENOMEM - Insufficient amount of memory available.
|
||||
*
|
||||
* %-ENOENT - No such checkpoint.
|
||||
*
|
||||
* %-EINVAL - invalid checkpoint.
|
||||
* Return: 0 on success, or the following negative error code on failure.
|
||||
* * %-EINVAL - Invalid checkpoint.
|
||||
* * %-ENOMEM - Insufficient memory available.
|
||||
* * %-EIO - I/O error (including metadata corruption).
|
||||
*/
|
||||
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
|
||||
__u64 cno,
|
||||
int create,
|
||||
struct nilfs_checkpoint **cpp,
|
||||
struct buffer_head **bhp)
|
||||
int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
|
||||
struct nilfs_root *root, struct inode *ifile)
|
||||
{
|
||||
struct buffer_head *cp_bh;
|
||||
struct nilfs_checkpoint *cp;
|
||||
void *kaddr;
|
||||
int ret;
|
||||
|
||||
if (cno < 1 || cno > nilfs_mdt_cno(cpfile))
|
||||
return -EINVAL;
|
||||
|
||||
down_read(&NILFS_MDT(cpfile)->mi_sem);
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
|
||||
if (unlikely(ret < 0)) {
|
||||
if (ret == -ENOENT)
|
||||
ret = -EINVAL;
|
||||
goto out_sem;
|
||||
}
|
||||
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
if (nilfs_checkpoint_invalid(cp)) {
|
||||
ret = -EINVAL;
|
||||
goto put_cp;
|
||||
}
|
||||
|
||||
ret = nilfs_read_inode_common(ifile, &cp->cp_ifile_inode);
|
||||
if (unlikely(ret)) {
|
||||
/*
|
||||
* Since this inode is on a checkpoint entry, treat errors
|
||||
* as metadata corruption.
|
||||
*/
|
||||
nilfs_err(cpfile->i_sb,
|
||||
"ifile inode (checkpoint number=%llu) corrupted",
|
||||
(unsigned long long)cno);
|
||||
ret = -EIO;
|
||||
goto put_cp;
|
||||
}
|
||||
|
||||
/* Configure the nilfs root object */
|
||||
atomic64_set(&root->inodes_count, le64_to_cpu(cp->cp_inodes_count));
|
||||
atomic64_set(&root->blocks_count, le64_to_cpu(cp->cp_blocks_count));
|
||||
root->ifile = ifile;
|
||||
|
||||
put_cp:
|
||||
kunmap_local(kaddr);
|
||||
brelse(cp_bh);
|
||||
out_sem:
|
||||
up_read(&NILFS_MDT(cpfile)->mi_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* nilfs_cpfile_create_checkpoint - create a checkpoint entry on cpfile
|
||||
* @cpfile: checkpoint file inode
|
||||
* @cno: number of checkpoint to set up
|
||||
*
|
||||
* This function creates a checkpoint with the number specified by @cno on
|
||||
* cpfile. If the specified checkpoint entry already exists due to a past
|
||||
* failure, it will be reused without returning an error.
|
||||
* In either case, the buffer of the block containing the checkpoint entry
|
||||
* and the cpfile inode are made dirty for inclusion in the write log.
|
||||
*
|
||||
* Return: 0 on success, or the following negative error code on failure.
|
||||
* * %-ENOMEM - Insufficient memory available.
|
||||
* * %-EIO - I/O error (including metadata corruption).
|
||||
* * %-EROFS - Read only filesystem
|
||||
*/
|
||||
int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
|
||||
{
|
||||
struct buffer_head *header_bh, *cp_bh;
|
||||
struct nilfs_cpfile_header *header;
|
||||
@ -223,70 +278,128 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
|
||||
void *kaddr;
|
||||
int ret;
|
||||
|
||||
if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
|
||||
(cno < nilfs_mdt_cno(cpfile) && create)))
|
||||
return -EINVAL;
|
||||
if (WARN_ON_ONCE(cno < 1))
|
||||
return -EIO;
|
||||
|
||||
down_write(&NILFS_MDT(cpfile)->mi_sem);
|
||||
|
||||
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
|
||||
if (ret < 0)
|
||||
if (unlikely(ret < 0)) {
|
||||
if (ret == -ENOENT) {
|
||||
nilfs_error(cpfile->i_sb,
|
||||
"checkpoint creation failed due to metadata corruption.");
|
||||
ret = -EIO;
|
||||
}
|
||||
goto out_sem;
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
|
||||
if (ret < 0)
|
||||
}
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 1, &cp_bh);
|
||||
if (unlikely(ret < 0))
|
||||
goto out_header;
|
||||
kaddr = kmap(cp_bh->b_page);
|
||||
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
if (nilfs_checkpoint_invalid(cp)) {
|
||||
if (!create) {
|
||||
kunmap(cp_bh->b_page);
|
||||
brelse(cp_bh);
|
||||
ret = -ENOENT;
|
||||
goto out_header;
|
||||
}
|
||||
/* a newly-created checkpoint */
|
||||
nilfs_checkpoint_clear_invalid(cp);
|
||||
if (!nilfs_cpfile_is_in_first(cpfile, cno))
|
||||
nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
|
||||
kaddr, 1);
|
||||
mark_buffer_dirty(cp_bh);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, header_bh,
|
||||
kaddr);
|
||||
le64_add_cpu(&header->ch_ncheckpoints, 1);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
mark_buffer_dirty(header_bh);
|
||||
nilfs_mdt_mark_dirty(cpfile);
|
||||
} else {
|
||||
kunmap_local(kaddr);
|
||||
}
|
||||
|
||||
if (cpp != NULL)
|
||||
*cpp = cp;
|
||||
*bhp = cp_bh;
|
||||
/* Force the buffer and the inode to become dirty */
|
||||
mark_buffer_dirty(cp_bh);
|
||||
brelse(cp_bh);
|
||||
nilfs_mdt_mark_dirty(cpfile);
|
||||
|
||||
out_header:
|
||||
out_header:
|
||||
brelse(header_bh);
|
||||
|
||||
out_sem:
|
||||
out_sem:
|
||||
up_write(&NILFS_MDT(cpfile)->mi_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* nilfs_cpfile_put_checkpoint - put a checkpoint
|
||||
* @cpfile: inode of checkpoint file
|
||||
* @cno: checkpoint number
|
||||
* @bh: buffer head
|
||||
* nilfs_cpfile_finalize_checkpoint - fill in a checkpoint entry in cpfile
|
||||
* @cpfile: checkpoint file inode
|
||||
* @cno: checkpoint number
|
||||
* @root: nilfs root object
|
||||
* @blkinc: number of blocks added by this checkpoint
|
||||
* @ctime: checkpoint creation time
|
||||
* @minor: minor checkpoint flag
|
||||
*
|
||||
* Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
|
||||
* specified by @cno. @bh must be the buffer head which has been returned by
|
||||
* a previous call to nilfs_cpfile_get_checkpoint() with @cno.
|
||||
* This function completes the checkpoint entry numbered by @cno in the
|
||||
* cpfile with the data given by the arguments @root, @blkinc, @ctime, and
|
||||
* @minor.
|
||||
*
|
||||
* Return: 0 on success, or the following negative error code on failure.
|
||||
* * %-ENOMEM - Insufficient memory available.
|
||||
* * %-EIO - I/O error (including metadata corruption).
|
||||
*/
|
||||
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
|
||||
struct buffer_head *bh)
|
||||
int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
|
||||
struct nilfs_root *root, __u64 blkinc,
|
||||
time64_t ctime, bool minor)
|
||||
{
|
||||
kunmap(bh->b_page);
|
||||
brelse(bh);
|
||||
struct buffer_head *cp_bh;
|
||||
struct nilfs_checkpoint *cp;
|
||||
void *kaddr;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON_ONCE(cno < 1))
|
||||
return -EIO;
|
||||
|
||||
down_write(&NILFS_MDT(cpfile)->mi_sem);
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
|
||||
if (unlikely(ret < 0)) {
|
||||
if (ret == -ENOENT)
|
||||
goto error;
|
||||
goto out_sem;
|
||||
}
|
||||
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
if (unlikely(nilfs_checkpoint_invalid(cp))) {
|
||||
kunmap_local(kaddr);
|
||||
brelse(cp_bh);
|
||||
goto error;
|
||||
}
|
||||
|
||||
cp->cp_snapshot_list.ssl_next = 0;
|
||||
cp->cp_snapshot_list.ssl_prev = 0;
|
||||
cp->cp_inodes_count = cpu_to_le64(atomic64_read(&root->inodes_count));
|
||||
cp->cp_blocks_count = cpu_to_le64(atomic64_read(&root->blocks_count));
|
||||
cp->cp_nblk_inc = cpu_to_le64(blkinc);
|
||||
cp->cp_create = cpu_to_le64(ctime);
|
||||
cp->cp_cno = cpu_to_le64(cno);
|
||||
|
||||
if (minor)
|
||||
nilfs_checkpoint_set_minor(cp);
|
||||
else
|
||||
nilfs_checkpoint_clear_minor(cp);
|
||||
|
||||
nilfs_write_inode_common(root->ifile, &cp->cp_ifile_inode);
|
||||
nilfs_bmap_write(NILFS_I(root->ifile)->i_bmap, &cp->cp_ifile_inode);
|
||||
|
||||
kunmap_local(kaddr);
|
||||
brelse(cp_bh);
|
||||
out_sem:
|
||||
up_write(&NILFS_MDT(cpfile)->mi_sem);
|
||||
return ret;
|
||||
|
||||
error:
|
||||
nilfs_error(cpfile->i_sb,
|
||||
"checkpoint finalization failed due to metadata corruption.");
|
||||
ret = -EIO;
|
||||
goto out_sem;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -347,7 +460,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
|
||||
continue;
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(cp_bh->b_page);
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(
|
||||
cpfile, cno, cp_bh, kaddr);
|
||||
nicps = 0;
|
||||
@ -369,7 +482,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
|
||||
cpfile, cp_bh, kaddr, nicps);
|
||||
if (count == 0) {
|
||||
/* make hole */
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(cp_bh);
|
||||
ret =
|
||||
nilfs_cpfile_delete_checkpoint_block(
|
||||
@ -384,18 +497,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
|
||||
}
|
||||
}
|
||||
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(cp_bh);
|
||||
}
|
||||
|
||||
if (tnicps > 0) {
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, header_bh,
|
||||
kaddr);
|
||||
le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
|
||||
mark_buffer_dirty(header_bh);
|
||||
nilfs_mdt_mark_dirty(cpfile);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
}
|
||||
|
||||
brelse(header_bh);
|
||||
@ -447,7 +560,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
|
||||
}
|
||||
ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
|
||||
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
|
||||
for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
|
||||
if (!nilfs_checkpoint_invalid(cp)) {
|
||||
@ -457,7 +570,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
|
||||
n++;
|
||||
}
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
}
|
||||
|
||||
@ -491,10 +604,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
|
||||
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
|
||||
curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
if (curr == 0) {
|
||||
ret = 0;
|
||||
@ -512,7 +625,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
|
||||
ret = 0; /* No snapshots (started from a hole block) */
|
||||
goto out;
|
||||
}
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
while (n < nci) {
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
|
||||
curr = ~(__u64)0; /* Terminator */
|
||||
@ -528,7 +641,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
|
||||
|
||||
next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
|
||||
if (curr_blkoff != next_blkoff) {
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
|
||||
0, &bh);
|
||||
@ -536,12 +649,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
|
||||
WARN_ON(ret == -ENOENT);
|
||||
goto out;
|
||||
}
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
}
|
||||
curr = next;
|
||||
curr_blkoff = next_blkoff;
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
*cnop = curr;
|
||||
ret = n;
|
||||
@ -650,24 +763,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
kaddr = kmap_atomic(cp_bh->b_page);
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
if (nilfs_checkpoint_invalid(cp)) {
|
||||
ret = -ENOENT;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
goto out_cp;
|
||||
}
|
||||
if (nilfs_checkpoint_snapshot(cp)) {
|
||||
ret = 0;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
goto out_cp;
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
|
||||
if (ret < 0)
|
||||
goto out_cp;
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
|
||||
list = &header->ch_snapshot_list;
|
||||
curr_bh = header_bh;
|
||||
@ -679,13 +792,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
|
||||
prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
|
||||
curr = prev;
|
||||
if (curr_blkoff != prev_blkoff) {
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(curr_bh);
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
|
||||
0, &curr_bh);
|
||||
if (ret < 0)
|
||||
goto out_header;
|
||||
kaddr = kmap_atomic(curr_bh->b_page);
|
||||
kaddr = kmap_local_page(curr_bh->b_page);
|
||||
}
|
||||
curr_blkoff = prev_blkoff;
|
||||
cp = nilfs_cpfile_block_get_checkpoint(
|
||||
@ -693,7 +806,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
|
||||
list = &cp->cp_snapshot_list;
|
||||
prev = le64_to_cpu(list->ssl_prev);
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (prev != 0) {
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
|
||||
@ -705,29 +818,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
|
||||
get_bh(prev_bh);
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(curr_bh->b_page);
|
||||
kaddr = kmap_local_page(curr_bh->b_page);
|
||||
list = nilfs_cpfile_block_get_snapshot_list(
|
||||
cpfile, curr, curr_bh, kaddr);
|
||||
list->ssl_prev = cpu_to_le64(cno);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(cp_bh->b_page);
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
|
||||
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
|
||||
nilfs_checkpoint_set_snapshot(cp);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(prev_bh->b_page);
|
||||
kaddr = kmap_local_page(prev_bh->b_page);
|
||||
list = nilfs_cpfile_block_get_snapshot_list(
|
||||
cpfile, prev, prev_bh, kaddr);
|
||||
list->ssl_next = cpu_to_le64(cno);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
|
||||
le64_add_cpu(&header->ch_nsnapshots, 1);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(prev_bh);
|
||||
mark_buffer_dirty(curr_bh);
|
||||
@ -768,23 +881,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
kaddr = kmap_atomic(cp_bh->b_page);
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
if (nilfs_checkpoint_invalid(cp)) {
|
||||
ret = -ENOENT;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
goto out_cp;
|
||||
}
|
||||
if (!nilfs_checkpoint_snapshot(cp)) {
|
||||
ret = 0;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
goto out_cp;
|
||||
}
|
||||
|
||||
list = &cp->cp_snapshot_list;
|
||||
next = le64_to_cpu(list->ssl_next);
|
||||
prev = le64_to_cpu(list->ssl_prev);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
|
||||
if (ret < 0)
|
||||
@ -808,29 +921,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
|
||||
get_bh(prev_bh);
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(next_bh->b_page);
|
||||
kaddr = kmap_local_page(next_bh->b_page);
|
||||
list = nilfs_cpfile_block_get_snapshot_list(
|
||||
cpfile, next, next_bh, kaddr);
|
||||
list->ssl_prev = cpu_to_le64(prev);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(prev_bh->b_page);
|
||||
kaddr = kmap_local_page(prev_bh->b_page);
|
||||
list = nilfs_cpfile_block_get_snapshot_list(
|
||||
cpfile, prev, prev_bh, kaddr);
|
||||
list->ssl_next = cpu_to_le64(next);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(cp_bh->b_page);
|
||||
kaddr = kmap_local_page(cp_bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
|
||||
cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
|
||||
cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
|
||||
nilfs_checkpoint_clear_snapshot(cp);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
|
||||
le64_add_cpu(&header->ch_nsnapshots, -1);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(next_bh);
|
||||
mark_buffer_dirty(prev_bh);
|
||||
@ -889,13 +1002,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
|
||||
ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
|
||||
if (nilfs_checkpoint_invalid(cp))
|
||||
ret = -ENOENT;
|
||||
else
|
||||
ret = nilfs_checkpoint_snapshot(cp);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
|
||||
out:
|
||||
@ -972,12 +1085,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
|
||||
ret = nilfs_cpfile_get_header_block(cpfile, &bh);
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
|
||||
cpstat->cs_cno = nilfs_mdt_cno(cpfile);
|
||||
cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
|
||||
cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
|
||||
out_sem:
|
||||
|
@@ -16,10 +16,12 @@
 #include <linux/nilfs2_ondisk.h>	/* nilfs_inode, nilfs_checkpoint */


-int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,
-				struct nilfs_checkpoint **,
-				struct buffer_head **);
-void nilfs_cpfile_put_checkpoint(struct inode *, __u64, struct buffer_head *);
+int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
+				 struct nilfs_root *root, struct inode *ifile);
+int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno);
+int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
+				     struct nilfs_root *root, __u64 blkinc,
+				     time64_t ctime, bool minor);
 int nilfs_cpfile_delete_checkpoints(struct inode *, __u64, __u64);
 int nilfs_cpfile_delete_checkpoint(struct inode *, __u64);
 int nilfs_cpfile_change_cpmode(struct inode *, __u64, int);
|
@ -91,13 +91,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
|
||||
struct nilfs_dat_entry *entry;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
|
||||
entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
|
||||
entry->de_blocknr = cpu_to_le64(0);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nilfs_palloc_commit_alloc_entry(dat, req);
|
||||
nilfs_dat_commit_entry(dat, req);
|
||||
@ -115,13 +115,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
|
||||
struct nilfs_dat_entry *entry;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
|
||||
entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
|
||||
entry->de_blocknr = cpu_to_le64(0);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nilfs_dat_commit_entry(dat, req);
|
||||
|
||||
@ -145,12 +145,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
|
||||
struct nilfs_dat_entry *entry;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
|
||||
entry->de_blocknr = cpu_to_le64(blocknr);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nilfs_dat_commit_entry(dat, req);
|
||||
}
|
||||
@ -167,12 +167,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
start = le64_to_cpu(entry->de_start);
|
||||
blocknr = le64_to_cpu(entry->de_blocknr);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (blocknr == 0) {
|
||||
ret = nilfs_palloc_prepare_free_entry(dat, req);
|
||||
@ -202,7 +202,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
|
||||
sector_t blocknr;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
end = start = le64_to_cpu(entry->de_start);
|
||||
@ -212,7 +212,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
|
||||
}
|
||||
entry->de_end = cpu_to_le64(end);
|
||||
blocknr = le64_to_cpu(entry->de_blocknr);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (blocknr == 0)
|
||||
nilfs_dat_commit_free(dat, req);
|
||||
@ -227,12 +227,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
|
||||
sector_t blocknr;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req->pr_entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
|
||||
req->pr_entry_bh, kaddr);
|
||||
start = le64_to_cpu(entry->de_start);
|
||||
blocknr = le64_to_cpu(entry->de_blocknr);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (start == nilfs_mdt_cno(dat) && blocknr == 0)
|
||||
nilfs_palloc_abort_free_entry(dat, req);
|
||||
@ -362,7 +362,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
|
||||
}
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(entry_bh->b_page);
|
||||
kaddr = kmap_local_page(entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
|
||||
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
|
||||
nilfs_crit(dat->i_sb,
|
||||
@ -370,13 +370,13 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
|
||||
__func__, (unsigned long long)vblocknr,
|
||||
(unsigned long long)le64_to_cpu(entry->de_start),
|
||||
(unsigned long long)le64_to_cpu(entry->de_end));
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(entry_bh);
|
||||
return -EINVAL;
|
||||
}
|
||||
WARN_ON(blocknr == 0);
|
||||
entry->de_blocknr = cpu_to_le64(blocknr);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(entry_bh);
|
||||
nilfs_mdt_mark_dirty(dat);
|
||||
@ -426,7 +426,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
|
||||
}
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(entry_bh->b_page);
|
||||
kaddr = kmap_local_page(entry_bh->b_page);
|
||||
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
|
||||
blocknr = le64_to_cpu(entry->de_blocknr);
|
||||
if (blocknr == 0) {
|
||||
@ -436,7 +436,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
|
||||
*blocknrp = blocknr;
|
||||
|
||||
out:
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(entry_bh);
|
||||
return ret;
|
||||
}
|
||||
@ -457,10 +457,10 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
|
||||
0, &entry_bh);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
kaddr = kmap_atomic(entry_bh->b_page);
|
||||
kaddr = kmap_local_page(entry_bh->b_page);
|
||||
/* last virtual block number in this block */
|
||||
first = vinfo->vi_vblocknr;
|
||||
do_div(first, entries_per_block);
|
||||
first = div64_ul(first, entries_per_block);
|
||||
first *= entries_per_block;
|
||||
last = first + entries_per_block - 1;
|
||||
for (j = i, n = 0;
|
||||
@ -473,7 +473,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
|
||||
vinfo->vi_end = le64_to_cpu(entry->de_end);
|
||||
vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(entry_bh);
|
||||
}
|
||||
|
||||
|
@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
|
||||
dat = nilfs_bmap_get_dat(direct);
|
||||
ret = nilfs_dat_translate(dat, ptr, &blocknr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto dat_error;
|
||||
ptr = blocknr;
|
||||
}
|
||||
|
||||
@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
|
||||
if (dat) {
|
||||
ret = nilfs_dat_translate(dat, ptr2, &blocknr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto dat_error;
|
||||
ptr2 = blocknr;
|
||||
}
|
||||
if (ptr2 != ptr + cnt)
|
||||
@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
|
||||
}
|
||||
*ptrp = ptr;
|
||||
return cnt;
|
||||
|
||||
dat_error:
|
||||
if (ret == -ENOENT)
|
||||
ret = -EINVAL; /* Notify bmap layer of metadata corruption */
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __u64
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "mdt.h"
|
||||
#include "alloc.h"
|
||||
#include "ifile.h"
|
||||
#include "cpfile.h"
|
||||
|
||||
/**
|
||||
* struct nilfs_ifile_info - on-memory private data of ifile
|
||||
@ -115,11 +116,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
|
||||
return ret;
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(req.pr_entry_bh->b_page);
|
||||
kaddr = kmap_local_page(req.pr_entry_bh->b_page);
|
||||
raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
|
||||
req.pr_entry_bh, kaddr);
|
||||
raw_inode->i_flags = 0;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(req.pr_entry_bh);
|
||||
brelse(req.pr_entry_bh);
|
||||
@ -173,14 +174,18 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile,
|
||||
* nilfs_ifile_read - read or get ifile inode
|
||||
* @sb: super block instance
|
||||
* @root: root object
|
||||
* @cno: number of checkpoint entry to read
|
||||
* @inode_size: size of an inode
|
||||
* @raw_inode: on-disk ifile inode
|
||||
* @inodep: buffer to store the inode
|
||||
*
|
||||
* Return: 0 on success, or the following negative error code on failure.
|
||||
* * %-EINVAL - Invalid checkpoint.
|
||||
* * %-ENOMEM - Insufficient memory available.
|
||||
* * %-EIO - I/O error (including metadata corruption).
|
||||
*/
|
||||
int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
|
||||
size_t inode_size, struct nilfs_inode *raw_inode,
|
||||
struct inode **inodep)
|
||||
__u64 cno, size_t inode_size)
|
||||
{
|
||||
struct the_nilfs *nilfs;
|
||||
struct inode *ifile;
|
||||
int err;
|
||||
|
||||
@ -201,13 +206,13 @@ int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
|
||||
|
||||
nilfs_palloc_setup_cache(ifile, &NILFS_IFILE_I(ifile)->palloc_cache);
|
||||
|
||||
err = nilfs_read_inode_common(ifile, raw_inode);
|
||||
nilfs = sb->s_fs_info;
|
||||
err = nilfs_cpfile_read_checkpoint(nilfs->ns_cpfile, cno, root, ifile);
|
||||
if (err)
|
||||
goto failed;
|
||||
|
||||
unlock_new_inode(ifile);
|
||||
out:
|
||||
*inodep = ifile;
|
||||
return 0;
|
||||
failed:
|
||||
iget_failed(ifile);
|
||||
|
@ -21,15 +21,14 @@
|
||||
static inline struct nilfs_inode *
|
||||
nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh)
|
||||
{
|
||||
void *kaddr = kmap(ibh->b_page);
|
||||
void *kaddr = kmap_local_page(ibh->b_page);
|
||||
|
||||
return nilfs_palloc_block_get_entry(ifile, ino, ibh, kaddr);
|
||||
}
|
||||
|
||||
static inline void nilfs_ifile_unmap_inode(struct inode *ifile, ino_t ino,
|
||||
struct buffer_head *ibh)
|
||||
static inline void nilfs_ifile_unmap_inode(struct nilfs_inode *raw_inode)
|
||||
{
|
||||
kunmap(ibh->b_page);
|
||||
kunmap_local(raw_inode);
|
||||
}
|
||||
|
||||
int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
|
||||
@ -39,7 +38,6 @@ int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
|
||||
int nilfs_ifile_count_free_inodes(struct inode *, u64 *, u64 *);
|
||||
|
||||
int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
|
||||
size_t inode_size, struct nilfs_inode *raw_inode,
|
||||
struct inode **inodep);
|
||||
__u64 cno, size_t inode_size);
|
||||
|
||||
#endif /* _NILFS_IFILE_H */
|
||||
|
@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
|
||||
"%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
|
||||
__func__, inode->i_ino,
|
||||
(unsigned long long)blkoff);
|
||||
err = 0;
|
||||
err = -EAGAIN;
|
||||
}
|
||||
nilfs_transaction_abort(inode->i_sb);
|
||||
goto out;
|
||||
@ -520,7 +520,7 @@ static int __nilfs_read_inode(struct super_block *sb,
|
||||
inode, inode->i_mode,
|
||||
huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
|
||||
}
|
||||
nilfs_ifile_unmap_inode(root->ifile, ino, bh);
|
||||
nilfs_ifile_unmap_inode(raw_inode);
|
||||
brelse(bh);
|
||||
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
|
||||
nilfs_set_inode_flags(inode);
|
||||
@ -529,7 +529,7 @@ static int __nilfs_read_inode(struct super_block *sb,
|
||||
return 0;
|
||||
|
||||
failed_unmap:
|
||||
nilfs_ifile_unmap_inode(root->ifile, ino, bh);
|
||||
nilfs_ifile_unmap_inode(raw_inode);
|
||||
brelse(bh);
|
||||
|
||||
bad_inode:
|
||||
@ -759,8 +759,18 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
|
||||
return s_inode;
|
||||
}
|
||||
|
||||
/**
|
||||
* nilfs_write_inode_common - export common inode information to on-disk inode
|
||||
* @inode: inode object
|
||||
* @raw_inode: on-disk inode
|
||||
*
|
||||
* This function writes standard information from the on-memory inode @inode
|
||||
* to @raw_inode on ifile, cpfile or a super root block. Since inode bmap
|
||||
* data is not exported, nilfs_bmap_write() must be called separately during
|
||||
* log writing.
|
||||
*/
|
||||
void nilfs_write_inode_common(struct inode *inode,
|
||||
struct nilfs_inode *raw_inode, int has_bmap)
|
||||
struct nilfs_inode *raw_inode)
|
||||
{
|
||||
struct nilfs_inode_info *ii = NILFS_I(inode);
|
||||
|
||||
@ -778,21 +788,6 @@ void nilfs_write_inode_common(struct inode *inode,
|
||||
raw_inode->i_flags = cpu_to_le32(ii->i_flags);
|
||||
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
|
||||
|
||||
if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
|
||||
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
|
||||
|
||||
/* zero-fill unused portion in the case of super root block */
|
||||
raw_inode->i_xattr = 0;
|
||||
raw_inode->i_pad = 0;
|
||||
memset((void *)raw_inode + sizeof(*raw_inode), 0,
|
||||
nilfs->ns_inode_size - sizeof(*raw_inode));
|
||||
}
|
||||
|
||||
if (has_bmap)
|
||||
nilfs_bmap_write(ii->i_bmap, raw_inode);
|
||||
else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
|
||||
raw_inode->i_device_code =
|
||||
cpu_to_le64(huge_encode_dev(inode->i_rdev));
|
||||
/*
|
||||
* When extending inode, nilfs->ns_inode_size should be checked
|
||||
* for substitutions of appended fields.
|
||||
@ -813,14 +808,13 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
|
||||
if (flags & I_DIRTY_DATASYNC)
|
||||
set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
|
||||
|
||||
nilfs_write_inode_common(inode, raw_inode, 0);
|
||||
/*
|
||||
* XXX: call with has_bmap = 0 is a workaround to avoid
|
||||
* deadlock of bmap. This delays update of i_bmap to just
|
||||
* before writing.
|
||||
*/
|
||||
nilfs_write_inode_common(inode, raw_inode);
|
||||
|
||||
nilfs_ifile_unmap_inode(ifile, ino, ibh);
|
||||
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
|
||||
raw_inode->i_device_code =
|
||||
cpu_to_le64(huge_encode_dev(inode->i_rdev));
|
||||
|
||||
nilfs_ifile_unmap_inode(raw_inode);
|
||||
}
|
||||
|
||||
#define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */
|
||||
|
@@ -1111,7 +1111,7 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
 	segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;

 	minseg = range[0] + segbytes - 1;
-	do_div(minseg, segbytes);
+	minseg = div64_ul(minseg, segbytes);

 	if (range[1] < 4096)
 		goto out;
@@ -1120,7 +1120,7 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
 	if (maxseg < segbytes)
 		goto out;

-	do_div(maxseg, segbytes);
+	maxseg = div64_ul(maxseg, segbytes);
 	maxseg--;

 	ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
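A note on the do_div() to div64_ul() conversions above (the same swap recurs in the resize and segment-count paths later in this series): do_div() divides its 64-bit first argument in place and returns the remainder, while div64_ul() leaves its arguments untouched and returns the quotient, which is why each converted call site gains an explicit assignment. A hedged, userspace-style sketch of the equivalence (the sketch_* names are invented here; the real helpers live in <asm/div64.h> and <linux/math64.h>):

/* Hedged sketch only: mimics the semantics of the two kernel helpers. */
#include <stdint.h>
#include <stdio.h>

/* do_div(n, base): n becomes the quotient, the remainder is returned. */
#define sketch_do_div(n, base) \
	({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

/* div64_ul(n, base): returns the quotient, n is left untouched. */
static inline uint64_t sketch_div64_ul(uint64_t n, unsigned long base)
{
	return n / base;
}

int main(void)
{
	uint64_t minseg = 1000003;
	unsigned long segbytes = 4096;
	uint64_t a = minseg;

	sketch_do_div(a, segbytes);			/* a now holds the quotient */
	uint64_t b = sketch_div64_ul(minseg, segbytes);	/* minseg unchanged */

	printf("%llu %llu\n", (unsigned long long)a, (unsigned long long)b);
	return 0;
}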
|
@@ -47,12 +47,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,

 	set_buffer_mapped(bh);

-	kaddr = kmap_atomic(bh->b_page);
+	kaddr = kmap_local_page(bh->b_page);
 	memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
 	if (init_block)
 		init_block(inode, bh, kaddr);
 	flush_dcache_page(bh->b_page);
-	kunmap_atomic(kaddr);
+	kunmap_local(kaddr);

 	set_buffer_uptodate(bh);
 	mark_buffer_dirty(bh);
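Most of the nilfs2 churn in this pull is the mechanical kmap_atomic()/kunmap_atomic() to kmap_local_page()/kunmap_local() conversion shown above: map the page, touch the buffer, flush if needed, and unmap with the address the mapping call returned, without the implicit preemption/pagefault disabling that kmap_atomic() carried. A hedged sketch of the bare pattern, assuming a buffer_head obtained elsewhere (the helper name is invented for illustration):

/* Hedged sketch, not taken from the patch itself: the generic kmap_local pattern. */
#include <linux/highmem.h>
#include <linux/buffer_head.h>

static void sketch_zero_buffer(struct buffer_head *bh, unsigned int blocksize)
{
	void *kaddr;

	/* The mapping is CPU-local and nests; preemption stays enabled. */
	kaddr = kmap_local_page(bh->b_page);
	memset(kaddr + bh_offset(bh), 0, blocksize);
	flush_dcache_page(bh->b_page);
	/* Unmap with the address returned by the matching kmap_local_page(). */
	kunmap_local(kaddr);
}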
|
@@ -256,7 +256,8 @@ extern struct inode *nilfs_new_inode(struct inode *, umode_t);
 extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 extern void nilfs_set_inode_flags(struct inode *);
 extern int nilfs_read_inode_common(struct inode *, struct nilfs_inode *);
-extern void nilfs_write_inode_common(struct inode *, struct nilfs_inode *, int);
+void nilfs_write_inode_common(struct inode *inode,
+			      struct nilfs_inode *raw_inode);
 struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 			    unsigned long ino);
 struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
|
@@ -103,11 +103,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
 	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
 	struct buffer_head *bh;

-	kaddr0 = kmap_atomic(spage);
-	kaddr1 = kmap_atomic(dpage);
+	kaddr0 = kmap_local_page(spage);
+	kaddr1 = kmap_local_page(dpage);
 	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
-	kunmap_atomic(kaddr1);
-	kunmap_atomic(kaddr0);
+	kunmap_local(kaddr1);
+	kunmap_local(kaddr0);

 	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
 	dbh->b_blocknr = sbh->b_blocknr;
|
@@ -482,9 +482,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
 	if (unlikely(!bh_org))
 		return -EIO;

-	kaddr = kmap_atomic(page);
+	kaddr = kmap_local_page(page);
 	memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
-	kunmap_atomic(kaddr);
+	kunmap_local(kaddr);
 	brelse(bh_org);
 	return 0;
 }
|
@@ -220,9 +220,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
 		crc = crc32_le(crc, bh->b_data, bh->b_size);
 	}
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		kaddr = kmap_atomic(bh->b_page);
+		kaddr = kmap_local_page(bh->b_page);
 		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
-		kunmap_atomic(kaddr);
+		kunmap_local(kaddr);
 	}
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
|
||||
|
@ -880,76 +880,6 @@ static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
|
||||
nilfs_mdt_clear_dirty(nilfs->ns_dat);
|
||||
}
|
||||
|
||||
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
|
||||
{
|
||||
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
|
||||
struct buffer_head *bh_cp;
|
||||
struct nilfs_checkpoint *raw_cp;
|
||||
int err;
|
||||
|
||||
/* XXX: this interface will be changed */
|
||||
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
|
||||
&raw_cp, &bh_cp);
|
||||
if (likely(!err)) {
|
||||
/*
|
||||
* The following code is duplicated with cpfile. But, it is
|
||||
* needed to collect the checkpoint even if it was not newly
|
||||
* created.
|
||||
*/
|
||||
mark_buffer_dirty(bh_cp);
|
||||
nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
|
||||
nilfs_cpfile_put_checkpoint(
|
||||
nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
|
||||
} else if (err == -EINVAL || err == -ENOENT) {
|
||||
nilfs_error(sci->sc_super,
|
||||
"checkpoint creation failed due to metadata corruption.");
|
||||
err = -EIO;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
|
||||
{
|
||||
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
|
||||
struct buffer_head *bh_cp;
|
||||
struct nilfs_checkpoint *raw_cp;
|
||||
int err;
|
||||
|
||||
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
|
||||
&raw_cp, &bh_cp);
|
||||
if (unlikely(err)) {
|
||||
if (err == -EINVAL || err == -ENOENT) {
|
||||
nilfs_error(sci->sc_super,
|
||||
"checkpoint finalization failed due to metadata corruption.");
|
||||
err = -EIO;
|
||||
}
|
||||
goto failed_ibh;
|
||||
}
|
||||
raw_cp->cp_snapshot_list.ssl_next = 0;
|
||||
raw_cp->cp_snapshot_list.ssl_prev = 0;
|
||||
raw_cp->cp_inodes_count =
|
||||
cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
|
||||
raw_cp->cp_blocks_count =
|
||||
cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
|
||||
raw_cp->cp_nblk_inc =
|
||||
cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
|
||||
raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
|
||||
raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
|
||||
|
||||
if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
|
||||
nilfs_checkpoint_clear_minor(raw_cp);
|
||||
else
|
||||
nilfs_checkpoint_set_minor(raw_cp);
|
||||
|
||||
nilfs_write_inode_common(sci->sc_root->ifile,
|
||||
&raw_cp->cp_ifile_inode, 1);
|
||||
nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
|
||||
return 0;
|
||||
|
||||
failed_ibh:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void nilfs_fill_in_file_bmap(struct inode *ifile,
|
||||
struct nilfs_inode_info *ii)
|
||||
|
||||
@ -963,7 +893,7 @@ static void nilfs_fill_in_file_bmap(struct inode *ifile,
|
||||
raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
|
||||
ibh);
|
||||
nilfs_bmap_write(ii->i_bmap, raw_inode);
|
||||
nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
|
||||
nilfs_ifile_unmap_inode(raw_inode);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -977,6 +907,33 @@ static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
 	}
 }

+/**
+ * nilfs_write_root_mdt_inode - export root metadata inode information to
+ * the on-disk inode
+ * @inode: inode object of the root metadata file
+ * @raw_inode: on-disk inode
+ *
+ * nilfs_write_root_mdt_inode() writes inode information and bmap data of
+ * @inode to the inode area of the metadata file allocated on the super root
+ * block created to finalize the log. Since super root blocks are configured
+ * each time, this function zero-fills the unused area of @raw_inode.
+ */
+static void nilfs_write_root_mdt_inode(struct inode *inode,
+				       struct nilfs_inode *raw_inode)
+{
+	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+
+	nilfs_write_inode_common(inode, raw_inode);
+
+	/* zero-fill unused portion of raw_inode */
+	raw_inode->i_xattr = 0;
+	raw_inode->i_pad = 0;
+	memset((void *)raw_inode + sizeof(*raw_inode), 0,
+	       nilfs->ns_inode_size - sizeof(*raw_inode));
+
+	nilfs_bmap_write(NILFS_I(inode)->i_bmap, raw_inode);
+}
+
 static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 					     struct the_nilfs *nilfs)
 {
@@ -998,12 +955,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 				nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 	raw_sr->sr_flags = 0;

-	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
-				 NILFS_SR_DAT_OFFSET(isz), 1);
-	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
-				 NILFS_SR_CPFILE_OFFSET(isz), 1);
-	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
-				 NILFS_SR_SUFILE_OFFSET(isz), 1);
+	nilfs_write_root_mdt_inode(nilfs->ns_dat, (void *)raw_sr +
+				   NILFS_SR_DAT_OFFSET(isz));
+	nilfs_write_root_mdt_inode(nilfs->ns_cpfile, (void *)raw_sr +
+				   NILFS_SR_CPFILE_OFFSET(isz));
+	nilfs_write_root_mdt_inode(nilfs->ns_sufile, (void *)raw_sr +
+				   NILFS_SR_SUFILE_OFFSET(isz));

 	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
 	set_buffer_uptodate(bh_sr);
 	unlock_buffer(bh_sr);
@@ -1230,7 +1188,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
 			break;
 		nilfs_sc_cstage_inc(sci);
 		/* Creating a checkpoint */
-		err = nilfs_segctor_create_checkpoint(sci);
+		err = nilfs_cpfile_create_checkpoint(nilfs->ns_cpfile,
+						     nilfs->ns_cno);
 		if (unlikely(err))
 			break;
 		fallthrough;
@@ -2101,7 +2060,11 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)

 	if (mode == SC_LSEG_SR &&
 	    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
-		err = nilfs_segctor_fill_in_checkpoint(sci);
+		err = nilfs_cpfile_finalize_checkpoint(
+			nilfs->ns_cpfile, nilfs->ns_cno, sci->sc_root,
+			sci->sc_nblk_inc + sci->sc_nblk_this_inc,
+			sci->sc_seg_ctime,
+			!test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags));
 		if (unlikely(err))
 			goto failed_to_write;
|
||||
|
@ -48,7 +48,7 @@ nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
|
||||
{
|
||||
__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
|
||||
|
||||
do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
|
||||
t = div64_ul(t, nilfs_sufile_segment_usages_per_block(sufile));
|
||||
return (unsigned long)t;
|
||||
}
|
||||
|
||||
@ -107,11 +107,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
|
||||
struct nilfs_sufile_header *header;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
|
||||
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(header_bh);
|
||||
}
|
||||
@ -315,10 +315,10 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
|
||||
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
last_alloc = le64_to_cpu(header->sh_last_alloc);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nsegments = nilfs_sufile_get_nsegments(sufile);
|
||||
maxsegnum = sui->allocmax;
|
||||
@ -352,7 +352,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
|
||||
&su_bh);
|
||||
if (ret < 0)
|
||||
goto out_header;
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(
|
||||
sufile, segnum, su_bh, kaddr);
|
||||
|
||||
@ -363,14 +363,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
|
||||
continue;
|
||||
/* found a clean segment */
|
||||
nilfs_segment_usage_set_dirty(su);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
le64_add_cpu(&header->sh_ncleansegs, -1);
|
||||
le64_add_cpu(&header->sh_ndirtysegs, 1);
|
||||
header->sh_last_alloc = cpu_to_le64(segnum);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
sui->ncleansegs--;
|
||||
mark_buffer_dirty(header_bh);
|
||||
@ -384,7 +384,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
|
||||
goto out_header;
|
||||
}
|
||||
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(su_bh);
|
||||
}
|
||||
|
||||
@ -406,16 +406,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
|
||||
struct nilfs_segment_usage *su;
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
|
||||
if (unlikely(!nilfs_segment_usage_clean(su))) {
|
||||
nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
|
||||
__func__, (unsigned long long)segnum);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
return;
|
||||
}
|
||||
nilfs_segment_usage_set_dirty(su);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nilfs_sufile_mod_counter(header_bh, -1, 1);
|
||||
NILFS_SUI(sufile)->ncleansegs--;
|
||||
@ -432,11 +432,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
|
||||
void *kaddr;
|
||||
int clean, dirty;
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
|
||||
if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
|
||||
su->su_nblocks == cpu_to_le32(0)) {
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
return;
|
||||
}
|
||||
clean = nilfs_segment_usage_clean(su);
|
||||
@ -446,7 +446,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
|
||||
su->su_lastmod = cpu_to_le64(0);
|
||||
su->su_nblocks = cpu_to_le32(0);
|
||||
su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
|
||||
NILFS_SUI(sufile)->ncleansegs -= clean;
|
||||
@ -463,12 +463,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
|
||||
void *kaddr;
|
||||
int sudirty;
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
|
||||
if (nilfs_segment_usage_clean(su)) {
|
||||
nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
|
||||
__func__, (unsigned long long)segnum);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
return;
|
||||
}
|
||||
if (unlikely(nilfs_segment_usage_error(su)))
|
||||
@ -481,7 +481,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
|
||||
(unsigned long long)segnum);
|
||||
|
||||
nilfs_segment_usage_set_clean(su);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
mark_buffer_dirty(su_bh);
|
||||
|
||||
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
|
||||
@ -509,12 +509,12 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
|
||||
if (ret)
|
||||
goto out_sem;
|
||||
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
|
||||
if (unlikely(nilfs_segment_usage_error(su))) {
|
||||
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
|
||||
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(bh);
|
||||
if (nilfs_segment_is_active(nilfs, segnum)) {
|
||||
nilfs_error(sufile->i_sb,
|
||||
@ -532,7 +532,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
|
||||
ret = -EIO;
|
||||
} else {
|
||||
nilfs_segment_usage_set_dirty(su);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
mark_buffer_dirty(bh);
|
||||
nilfs_mdt_mark_dirty(sufile);
|
||||
brelse(bh);
|
||||
@ -562,7 +562,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
|
||||
if (modtime) {
|
||||
/*
|
||||
@ -573,7 +573,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
|
||||
su->su_lastmod = cpu_to_le64(modtime);
|
||||
}
|
||||
su->su_nblocks = cpu_to_le32(nblocks);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(bh);
|
||||
nilfs_mdt_mark_dirty(sufile);
|
||||
@ -614,7 +614,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
|
||||
if (ret < 0)
|
||||
goto out_sem;
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
|
||||
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
|
||||
@ -624,7 +624,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
|
||||
spin_lock(&nilfs->ns_last_segment_lock);
|
||||
sustat->ss_prot_seq = nilfs->ns_prot_seq;
|
||||
spin_unlock(&nilfs->ns_last_segment_lock);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(header_bh);
|
||||
|
||||
out_sem:
|
||||
@ -640,15 +640,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
|
||||
void *kaddr;
|
||||
int suclean;
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
|
||||
if (nilfs_segment_usage_error(su)) {
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
return;
|
||||
}
|
||||
suclean = nilfs_segment_usage_clean(su);
|
||||
nilfs_segment_usage_set_error(su);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (suclean) {
|
||||
nilfs_sufile_mod_counter(header_bh, -1, 0);
|
||||
@ -717,7 +717,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
|
||||
/* hole */
|
||||
continue;
|
||||
}
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(
|
||||
sufile, segnum, su_bh, kaddr);
|
||||
su2 = su;
|
||||
@ -726,7 +726,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
|
||||
~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
|
||||
nilfs_segment_is_active(nilfs, segnum + j)) {
|
||||
ret = -EBUSY;
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(su_bh);
|
||||
goto out_header;
|
||||
}
|
||||
@ -738,7 +738,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
|
||||
nc++;
|
||||
}
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
if (nc > 0) {
|
||||
mark_buffer_dirty(su_bh);
|
||||
ncleaned += nc;
|
||||
@ -823,10 +823,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
|
||||
sui->allocmin = 0;
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
mark_buffer_dirty(header_bh);
|
||||
nilfs_mdt_mark_dirty(sufile);
|
||||
@ -891,7 +891,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
|
||||
continue;
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(
|
||||
sufile, segnum, su_bh, kaddr);
|
||||
for (j = 0; j < n;
|
||||
@ -904,7 +904,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
|
||||
si->sui_flags |=
|
||||
BIT(NILFS_SEGMENT_USAGE_ACTIVE);
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(su_bh);
|
||||
}
|
||||
ret = nsegs;
|
||||
@ -973,7 +973,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
|
||||
goto out_header;
|
||||
|
||||
for (;;) {
|
||||
kaddr = kmap_atomic(bh->b_page);
|
||||
kaddr = kmap_local_page(bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(
|
||||
sufile, sup->sup_segnum, bh, kaddr);
|
||||
|
||||
@ -1010,7 +1010,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
|
||||
su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
|
||||
}
|
||||
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
sup = (void *)sup + supsz;
|
||||
if (sup >= supend)
|
||||
@ -1115,7 +1115,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
|
||||
continue;
|
||||
}
|
||||
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
|
||||
su_bh, kaddr);
|
||||
for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
|
||||
@ -1145,7 +1145,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
|
||||
}
|
||||
|
||||
if (nblocks >= minlen) {
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
|
||||
ret = blkdev_issue_discard(nilfs->ns_bdev,
|
||||
start * sects_per_block,
|
||||
@ -1157,7 +1157,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
|
||||
}
|
||||
|
||||
ndiscarded += nblocks;
|
||||
kaddr = kmap_atomic(su_bh->b_page);
|
||||
kaddr = kmap_local_page(su_bh->b_page);
|
||||
su = nilfs_sufile_block_get_segment_usage(
|
||||
sufile, segnum, su_bh, kaddr);
|
||||
}
|
||||
@ -1166,7 +1166,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
|
||||
start = seg_start;
|
||||
nblocks = seg_end - seg_start + 1;
|
||||
}
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
put_bh(su_bh);
|
||||
}
|
||||
|
||||
@ -1246,10 +1246,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
|
||||
goto failed;
|
||||
|
||||
sui = NILFS_SUI(sufile);
|
||||
kaddr = kmap_atomic(header_bh->b_page);
|
||||
kaddr = kmap_local_page(header_bh->b_page);
|
||||
header = kaddr + bh_offset(header_bh);
|
||||
sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
|
||||
kunmap_atomic(kaddr);
|
||||
kunmap_local(kaddr);
|
||||
brelse(header_bh);
|
||||
|
||||
sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
|
||||
|
@@ -448,7 +448,7 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)

 	sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
 	newnsegs = sb2off >> nilfs->ns_blocksize_bits;
-	do_div(newnsegs, nilfs->ns_blocks_per_segment);
+	newnsegs = div64_ul(newnsegs, nilfs->ns_blocks_per_segment);

 	ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
 	up_write(&nilfs->ns_segctor_sem);
@@ -544,8 +544,6 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
 {
 	struct the_nilfs *nilfs = sb->s_fs_info;
 	struct nilfs_root *root;
-	struct nilfs_checkpoint *raw_cp;
-	struct buffer_head *bh_cp;
 	int err = -ENOMEM;

 	root = nilfs_find_or_create_root(
@@ -557,38 +555,19 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
 		goto reuse; /* already attached checkpoint */

 	down_read(&nilfs->ns_segctor_sem);
-	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
-					  &bh_cp);
+	err = nilfs_ifile_read(sb, root, cno, nilfs->ns_inode_size);
 	up_read(&nilfs->ns_segctor_sem);
-	if (unlikely(err)) {
-		if (err == -ENOENT || err == -EINVAL) {
-			nilfs_err(sb,
-				  "Invalid checkpoint (checkpoint number=%llu)",
-				  (unsigned long long)cno);
-			err = -EINVAL;
-		}
+	if (unlikely(err))
 		goto failed;
-	}
-
-	err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size,
-			       &raw_cp->cp_ifile_inode, &root->ifile);
-	if (err)
-		goto failed_bh;
-
-	atomic64_set(&root->inodes_count,
-		     le64_to_cpu(raw_cp->cp_inodes_count));
-	atomic64_set(&root->blocks_count,
-		     le64_to_cpu(raw_cp->cp_blocks_count));
-
-	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);

 reuse:
 	*rootp = root;
 	return 0;

-failed_bh:
-	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);
 failed:
+	if (err == -EINVAL)
+		nilfs_err(sb, "Invalid checkpoint (checkpoint number=%llu)",
+			  (unsigned long long)cno);
 	nilfs_put_root(root);

 	return err;
|
@@ -413,7 +413,7 @@ static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
 {
 	u64 max_count = U64_MAX;

-	do_div(max_count, nilfs->ns_blocks_per_segment);
+	max_count = div64_ul(max_count, nilfs->ns_blocks_per_segment);
 	return min_t(u64, max_count, ULONG_MAX);
 }
|
||||
|
@@ -1615,7 +1615,7 @@ update_holders:
 unlock:
 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);

-	/* ocfs2_unblock_lock reques on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+	/* ocfs2_unblock_lock request on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
 	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);

 	spin_unlock_irqrestore(&lockres->l_lock, flags);
|
@@ -2763,6 +2763,7 @@ const struct inode_operations ocfs2_file_iops = {
 const struct inode_operations ocfs2_special_file_iops = {
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
+	.listxattr	= ocfs2_listxattr,
 	.permission	= ocfs2_permission,
 	.get_inode_acl	= ocfs2_iop_get_acl,
 	.set_acl	= ocfs2_iop_set_acl,
|
@@ -1711,12 +1711,12 @@ static int ocfs2_initialize_mem_caches(void)
 	ocfs2_dquot_cachep = kmem_cache_create("ocfs2_dquot_cache",
 					sizeof(struct ocfs2_dquot),
 					0,
-					(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT),
+					SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
 					NULL);
 	ocfs2_qf_chunk_cachep = kmem_cache_create("ocfs2_qf_chunk_cache",
 					sizeof(struct ocfs2_quota_chunk),
 					0,
-					(SLAB_RECLAIM_ACCOUNT),
+					SLAB_RECLAIM_ACCOUNT,
 					NULL);
 	if (!ocfs2_inode_cachep || !ocfs2_dquot_cachep ||
 	    !ocfs2_qf_chunk_cachep) {
|
@@ -984,7 +984,7 @@
  * -fsanitize=thread produce unwanted sections (.eh_frame
  * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
  * keep any .init_array.* sections.
- * https://bugs.llvm.org/show_bug.cgi?id=46478
+ * https://llvm.org/pr46478
  */
 #ifdef CONFIG_UNWIND_TABLES
 #define DISCARD_EH_FRAME
|
@@ -9,7 +9,7 @@
  * Clang prior to 17 is being silly and considers many __cleanup() variables
  * as unused (because they are, their sole purpose is to go out of scope).
  *
- * https://reviews.llvm.org/D152180
+ * https://github.com/llvm/llvm-project/commit/877210faa447f4cc7db87812f8ed80e398fedd61
  */
 #undef __cleanup
 #define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
@@ -114,11 +114,7 @@
 #define __diag_str(s)		__diag_str1(s)
 #define __diag(s)		_Pragma(__diag_str(clang diagnostic s))

-#if CONFIG_CLANG_VERSION >= 110000
-#define __diag_clang_11(s)	__diag(s)
-#else
-#define __diag_clang_11(s)
-#endif
+#define __diag_clang_13(s)	__diag(s)

 #define __diag_ignore_all(option, comment) \
-	__diag_clang(11, ignore, option)
+	__diag_clang(13, ignore, option)
|
@@ -38,38 +38,6 @@ int fprop_global_init(struct fprop_global *p, gfp_t gfp);
 void fprop_global_destroy(struct fprop_global *p);
 bool fprop_new_period(struct fprop_global *p, int periods);

-/*
- * ---- SINGLE ----
- */
-struct fprop_local_single {
-	/* the local events counter */
-	unsigned long events;
-	/* Period in which we last updated events */
-	unsigned int period;
-	raw_spinlock_t lock;	/* Protect period and numerator */
-};
-
-#define INIT_FPROP_LOCAL_SINGLE(name)	\
-{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
-}
-
-int fprop_local_init_single(struct fprop_local_single *pl);
-void fprop_local_destroy_single(struct fprop_local_single *pl);
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
-void fprop_fraction_single(struct fprop_global *p,
-	struct fprop_local_single *pl, unsigned long *numerator,
-	unsigned long *denominator);
-
-static inline
-void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__fprop_inc_single(p, pl);
-	local_irq_restore(flags);
-}
-
 /*
  * ---- PERCPU ----
  */
|
@@ -766,7 +766,7 @@ static inline size_t list_count_nodes(struct list_head *head)
  * @member:	the name of the list_head within the struct.
  */
 #define list_entry_is_head(pos, head, member)				\
-	(&pos->member == (head))
+	list_is_head(&pos->member, (head))

 /**
  * list_for_each_entry	-	iterate over list of given type
@@ -1195,4 +1195,19 @@ static inline void hlist_splice_init(struct hlist_head *from,
 	     pos && ({ n = pos->member.next; 1; });			\
 	     pos = hlist_entry_safe(n, typeof(*pos), member))

+/**
+ * hlist_count_nodes - count nodes in the hlist
+ * @head:	the head for your hlist.
+ */
+static inline size_t hlist_count_nodes(struct hlist_head *head)
+{
+	struct hlist_node *pos;
+	size_t count = 0;
+
+	hlist_for_each(pos, head)
+		count++;
+
+	return count;
+}
+
 #endif
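hlist_count_nodes() added above is a plain O(n) walk and, like the other hlist helpers, leaves locking to the caller. A hedged usage sketch (struct foo and foo_bucket are invented for illustration, not taken from the series):

/* Hypothetical example only. */
#include <linux/list.h>

struct foo {
	int value;
	struct hlist_node node;
};

static HLIST_HEAD(foo_bucket);

static size_t foo_add_and_count(struct foo *f)
{
	hlist_add_head(&f->node, &foo_bucket);
	return hlist_count_nodes(&foo_bucket);	/* O(n) walk of the bucket */
}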
|
@@ -35,31 +35,33 @@ static __always_inline
 void min_heapify(struct min_heap *heap, int pos,
 		const struct min_heap_callbacks *func)
 {
-	void *left, *right, *parent, *smallest;
+	void *left, *right;
 	void *data = heap->data;
+	void *root = data + pos * func->elem_size;
+	int i = pos, j;

+	/* Find the sift-down path all the way to the leaves. */
 	for (;;) {
-		if (pos * 2 + 1 >= heap->nr)
+		if (i * 2 + 2 >= heap->nr)
 			break;
+		left = data + (i * 2 + 1) * func->elem_size;
+		right = data + (i * 2 + 2) * func->elem_size;
+		i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2;
+	}

-		left = data + ((pos * 2 + 1) * func->elem_size);
-		parent = data + (pos * func->elem_size);
-		smallest = parent;
-		if (func->less(left, smallest))
-			smallest = left;
+	/* Special case for the last leaf with no sibling. */
+	if (i * 2 + 2 == heap->nr)
+		i = i * 2 + 1;

-		if (pos * 2 + 2 < heap->nr) {
-			right = data + ((pos * 2 + 2) * func->elem_size);
-			if (func->less(right, smallest))
-				smallest = right;
-		}
-		if (smallest == parent)
-			break;
-		func->swp(smallest, parent);
-		if (smallest == left)
-			pos = (pos * 2) + 1;
-		else
-			pos = (pos * 2) + 2;
+	/* Backtrack to the correct location. */
+	while (i != pos && func->less(root, data + i * func->elem_size))
+		i = (i - 1) / 2;
+
+	/* Shift the element into its correct place. */
+	j = i;
+	while (i != pos) {
+		i = (i - 1) / 2;
+		func->swp(data + i * func->elem_size, data + j * func->elem_size);
+	}
 }

@@ -70,7 +72,7 @@ void min_heapify_all(struct min_heap *heap,
 {
 	int i;

-	for (i = heap->nr / 2; i >= 0; i--)
+	for (i = heap->nr / 2 - 1; i >= 0; i--)
 		min_heapify(heap, i, func);
 }
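The rewritten min_heapify() above is the bottom-up variant: it first follows the smaller-child path all the way to a leaf, then backtracks to where the displaced root element actually belongs, and only then shifts the elements on that path. Since most elements end up near the leaves anyway, this saves roughly half the comparisons of the textbook top-down sift-down. A hedged, self-contained sketch of the same idea on a plain int array (not the kernel's callback-based min_heap API):

#include <stddef.h>

/* Bottom-up sift-down on an int min-heap of n elements, starting at pos. */
static void sift_down_bottom_up(int heap[], size_t n, size_t pos)
{
	int root = heap[pos];
	size_t i = pos;

	/* Walk the smaller-child path down to a leaf. */
	while (i * 2 + 2 < n)
		i = heap[i * 2 + 1] < heap[i * 2 + 2] ? i * 2 + 1 : i * 2 + 2;
	if (i * 2 + 2 == n)		/* last leaf has no sibling */
		i = i * 2 + 1;

	/* Backtrack until the saved root element fits at position i. */
	while (i != pos && root < heap[i])
		i = (i - 1) / 2;

	/* Shift the path elements up one level and drop the root in. */
	while (i != pos) {
		int tmp = heap[i];

		heap[i] = root;
		root = tmp;
		i = (i - 1) / 2;
	}
	heap[pos] = root;
}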
|
||||
|
@@ -216,13 +216,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif

-struct ctl_table;
-int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
-
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
 #endif
|
@@ -9,7 +9,5 @@
    up something else.  */

 extern asmlinkage void __init __noreturn start_kernel(void);
-extern void __init __noreturn arch_call_rest_init(void);
-extern void __ref __noreturn rest_init(void);

 #endif /* _LINUX_START_KERNEL_H */
|
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/**
- * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
+/*
+ * win_minmax.h: windowed min/max tracker by Kathleen Nichols.
  *
  */
 #ifndef MINMAX_H
|
@@ -683,7 +683,7 @@ static void __init setup_command_line(char *command_line)

 static __initdata DECLARE_COMPLETION(kthreadd_done);

-noinline void __ref __noreturn rest_init(void)
+static noinline void __ref __noreturn rest_init(void)
 {
 	struct task_struct *tsk;
 	int pid;
@@ -828,11 +828,6 @@ static int __init early_randomize_kstack_offset(char *buf)
 early_param("randomize_kstack_offset", early_randomize_kstack_offset);
 #endif

-void __init __weak __noreturn arch_call_rest_init(void)
-{
-	rest_init();
-}
-
 static void __init print_unknown_bootoptions(void)
 {
 	char *unknown_options;
@@ -1076,7 +1071,7 @@ void start_kernel(void)
 	kcsan_init();

 	/* Do the rest non-__init'ed, we're now alive */
-	arch_call_rest_init();
+	rest_init();

 	/*
 	 * Avoid stack canaries in callers of boot_init_stack_canary for gcc-10
|
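Taken together, the three init/main.c and start_kernel.h hunks above drop the arch_call_rest_init() indirection (its __weak default simply called rest_init(), and the extern declarations are removed from the header) and make rest_init() static, leaving start_kernel() to call it directly. A structural sketch of the resulting shape, heavily simplified and not the real definitions:

	/* Structural sketch only: after this series, start_kernel() calls
	 * rest_init() directly and rest_init() is static, since init/main.c
	 * appears to be its only remaining caller. */

	static void rest_init(void)
	{
		/* spawn kernel_init (the future PID 1) and kthreadd,
		 * then turn the boot task into the idle task */
	}

	void start_kernel(void)
	{
		/* ... all of early init ... */

		/* Do the rest non-__init'ed, we're now alive */
		rest_init();	/* arch_call_rest_init() indirection is gone */
	}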
@@ -14,6 +14,7 @@
 #include <linux/ipc_namespace.h>
 #include <linux/msg.h>
 #include <linux/slab.h>
+#include <linux/cred.h>
 #include "util.h"

 static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
@@ -190,25 +191,57 @@ static int set_is_seen(struct ctl_table_set *set)
 	return &current->nsproxy->ipc_ns->ipc_set == set;
 }

+static void ipc_set_ownership(struct ctl_table_header *head,
+			      struct ctl_table *table,
+			      kuid_t *uid, kgid_t *gid)
+{
+	struct ipc_namespace *ns =
+		container_of(head->set, struct ipc_namespace, ipc_set);
+
+	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
+	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
+
+	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
+	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
+}
+
 static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
 {
 	int mode = table->mode;

 #ifdef CONFIG_CHECKPOINT_RESTORE
-	struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+	struct ipc_namespace *ns =
+		container_of(head->set, struct ipc_namespace, ipc_set);

 	if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
 	     (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
 	     (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
 	    checkpoint_restore_ns_capable(ns->user_ns))
 		mode = 0666;
 	else
 #endif
-	return mode;
+	{
+		kuid_t ns_root_uid;
+		kgid_t ns_root_gid;
+
+		ipc_set_ownership(head, table, &ns_root_uid, &ns_root_gid);
+
+		if (uid_eq(current_euid(), ns_root_uid))
+			mode >>= 6;
+
+		else if (in_egroup_p(ns_root_gid))
+			mode >>= 3;
+	}
+
+	mode &= 7;
+
+	return (mode << 6) | (mode << 3) | mode;
 }

 static struct ctl_table_root set_root = {
 	.lookup = set_lookup,
 	.permissions = ipc_permissions,
+	.set_ownership = ipc_set_ownership,
 };

 bool setup_ipc_sysctls(struct ipc_namespace *ns)
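In the new ipc_permissions() above (ignoring the CONFIG_CHECKPOINT_RESTORE special case), the caller's relationship to the namespace root selects one rwx triplet from the table mode, which is then replicated into the owner, group and other positions so the generic sysctl permission check grants the same access whichever slot it tests. A small worked model of that calculation, with illustrative inputs rather than kernel code:

	#include <stdio.h>

	/* Mirrors the mode arithmetic above; the flags stand in for the
	 * uid_eq()/in_egroup_p() checks against the namespace root. */
	static int effective_mode(int mode, int is_ns_root_uid, int in_ns_root_group)
	{
		if (is_ns_root_uid)
			mode >>= 6;		/* use the owner bits */
		else if (in_ns_root_group)
			mode >>= 3;		/* use the group bits */

		mode &= 7;			/* keep a single rwx triplet */

		/* Replicate it across owner, group and other. */
		return (mode << 6) | (mode << 3) | mode;
	}

	int main(void)
	{
		printf("%o\n", effective_mode(0644, 1, 0));	/* ns root uid   -> 666 */
		printf("%o\n", effective_mode(0644, 0, 1));	/* ns root group -> 444 */
		printf("%o\n", effective_mode(0644, 0, 0));	/* anyone else   -> 444 */
		return 0;
	}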
@@ -12,6 +12,7 @@
 #include <linux/stat.h>
 #include <linux/capability.h>
 #include <linux/slab.h>
+#include <linux/cred.h>

 static int msg_max_limit_min = MIN_MSGMAX;
 static int msg_max_limit_max = HARD_MSGMAX;
@@ -76,8 +77,43 @@ static int set_is_seen(struct ctl_table_set *set)
 	return &current->nsproxy->ipc_ns->mq_set == set;
 }

+static void mq_set_ownership(struct ctl_table_header *head,
+			     struct ctl_table *table,
+			     kuid_t *uid, kgid_t *gid)
+{
+	struct ipc_namespace *ns =
+		container_of(head->set, struct ipc_namespace, mq_set);
+
+	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
+	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
+
+	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
+	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
+}
+
+static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table)
+{
+	int mode = table->mode;
+	kuid_t ns_root_uid;
+	kgid_t ns_root_gid;
+
+	mq_set_ownership(head, table, &ns_root_uid, &ns_root_gid);
+
+	if (uid_eq(current_euid(), ns_root_uid))
+		mode >>= 6;
+
+	else if (in_egroup_p(ns_root_gid))
+		mode >>= 3;
+
+	mode &= 7;
+
+	return (mode << 6) | (mode << 3) | mode;
+}
+
 static struct ctl_table_root set_root = {
 	.lookup = set_lookup,
+	.permissions = mq_permissions,
+	.set_ownership = mq_set_ownership,
 };

 bool setup_mq_sysctls(struct ipc_namespace *ns)
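mq_set_ownership() and mq_permissions() mirror the ipc_sysctl.c helpers, keyed off mq_set instead of ipc_set. Both *_set_ownership() helpers report the sysctl files as owned by UID/GID 0 of the user namespace that owns the IPC namespace, and fall back to the global root when 0 is not mapped there (make_kuid()/make_kgid() then return an invalid ID). A userspace model of that fallback, using a single-extent uid_map stand-in rather than the kernel API:

	#include <stdio.h>

	/* Illustrative single-extent uid_map: ns uids first.. map to lower_first.. */
	struct uid_extent { long first, lower_first, count; };

	/* Rough model of make_kuid(): -1 stands in for an invalid kuid. */
	static long make_kuid_model(const struct uid_extent *map, long ns_uid)
	{
		if (ns_uid >= map->first && ns_uid < map->first + map->count)
			return map->lower_first + (ns_uid - map->first);
		return -1;
	}

	int main(void)
	{
		struct uid_extent mapped  = { 0, 100000, 65536 };	/* uid 0 is mapped */
		struct uid_extent no_root = { 1000, 1000, 1 };		/* uid 0 unmapped  */
		long uid;

		uid = make_kuid_model(&mapped, 0);
		printf("%ld\n", uid >= 0 ? uid : 0);	/* 100000: namespace root */

		uid = make_kuid_model(&no_root, 0);
		printf("%ld\n", uid >= 0 ? uid : 0);	/* 0: GLOBAL_ROOT_UID fallback */
		return 0;
	}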
Some files were not shown because too many files have changed in this diff.