Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-05 03:44:03 +08:00)
Merge branch 'akpm' (patchbomb from Andrew Morton)
Merge incoming from Andrew Morton:
 - Various misc things.
 - arch/sh updates.
 - Part of ocfs2.  Review is slow.
 - Slab updates.
 - Most of -mm.
 - printk updates.
 - lib/ updates.
 - checkpatch updates.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (226 commits)
  checkpatch: update $declaration_macros, add uninitialized_var
  checkpatch: warn on missing spaces in broken up quoted
  checkpatch: fix false positives for --strict "space after cast" test
  checkpatch: fix false positive MISSING_BREAK warnings with --file
  checkpatch: add test for native c90 types in unusual order
  checkpatch: add signed generic types
  checkpatch: add short int to c variable types
  checkpatch: add for_each tests to indentation and brace tests
  checkpatch: fix brace style misuses of else and while
  checkpatch: add --fix option for a couple OPEN_BRACE misuses
  checkpatch: use the correct indentation for which()
  checkpatch: add fix_insert_line and fix_delete_line helpers
  checkpatch: add ability to insert and delete lines to patch/file
  checkpatch: add an index variable for fixed lines
  checkpatch: warn on break after goto or return with same tab indentation
  checkpatch: emit a warning on file add/move/delete
  checkpatch: add test for commit id formatting style in commit log
  checkpatch: emit fewer kmalloc_array/kcalloc conversion warnings
  checkpatch: improve "no space after cast" test
  checkpatch: allow multiple const * types
  ...
This commit is contained in: commit 33caee3992
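Among the checkpatch changes listed above is "emit fewer kmalloc_array/kcalloc conversion warnings", which tunes the check for open-coded size multiplications in allocation calls. As a rough illustration only (not code from this merge), the pattern that warning targets looks like this:

	/* open-coded multiplication: an overflow silently under-allocates */
	buf = kmalloc(count * sizeof(*buf), GFP_KERNEL);

	/* preferred: kcalloc()/kmalloc_array() fail cleanly on overflow */
	buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);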
@@ -818,7 +818,7 @@ RCU pointer/list update:
	list_add_tail_rcu
	list_del_rcu
	list_replace_rcu
	hlist_add_after_rcu
	hlist_add_behind_rcu
	hlist_add_before_rcu
	hlist_add_head_rcu
	hlist_del_rcu
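The only change in this documentation hunk is the rename visible above: hlist_add_after_rcu becomes hlist_add_behind_rcu. The same rename appears in the code hunks further down (drm_ht_insert_item, and the non-RCU variant in the i40e and ixgbe fdir code), where the argument order also changes so that the new node comes first. A minimal sketch of the call-site conversion, mirroring those hunks:

	/* before: existing node first, new node second */
	hlist_add_after_rcu(parent, &item->head);

	/* after: new node first, then the node it goes behind */
	hlist_add_behind_rcu(&item->head, parent);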
@@ -1716,8 +1716,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			7 (KERN_DEBUG) debug-level messages

	log_buf_len=n[KMG]	Sets the size of the printk ring buffer,
			in bytes.  n must be a power of two.  The default
			size is set in the kernel config file.
			in bytes.  n must be a power of two and greater
			than the minimal size. The minimal size is defined
			by LOG_BUF_SHIFT kernel config parameter. There is
			also CONFIG_LOG_CPU_MAX_BUF_SHIFT config parameter
			that allows to increase the default size depending on
			the number of CPUs. See init/Kconfig for more details.

	logo.nologo	[FB] Disables display of the built-in Linux logo.
			This may be used to provide more screen space for
@@ -47,6 +47,10 @@ use constant HIGH_KSWAPD_REWAKEUP => 21;
use constant HIGH_NR_SCANNED => 22;
use constant HIGH_NR_TAKEN => 23;
use constant HIGH_NR_RECLAIMED => 24;
use constant HIGH_NR_FILE_SCANNED => 25;
use constant HIGH_NR_ANON_SCANNED => 26;
use constant HIGH_NR_FILE_RECLAIMED => 27;
use constant HIGH_NR_ANON_RECLAIMED => 28;

my %perprocesspid;
my %perprocess;
@@ -56,14 +60,18 @@ my $opt_read_procstat;

my $total_wakeup_kswapd;
my ($total_direct_reclaim, $total_direct_nr_scanned);
my ($total_direct_nr_file_scanned, $total_direct_nr_anon_scanned);
my ($total_direct_latency, $total_kswapd_latency);
my ($total_direct_nr_reclaimed);
my ($total_direct_nr_file_reclaimed, $total_direct_nr_anon_reclaimed);
my ($total_direct_writepage_file_sync, $total_direct_writepage_file_async);
my ($total_direct_writepage_anon_sync, $total_direct_writepage_anon_async);
my ($total_kswapd_nr_scanned, $total_kswapd_wake);
my ($total_kswapd_nr_file_scanned, $total_kswapd_nr_anon_scanned);
my ($total_kswapd_writepage_file_sync, $total_kswapd_writepage_file_async);
my ($total_kswapd_writepage_anon_sync, $total_kswapd_writepage_anon_async);
my ($total_kswapd_nr_reclaimed);
my ($total_kswapd_nr_file_reclaimed, $total_kswapd_nr_anon_reclaimed);

# Catch sigint and exit on request
my $sigint_report = 0;
@ -374,6 +382,7 @@ EVENT_PROCESS:
|
||||
}
|
||||
my $isolate_mode = $1;
|
||||
my $nr_scanned = $4;
|
||||
my $file = $6;
|
||||
|
||||
# To closer match vmstat scanning statistics, only count isolate_both
|
||||
# and isolate_inactive as scanning. isolate_active is rotation
|
||||
@ -382,6 +391,11 @@ EVENT_PROCESS:
|
||||
# isolate_both == 3
|
||||
if ($isolate_mode != 2) {
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
|
||||
if ($file == 1) {
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_FILE_SCANNED} += $nr_scanned;
|
||||
} else {
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_ANON_SCANNED} += $nr_scanned;
|
||||
}
|
||||
}
|
||||
} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
|
||||
$details = $6;
|
||||
@ -391,8 +405,19 @@ EVENT_PROCESS:
|
||||
print " $regex_lru_shrink_inactive/o\n";
|
||||
next;
|
||||
}
|
||||
|
||||
my $nr_reclaimed = $4;
|
||||
my $flags = $6;
|
||||
my $file = 0;
|
||||
if ($flags =~ /RECLAIM_WB_FILE/) {
|
||||
$file = 1;
|
||||
}
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED} += $nr_reclaimed;
|
||||
if ($file) {
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_FILE_RECLAIMED} += $nr_reclaimed;
|
||||
} else {
|
||||
$perprocesspid{$process_pid}->{HIGH_NR_ANON_RECLAIMED} += $nr_reclaimed;
|
||||
}
|
||||
} elsif ($tracepoint eq "mm_vmscan_writepage") {
|
||||
$details = $6;
|
||||
if ($details !~ /$regex_writepage/o) {
|
||||
@ -493,7 +518,11 @@ sub dump_stats {
|
||||
$total_direct_reclaim += $stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN};
|
||||
$total_wakeup_kswapd += $stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
|
||||
$total_direct_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
|
||||
$total_direct_nr_file_scanned += $stats{$process_pid}->{HIGH_NR_FILE_SCANNED};
|
||||
$total_direct_nr_anon_scanned += $stats{$process_pid}->{HIGH_NR_ANON_SCANNED};
|
||||
$total_direct_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
|
||||
$total_direct_nr_file_reclaimed += $stats{$process_pid}->{HIGH_NR_FILE_RECLAIMED};
|
||||
$total_direct_nr_anon_reclaimed += $stats{$process_pid}->{HIGH_NR_ANON_RECLAIMED};
|
||||
$total_direct_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
|
||||
$total_direct_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
|
||||
$total_direct_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
|
||||
@ -513,7 +542,11 @@ sub dump_stats {
|
||||
$stats{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN},
|
||||
$stats{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD},
|
||||
$stats{$process_pid}->{HIGH_NR_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_FILE_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_ANON_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_RECLAIMED},
|
||||
$stats{$process_pid}->{HIGH_NR_FILE_RECLAIMED},
|
||||
$stats{$process_pid}->{HIGH_NR_ANON_RECLAIMED},
|
||||
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
|
||||
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC},
|
||||
$this_reclaim_delay / 1000);
|
||||
@ -552,7 +585,11 @@ sub dump_stats {
|
||||
|
||||
$total_kswapd_wake += $stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE};
|
||||
$total_kswapd_nr_scanned += $stats{$process_pid}->{HIGH_NR_SCANNED};
|
||||
$total_kswapd_nr_file_scanned += $stats{$process_pid}->{HIGH_NR_FILE_SCANNED};
|
||||
$total_kswapd_nr_anon_scanned += $stats{$process_pid}->{HIGH_NR_ANON_SCANNED};
|
||||
$total_kswapd_nr_reclaimed += $stats{$process_pid}->{HIGH_NR_RECLAIMED};
|
||||
$total_kswapd_nr_file_reclaimed += $stats{$process_pid}->{HIGH_NR_FILE_RECLAIMED};
|
||||
$total_kswapd_nr_anon_reclaimed += $stats{$process_pid}->{HIGH_NR_ANON_RECLAIMED};
|
||||
$total_kswapd_writepage_file_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
|
||||
$total_kswapd_writepage_anon_sync += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
|
||||
$total_kswapd_writepage_file_async += $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
|
||||
@ -563,7 +600,11 @@ sub dump_stats {
|
||||
$stats{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE},
|
||||
$stats{$process_pid}->{HIGH_KSWAPD_REWAKEUP},
|
||||
$stats{$process_pid}->{HIGH_NR_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_FILE_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_ANON_SCANNED},
|
||||
$stats{$process_pid}->{HIGH_NR_RECLAIMED},
|
||||
$stats{$process_pid}->{HIGH_NR_FILE_RECLAIMED},
|
||||
$stats{$process_pid}->{HIGH_NR_ANON_RECLAIMED},
|
||||
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC},
|
||||
$stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} + $stats{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_ASYNC});
|
||||
|
||||
@ -594,7 +635,11 @@ sub dump_stats {
|
||||
print "\nSummary\n";
|
||||
print "Direct reclaims: $total_direct_reclaim\n";
|
||||
print "Direct reclaim pages scanned: $total_direct_nr_scanned\n";
|
||||
print "Direct reclaim file pages scanned: $total_direct_nr_file_scanned\n";
|
||||
print "Direct reclaim anon pages scanned: $total_direct_nr_anon_scanned\n";
|
||||
print "Direct reclaim pages reclaimed: $total_direct_nr_reclaimed\n";
|
||||
print "Direct reclaim file pages reclaimed: $total_direct_nr_file_reclaimed\n";
|
||||
print "Direct reclaim anon pages reclaimed: $total_direct_nr_anon_reclaimed\n";
|
||||
print "Direct reclaim write file sync I/O: $total_direct_writepage_file_sync\n";
|
||||
print "Direct reclaim write anon sync I/O: $total_direct_writepage_anon_sync\n";
|
||||
print "Direct reclaim write file async I/O: $total_direct_writepage_file_async\n";
|
||||
@ -604,7 +649,11 @@ sub dump_stats {
|
||||
print "\n";
|
||||
print "Kswapd wakeups: $total_kswapd_wake\n";
|
||||
print "Kswapd pages scanned: $total_kswapd_nr_scanned\n";
|
||||
print "Kswapd file pages scanned: $total_kswapd_nr_file_scanned\n";
|
||||
print "Kswapd anon pages scanned: $total_kswapd_nr_anon_scanned\n";
|
||||
print "Kswapd pages reclaimed: $total_kswapd_nr_reclaimed\n";
|
||||
print "Kswapd file pages reclaimed: $total_kswapd_nr_file_reclaimed\n";
|
||||
print "Kswapd anon pages reclaimed: $total_kswapd_nr_anon_reclaimed\n";
|
||||
print "Kswapd reclaim write file sync I/O: $total_kswapd_writepage_file_sync\n";
|
||||
print "Kswapd reclaim write anon sync I/O: $total_kswapd_writepage_anon_sync\n";
|
||||
print "Kswapd reclaim write file async I/O: $total_kswapd_writepage_file_async\n";
|
||||
@ -629,7 +678,11 @@ sub aggregate_perprocesspid() {
|
||||
$perprocess{$process}->{MM_VMSCAN_WAKEUP_KSWAPD} += $perprocesspid{$process_pid}->{MM_VMSCAN_WAKEUP_KSWAPD};
|
||||
$perprocess{$process}->{HIGH_KSWAPD_REWAKEUP} += $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP};
|
||||
$perprocess{$process}->{HIGH_NR_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_SCANNED};
|
||||
$perprocess{$process}->{HIGH_NR_FILE_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_FILE_SCANNED};
|
||||
$perprocess{$process}->{HIGH_NR_ANON_SCANNED} += $perprocesspid{$process_pid}->{HIGH_NR_ANON_SCANNED};
|
||||
$perprocess{$process}->{HIGH_NR_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_RECLAIMED};
|
||||
$perprocess{$process}->{HIGH_NR_FILE_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_FILE_RECLAIMED};
|
||||
$perprocess{$process}->{HIGH_NR_ANON_RECLAIMED} += $perprocesspid{$process_pid}->{HIGH_NR_ANON_RECLAIMED};
|
||||
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_SYNC};
|
||||
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_ANON_SYNC};
|
||||
$perprocess{$process}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC} += $perprocesspid{$process_pid}->{MM_VMSCAN_WRITEPAGE_FILE_ASYNC};
|
||||
|
Makefile (19 changes)
@@ -621,6 +621,9 @@ else
KBUILD_CFLAGS	+= -O2
endif

# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)

ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
@@ -636,6 +639,22 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
endif

# Handle stack protector mode.
#
# Since kbuild can potentially perform two passes (first with the old
# .config values and then with updated .config values), we cannot error out
# if a desired compiler option is unsupported. If we were to error, kbuild
# could never get to the second pass and actually notice that we changed
# the option to something that was supported.
#
# Additionally, we don't want to fallback and/or silently change which compiler
# flags will be used, since that leads to producing kernels with different
# security feature characteristics depending on the compiler used. ("But I
# selected CC_STACKPROTECTOR_STRONG! Why did it build with _REGULAR?!")
#
# The middle ground is to warn here so that the failed option is obvious, but
# to let the build fail with bad compiler flags so that we can't produce a
# kernel when there is a CONFIG and compiler mismatch.
#
ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
stackp-flag := -fstack-protector
ifeq ($(call cc-option, $(stackp-flag)),)
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
@@ -631,7 +631,8 @@ int arch_add_memory(int nid, u64 start, u64 size)

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL);
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
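This ia64 hunk is the first of several arch_add_memory() conversions in this series (powerpc, sh and x86 follow below): instead of hard-coding the zone, each arch now passes its old default zone to zone_for_memory() and adds the returned index to node_zones. A condensed sketch of the converted pattern, using only the call shape shown in these hunks:

	pgdat = NODE_DATA(nid);
	/* zone_for_memory() appears to pick the zone for [start, start + size)
	 * on node nid; the last argument is the arch's previous default zone. */
	zone = pgdat->node_zones + zone_for_memory(nid, start, size, ZONE_NORMAL);
	ret = __add_pages(nid, zone, start_pfn, nr_pages);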
@@ -90,7 +90,6 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
	book3s_hv_rm_mmu.o \
	book3s_hv_ras.o \
	book3s_hv_builtin.o \
	book3s_hv_cma.o \
	$(kvm-book3s_64-builtin-xics-objs-y)
endif
@@ -37,8 +37,6 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

#include "book3s_hv_cma.h"

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
@@ -64,10 +62,10 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
	}

	kvm->arch.hpt_cma_alloc = 0;
	VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
	page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1 << order));
		kvm->arch.hpt_cma_alloc = 1;
	}
@@ -16,12 +16,14 @@
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"
#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
 * should be power of 2.
@@ -43,6 +45,8 @@ static unsigned long kvm_cma_resv_ratio = 5;
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);

static struct cma *kvm_cma;

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
@@ -97,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
@@ -112,7 +116,7 @@ EXPORT_SYMBOL_GPL(kvm_alloc_rma);
void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
@@ -131,16 +135,18 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

@@ -179,7 +185,8 @@ void __init kvm_cma_reserve(void)
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
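The hunks above convert the PPC KVM hash-page-table and RMA allocations from the private CMA copy (kvm_alloc_cma/kvm_release_cma, whose implementation is removed below as book3s_hv_cma.c) to the shared CMA API: one cma_declare_contiguous() call at boot to carve out the region, then cma_alloc()/cma_release() at runtime. A condensed sketch of that usage, with names and arguments taken directly from the hunks above:

	static struct cma *kvm_cma;

	/* boot time: reserve the region and get a struct cma handle back */
	cma_declare_contiguous(0, selected_size, 0, align_size,
			       KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);

	/* runtime: hand out and return page ranges from that region */
	page = cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
	...
	cma_release(kvm_cma, page, nr_pages);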
@ -1,240 +0,0 @@
|
||||
/*
|
||||
* Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
|
||||
* for DMA mapping framework
|
||||
*
|
||||
* Copyright IBM Corporation, 2013
|
||||
* Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License or (at your optional) any later version of the license.
|
||||
*
|
||||
*/
|
||||
#define pr_fmt(fmt) "kvm_cma: " fmt
|
||||
|
||||
#ifdef CONFIG_CMA_DEBUG
|
||||
#ifndef DEBUG
|
||||
# define DEBUG
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "book3s_hv_cma.h"
|
||||
|
||||
struct kvm_cma {
|
||||
unsigned long base_pfn;
|
||||
unsigned long count;
|
||||
unsigned long *bitmap;
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(kvm_cma_mutex);
|
||||
static struct kvm_cma kvm_cma_area;
|
||||
|
||||
/**
|
||||
* kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
|
||||
* for kvm hash pagetable
|
||||
* @size: Size of the reserved memory.
|
||||
* @alignment: Alignment for the contiguous memory area
|
||||
*
|
||||
* This function reserves memory for kvm cma area. It should be
|
||||
* called by arch code when early allocator (memblock or bootmem)
|
||||
* is still activate.
|
||||
*/
|
||||
long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
|
||||
{
|
||||
long base_pfn;
|
||||
phys_addr_t addr;
|
||||
struct kvm_cma *cma = &kvm_cma_area;
|
||||
|
||||
pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
|
||||
|
||||
if (!size)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* Sanitise input arguments.
|
||||
* We should be pageblock aligned for CMA.
|
||||
*/
|
||||
alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
|
||||
size = ALIGN(size, alignment);
|
||||
/*
|
||||
* Reserve memory
|
||||
* Use __memblock_alloc_base() since
|
||||
* memblock_alloc_base() panic()s.
|
||||
*/
|
||||
addr = __memblock_alloc_base(size, alignment, 0);
|
||||
if (!addr) {
|
||||
base_pfn = -ENOMEM;
|
||||
goto err;
|
||||
} else
|
||||
base_pfn = PFN_DOWN(addr);
|
||||
|
||||
/*
|
||||
* Each reserved area must be initialised later, when more kernel
|
||||
* subsystems (like slab allocator) are available.
|
||||
*/
|
||||
cma->base_pfn = base_pfn;
|
||||
cma->count = size >> PAGE_SHIFT;
|
||||
pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
|
||||
return 0;
|
||||
err:
|
||||
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
|
||||
return base_pfn;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_alloc_cma() - allocate pages from contiguous area
|
||||
* @nr_pages: Requested number of pages.
|
||||
* @align_pages: Requested alignment in number of pages
|
||||
*
|
||||
* This function allocates memory buffer for hash pagetable.
|
||||
*/
|
||||
struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
|
||||
{
|
||||
int ret;
|
||||
struct page *page = NULL;
|
||||
struct kvm_cma *cma = &kvm_cma_area;
|
||||
unsigned long chunk_count, nr_chunk;
|
||||
unsigned long mask, pfn, pageno, start = 0;
|
||||
|
||||
|
||||
if (!cma || !cma->count)
|
||||
return NULL;
|
||||
|
||||
pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
|
||||
(void *)cma, nr_pages, align_pages);
|
||||
|
||||
if (!nr_pages)
|
||||
return NULL;
|
||||
/*
|
||||
* align mask with chunk size. The bit tracks pages in chunk size
|
||||
*/
|
||||
VM_BUG_ON(!is_power_of_2(align_pages));
|
||||
mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
|
||||
BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
|
||||
|
||||
chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
|
||||
nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
|
||||
|
||||
mutex_lock(&kvm_cma_mutex);
|
||||
for (;;) {
|
||||
pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
|
||||
start, nr_chunk, mask);
|
||||
if (pageno >= chunk_count)
|
||||
break;
|
||||
|
||||
pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
|
||||
ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
|
||||
if (ret == 0) {
|
||||
bitmap_set(cma->bitmap, pageno, nr_chunk);
|
||||
page = pfn_to_page(pfn);
|
||||
memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
|
||||
break;
|
||||
} else if (ret != -EBUSY) {
|
||||
break;
|
||||
}
|
||||
pr_debug("%s(): memory range at %p is busy, retrying\n",
|
||||
__func__, pfn_to_page(pfn));
|
||||
/* try again with a bit different memory target */
|
||||
start = pageno + mask + 1;
|
||||
}
|
||||
mutex_unlock(&kvm_cma_mutex);
|
||||
pr_debug("%s(): returned %p\n", __func__, page);
|
||||
return page;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_release_cma() - release allocated pages for hash pagetable
|
||||
* @pages: Allocated pages.
|
||||
* @nr_pages: Number of allocated pages.
|
||||
*
|
||||
* This function releases memory allocated by kvm_alloc_cma().
|
||||
* It returns false when provided pages do not belong to contiguous area and
|
||||
* true otherwise.
|
||||
*/
|
||||
bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
|
||||
{
|
||||
unsigned long pfn;
|
||||
unsigned long nr_chunk;
|
||||
struct kvm_cma *cma = &kvm_cma_area;
|
||||
|
||||
if (!cma || !pages)
|
||||
return false;
|
||||
|
||||
pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
|
||||
|
||||
pfn = page_to_pfn(pages);
|
||||
|
||||
if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
|
||||
return false;
|
||||
|
||||
VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
|
||||
nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
|
||||
|
||||
mutex_lock(&kvm_cma_mutex);
|
||||
bitmap_clear(cma->bitmap,
|
||||
(pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
|
||||
nr_chunk);
|
||||
free_contig_range(pfn, nr_pages);
|
||||
mutex_unlock(&kvm_cma_mutex);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int __init kvm_cma_activate_area(unsigned long base_pfn,
|
||||
unsigned long count)
|
||||
{
|
||||
unsigned long pfn = base_pfn;
|
||||
unsigned i = count >> pageblock_order;
|
||||
struct zone *zone;
|
||||
|
||||
WARN_ON_ONCE(!pfn_valid(pfn));
|
||||
zone = page_zone(pfn_to_page(pfn));
|
||||
do {
|
||||
unsigned j;
|
||||
base_pfn = pfn;
|
||||
for (j = pageblock_nr_pages; j; --j, pfn++) {
|
||||
WARN_ON_ONCE(!pfn_valid(pfn));
|
||||
/*
|
||||
* alloc_contig_range requires the pfn range
|
||||
* specified to be in the same zone. Make this
|
||||
* simple by forcing the entire CMA resv range
|
||||
* to be in the same zone.
|
||||
*/
|
||||
if (page_zone(pfn_to_page(pfn)) != zone)
|
||||
return -EINVAL;
|
||||
}
|
||||
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
|
||||
} while (--i);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init kvm_cma_init_reserved_areas(void)
|
||||
{
|
||||
int bitmap_size, ret;
|
||||
unsigned long chunk_count;
|
||||
struct kvm_cma *cma = &kvm_cma_area;
|
||||
|
||||
pr_debug("%s()\n", __func__);
|
||||
if (!cma->count)
|
||||
return 0;
|
||||
chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
|
||||
bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
|
||||
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
||||
if (!cma->bitmap)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
|
||||
if (ret)
|
||||
goto error;
|
||||
return 0;
|
||||
|
||||
error:
|
||||
kfree(cma->bitmap);
|
||||
return ret;
|
||||
}
|
||||
core_initcall(kvm_cma_init_reserved_areas);
|
@ -1,27 +0,0 @@
|
||||
/*
|
||||
* Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
|
||||
* for DMA mapping framework
|
||||
*
|
||||
* Copyright IBM Corporation, 2013
|
||||
* Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License or (at your optional) any later version of the license.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __POWERPC_KVM_CMA_ALLOC_H__
|
||||
#define __POWERPC_KVM_CMA_ALLOC_H__
|
||||
/*
|
||||
* Both RMA and Hash page allocation will be multiple of 256K.
|
||||
*/
|
||||
#define KVM_CMA_CHUNK_ORDER 18
|
||||
|
||||
extern struct page *kvm_alloc_cma(unsigned long nr_pages,
|
||||
unsigned long align_pages);
|
||||
extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
|
||||
extern long kvm_cma_declare_contiguous(phys_addr_t size,
|
||||
phys_addr_t alignment) __init;
|
||||
#endif
|
@@ -128,7 +128,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
@@ -4,17 +4,6 @@
#define PTRACE_GETREGS		12
#define PTRACE_SETREGS		13

#define PC		32
#define CONDITION	33
#define ECR		34
#define EMA		35
#define CEH		36
#define CEL		37
#define COUNTER		38
#define LDCR		39
#define STCR		40
#define PSR		41

#define SINGLESTEP16_INSN	0x7006
#define SINGLESTEP32_INSN	0x840C8000
#define BREAKPOINT16_INSN	0x7002	/* work on SPG300 */
@@ -12,9 +12,8 @@ config SH_DMA_IRQ_MULTI
	default y if CPU_SUBTYPE_SH7750  || CPU_SUBTYPE_SH7751  || \
		     CPU_SUBTYPE_SH7750S || CPU_SUBTYPE_SH7750R || \
		     CPU_SUBTYPE_SH7751R || CPU_SUBTYPE_SH7091  || \
		     CPU_SUBTYPE_SH7763  || CPU_SUBTYPE_SH7764  || \
		     CPU_SUBTYPE_SH7780  || CPU_SUBTYPE_SH7785  || \
		     CPU_SUBTYPE_SH7760
		     CPU_SUBTYPE_SH7763  || CPU_SUBTYPE_SH7780  || \
		     CPU_SUBTYPE_SH7785  || CPU_SUBTYPE_SH7760

config SH_DMA_API
	depends on SH_DMA
@@ -34,6 +34,17 @@ static inline void outl(unsigned int x, unsigned long port)
	BUG();
}

static inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	BUG();
	return NULL;
}

static inline void ioport_unmap(void __iomem *addr)
{
	BUG();
}

#define inb_p(addr)	inb(addr)
#define inw_p(addr)	inw(addr)
#define inl_p(addr)	inl(addr)
@@ -32,7 +32,6 @@
#define CHCR_TS_HIGH_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */
#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
	defined(CONFIG_CPU_SUBTYPE_SH7785)
#define CHCR_TS_LOW_MASK	0x00000018
@@ -14,8 +14,7 @@
#define DMTE4_IRQ	evt2irq(0xb80)
#define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
#define SH_DMAC_BASE0	0xFE008020
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define DMTE0_IRQ	evt2irq(0x640)
#define DMTE4_IRQ	evt2irq(0x780)
#define DMAE0_IRQ	evt2irq(0x6c0)
@@ -307,7 +307,7 @@ static struct clk_lookup lookups[] = {
	CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
	CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),

	CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[HWBLK_CMT]),
	CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
	CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
	CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),

@@ -332,6 +332,8 @@ static struct clk_lookup lookups[] = {
	CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
	CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]),
	CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]),
	CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
	CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
	CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
@@ -80,10 +80,8 @@ static int __init rtc_generic_init(void)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
	return PTR_ERR_OR_ZERO(pdev);
}
module_init(rtc_generic_init);
@@ -67,10 +67,8 @@ static int __init asids_debugfs_init(void)
					     NULL, &asids_debugfs_fops);
	if (!asids_dentry)
		return -ENOMEM;
	if (IS_ERR(asids_dentry))
		return PTR_ERR(asids_dentry);

	return 0;
	return PTR_ERR_OR_ZERO(asids_dentry);
}
module_init(asids_debugfs_init);
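Both sh hunks above replace an explicit IS_ERR()/PTR_ERR()/return 0 tail with PTR_ERR_OR_ZERO(), which lives in <linux/err.h> and returns the error code for an ERR_PTR value and 0 otherwise. A minimal sketch of the idiom, reusing the rtc call from the hunk above:

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);

	/* equivalent to: if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; */
	return PTR_ERR_OR_ZERO(pdev);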
@@ -495,8 +495,9 @@ int arch_add_memory(int nid, u64 start, u64 size)
	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	ret = __add_pages(nid, pgdat->node_zones +
			zone_for_memory(nid, start, size, ZONE_NORMAL),
			start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
	area->nr_pages = npages;
	area->pages = pages;

	if (map_vm_area(area, prot_rwx, &pages)) {
	if (map_vm_area(area, prot_rwx, pages)) {
		vunmap(area->addr);
		goto error;
	}
@@ -1218,7 +1218,8 @@ good_area:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
@@ -825,7 +825,8 @@ void __init mem_init(void)
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	struct zone *zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, ZONE_HIGHMEM);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -691,7 +691,8 @@ static void update_end_of_memory_vars(u64 start, u64 size)
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;
@@ -16,6 +16,7 @@ menuconfig ATA
	depends on BLOCK
	depends on !(M32R || M68K || S390) || BROKEN
	select SCSI
	select GLOB
	---help---
	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
	  any other ATA device under Linux, say Y and make sure that you know
@@ -59,6 +59,7 @@
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@ -4250,73 +4251,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ }
|
||||
};
|
||||
|
||||
/**
|
||||
* glob_match - match a text string against a glob-style pattern
|
||||
* @text: the string to be examined
|
||||
* @pattern: the glob-style pattern to be matched against
|
||||
*
|
||||
* Either/both of text and pattern can be empty strings.
|
||||
*
|
||||
* Match text against a glob-style pattern, with wildcards and simple sets:
|
||||
*
|
||||
* ? matches any single character.
|
||||
* * matches any run of characters.
|
||||
* [xyz] matches a single character from the set: x, y, or z.
|
||||
* [a-d] matches a single character from the range: a, b, c, or d.
|
||||
* [a-d0-9] matches a single character from either range.
|
||||
*
|
||||
* The special characters ?, [, -, or *, can be matched using a set, eg. [*]
|
||||
* Behaviour with malformed patterns is undefined, though generally reasonable.
|
||||
*
|
||||
* Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
|
||||
*
|
||||
* This function uses one level of recursion per '*' in pattern.
|
||||
* Since it calls _nothing_ else, and has _no_ explicit local variables,
|
||||
* this will not cause stack problems for any reasonable use here.
|
||||
*
|
||||
* RETURNS:
|
||||
* 0 on match, 1 otherwise.
|
||||
*/
|
||||
static int glob_match (const char *text, const char *pattern)
|
||||
{
|
||||
do {
|
||||
/* Match single character or a '?' wildcard */
|
||||
if (*text == *pattern || *pattern == '?') {
|
||||
if (!*pattern++)
|
||||
return 0; /* End of both strings: match */
|
||||
} else {
|
||||
/* Match single char against a '[' bracketed ']' pattern set */
|
||||
if (!*text || *pattern != '[')
|
||||
break; /* Not a pattern set */
|
||||
while (*++pattern && *pattern != ']' && *text != *pattern) {
|
||||
if (*pattern == '-' && *(pattern - 1) != '[')
|
||||
if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
|
||||
++pattern;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!*pattern || *pattern == ']')
|
||||
return 1; /* No match */
|
||||
while (*pattern && *pattern++ != ']');
|
||||
}
|
||||
} while (*++text && *pattern);
|
||||
|
||||
/* Match any run of chars against a '*' wildcard */
|
||||
if (*pattern == '*') {
|
||||
if (!*++pattern)
|
||||
return 0; /* Match: avoid recursion at end of pattern */
|
||||
/* Loop to handle additional pattern chars after the wildcard */
|
||||
while (*text) {
|
||||
if (glob_match(text, pattern) == 0)
|
||||
return 0; /* Remainder matched */
|
||||
++text; /* Absorb (match) this char and try again */
|
||||
}
|
||||
}
|
||||
if (!*text && !*pattern)
|
||||
return 0; /* End of both strings: match */
|
||||
return 1; /* No match */
|
||||
}
|
||||
|
||||
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
|
||||
{
|
||||
unsigned char model_num[ATA_ID_PROD_LEN + 1];
|
||||
@@ -4327,10 +4261,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!glob_match(model_num, ad->model_num)) {
		if (glob_match(model_num, ad->model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!glob_match(model_rev, ad->model_rev))
			if (glob_match(model_rev, ad->model_rev))
				return ad->horkage;
		}
		ad++;
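Together with the `select GLOB` in the ATA Kconfig hunk and the new `#include <linux/glob.h>` above, this hunk switches libata from its private recursive glob_match() (removed earlier in this file, and documented there as returning 0 on match, 1 otherwise) to the library helper, which returns a bool that is true on a match; that is why the leading `!` disappears from the tests above. A sketch of the new call-site convention, mirroring the hunk (argument order as shown there):

	/* lib glob helper: true means "matched", so the test reads positively */
	if (glob_match(model_num, ad->model_num))
		return ad->horkage;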
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT

	  If unsure, leave the default value "8".

config CMA_AREAS
	int "Maximum count of the CMA device-private areas"
	default 7
	help
	  CMA allows to create CMA areas for particular devices. This parameter
	  sets the maximum number of such device private CMA areas in the
	  system.

	  If unsure, leave the default value "7".

endif

endmenu
@ -24,23 +24,9 @@
|
||||
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/page-isolation.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
|
||||
struct cma {
|
||||
unsigned long base_pfn;
|
||||
unsigned long count;
|
||||
unsigned long *bitmap;
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
struct cma *dma_contiguous_default_area;
|
||||
#include <linux/cma.h>
|
||||
|
||||
#ifdef CONFIG_CMA_SIZE_MBYTES
|
||||
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
|
||||
@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
|
||||
#define CMA_SIZE_MBYTES 0
|
||||
#endif
|
||||
|
||||
struct cma *dma_contiguous_default_area;
|
||||
|
||||
/*
|
||||
* Default global CMA area size can be defined in kernel's .config.
|
||||
* This is useful mainly for distro maintainers to create a kernel
|
||||
@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
|
||||
}
|
||||
}
|
||||
|
||||
static DEFINE_MUTEX(cma_mutex);
|
||||
|
||||
static int __init cma_activate_area(struct cma *cma)
|
||||
{
|
||||
int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
|
||||
unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
|
||||
unsigned i = cma->count >> pageblock_order;
|
||||
struct zone *zone;
|
||||
|
||||
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
||||
|
||||
if (!cma->bitmap)
|
||||
return -ENOMEM;
|
||||
|
||||
WARN_ON_ONCE(!pfn_valid(pfn));
|
||||
zone = page_zone(pfn_to_page(pfn));
|
||||
|
||||
do {
|
||||
unsigned j;
|
||||
base_pfn = pfn;
|
||||
for (j = pageblock_nr_pages; j; --j, pfn++) {
|
||||
WARN_ON_ONCE(!pfn_valid(pfn));
|
||||
/*
|
||||
* alloc_contig_range requires the pfn range
|
||||
* specified to be in the same zone. Make this
|
||||
* simple by forcing the entire CMA resv range
|
||||
* to be in the same zone.
|
||||
*/
|
||||
if (page_zone(pfn_to_page(pfn)) != zone)
|
||||
goto err;
|
||||
}
|
||||
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
|
||||
} while (--i);
|
||||
|
||||
mutex_init(&cma->lock);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
kfree(cma->bitmap);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct cma cma_areas[MAX_CMA_AREAS];
|
||||
static unsigned cma_area_count;
|
||||
|
||||
static int __init cma_init_reserved_areas(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cma_area_count; i++) {
|
||||
int ret = cma_activate_area(&cma_areas[i]);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(cma_init_reserved_areas);
|
||||
|
||||
/**
|
||||
* dma_contiguous_reserve_area() - reserve custom contiguous area
|
||||
* @size: Size of the reserved area (in bytes),
|
||||
@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
|
||||
phys_addr_t limit, struct cma **res_cma,
|
||||
bool fixed)
|
||||
{
|
||||
struct cma *cma = &cma_areas[cma_area_count];
|
||||
phys_addr_t alignment;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
|
||||
(unsigned long)size, (unsigned long)base,
|
||||
(unsigned long)limit);
|
||||
|
||||
/* Sanity checks */
|
||||
if (cma_area_count == ARRAY_SIZE(cma_areas)) {
|
||||
pr_err("Not enough slots for CMA reserved regions!\n");
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
if (!size)
|
||||
return -EINVAL;
|
||||
|
||||
/* Sanitise input arguments */
|
||||
alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
|
||||
base = ALIGN(base, alignment);
|
||||
size = ALIGN(size, alignment);
|
||||
limit &= ~(alignment - 1);
|
||||
|
||||
/* Reserve memory */
|
||||
if (base && fixed) {
|
||||
if (memblock_is_region_reserved(base, size) ||
|
||||
memblock_reserve(base, size) < 0) {
|
||||
ret = -EBUSY;
|
||||
goto err;
|
||||
}
|
||||
} else {
|
||||
phys_addr_t addr = memblock_alloc_range(size, alignment, base,
|
||||
limit);
|
||||
if (!addr) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
} else {
|
||||
base = addr;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Each reserved area must be initialised later, when more kernel
|
||||
* subsystems (like slab allocator) are available.
|
||||
*/
|
||||
cma->base_pfn = PFN_DOWN(base);
|
||||
cma->count = size >> PAGE_SHIFT;
|
||||
*res_cma = cma;
|
||||
cma_area_count++;
|
||||
|
||||
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
|
||||
(unsigned long)base);
|
||||
ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Architecture specific contiguous memory fixup. */
|
||||
dma_contiguous_early_fixup(base, size);
|
||||
return 0;
|
||||
err:
|
||||
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
|
||||
return ret;
|
||||
}
|
||||
dma_contiguous_early_fixup(cma_get_base(*res_cma),
|
||||
cma_get_size(*res_cma));
|
||||
|
||||
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
|
||||
{
|
||||
mutex_lock(&cma->lock);
|
||||
bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
|
||||
mutex_unlock(&cma->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
|
||||
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
|
||||
unsigned int align)
|
||||
{
|
||||
unsigned long mask, pfn, pageno, start = 0;
|
||||
struct cma *cma = dev_get_cma_area(dev);
|
||||
struct page *page = NULL;
|
||||
int ret;
|
||||
|
||||
if (!cma || !cma->count)
|
||||
return NULL;
|
||||
|
||||
if (align > CONFIG_CMA_ALIGNMENT)
|
||||
align = CONFIG_CMA_ALIGNMENT;
|
||||
|
||||
pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
|
||||
count, align);
|
||||
|
||||
if (!count)
|
||||
return NULL;
|
||||
|
||||
mask = (1 << align) - 1;
|
||||
|
||||
|
||||
for (;;) {
|
||||
mutex_lock(&cma->lock);
|
||||
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
|
||||
start, count, mask);
|
||||
if (pageno >= cma->count) {
|
||||
mutex_unlock(&cma->lock);
|
||||
break;
|
||||
}
|
||||
bitmap_set(cma->bitmap, pageno, count);
|
||||
/*
|
||||
* It's safe to drop the lock here. We've marked this region for
|
||||
* our exclusive use. If the migration fails we will take the
|
||||
* lock again and unmark it.
|
||||
*/
|
||||
mutex_unlock(&cma->lock);
|
||||
|
||||
pfn = cma->base_pfn + pageno;
|
||||
mutex_lock(&cma_mutex);
|
||||
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
|
||||
mutex_unlock(&cma_mutex);
|
||||
if (ret == 0) {
|
||||
page = pfn_to_page(pfn);
|
||||
break;
|
||||
} else if (ret != -EBUSY) {
|
||||
clear_cma_bitmap(cma, pfn, count);
|
||||
break;
|
||||
}
|
||||
clear_cma_bitmap(cma, pfn, count);
|
||||
pr_debug("%s(): memory range at %p is busy, retrying\n",
|
||||
__func__, pfn_to_page(pfn));
|
||||
/* try again with a bit different memory target */
|
||||
start = pageno + mask + 1;
|
||||
}
|
||||
|
||||
pr_debug("%s(): returned %p\n", __func__, page);
|
||||
return page;
|
||||
return cma_alloc(dev_get_cma_area(dev), count, align);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
|
||||
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
int count)
|
||||
{
|
||||
struct cma *cma = dev_get_cma_area(dev);
|
||||
unsigned long pfn;
|
||||
|
||||
if (!cma || !pages)
|
||||
return false;
|
||||
|
||||
pr_debug("%s(page %p)\n", __func__, (void *)pages);
|
||||
|
||||
pfn = page_to_pfn(pages);
|
||||
|
||||
if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
|
||||
return false;
|
||||
|
||||
VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
|
||||
|
||||
free_contig_range(pfn, count);
|
||||
clear_cma_bitmap(cma, pfn, count);
|
||||
|
||||
return true;
|
||||
return cma_release(dev_get_cma_area(dev), pages, count);
|
||||
}
|
||||
|
@@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev)
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = ONLINE_KEEP;
		mem->online_type = MMOP_ONLINE_KEEP;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

@@ -315,23 +315,23 @@ store_mem_state(struct device *dev,
	if (ret)
		return ret;

	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
		online_type = ONLINE_KERNEL;
	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
		online_type = ONLINE_MOVABLE;
	else if (!strncmp(buf, "online", min_t(int, count, 6)))
		online_type = ONLINE_KEEP;
	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
		online_type = -1;
	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case ONLINE_KERNEL:
	case ONLINE_MOVABLE:
	case ONLINE_KEEP:
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		/*
		 * mem->online_type is not protected so there can be a
		 * race here.  However, when racing online, the first
@@ -342,7 +342,7 @@ store_mem_state(struct device *dev,
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case -1:
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
@@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
	int i, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	phys_addr = simple_strtoull(buf, NULL, 0);
	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;
|
||||
nid, K(node_page_state(nid, NR_FILE_PAGES)),
|
||||
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
|
||||
nid, K(node_page_state(nid, NR_ANON_PAGES)),
|
||||
nid, K(node_page_state(nid, NR_SHMEM)),
|
||||
nid, K(i.sharedram),
|
||||
nid, node_page_state(nid, NR_KERNEL_STACK) *
|
||||
THREAD_SIZE / 1024,
|
||||
nid, K(node_page_state(nid, NR_PAGETABLE)),
|
||||
|
@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
|
||||
static int zram_test_flag(struct zram_meta *meta, u32 index,
|
||||
enum zram_pageflags flag)
|
||||
{
|
||||
return meta->table[index].flags & BIT(flag);
|
||||
return meta->table[index].value & BIT(flag);
|
||||
}
|
||||
|
||||
static void zram_set_flag(struct zram_meta *meta, u32 index,
|
||||
enum zram_pageflags flag)
|
||||
{
|
||||
meta->table[index].flags |= BIT(flag);
|
||||
meta->table[index].value |= BIT(flag);
|
||||
}
|
||||
|
||||
static void zram_clear_flag(struct zram_meta *meta, u32 index,
|
||||
enum zram_pageflags flag)
|
||||
{
|
||||
meta->table[index].flags &= ~BIT(flag);
|
||||
meta->table[index].value &= ~BIT(flag);
|
||||
}
|
||||
|
||||
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
|
||||
{
|
||||
return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
|
||||
}
|
||||
|
||||
static void zram_set_obj_size(struct zram_meta *meta,
|
||||
u32 index, size_t size)
|
||||
{
|
||||
unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
|
||||
|
||||
meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
|
||||
}
|
||||
|
||||
static inline int is_partial_io(struct bio_vec *bvec)
|
||||
@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
|
||||
goto free_table;
|
||||
}
|
||||
|
||||
rwlock_init(&meta->tb_lock);
|
||||
return meta;
|
||||
|
||||
free_table:
|
||||
@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
|
||||
flush_dcache_page(page);
|
||||
}
|
||||
|
||||
/* NOTE: caller should hold meta->tb_lock with write-side */
|
||||
|
||||
/*
|
||||
* To protect concurrent access to the same index entry,
|
||||
* caller should hold this table index entry's bit_spinlock to
|
||||
* indicate this index entry is accessing.
|
||||
*/
|
||||
static void zram_free_page(struct zram *zram, size_t index)
|
||||
{
|
||||
struct zram_meta *meta = zram->meta;
|
||||
@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
|
||||
|
||||
zs_free(meta->mem_pool, handle);
|
||||
|
||||
atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
|
||||
atomic64_sub(zram_get_obj_size(meta, index),
|
||||
&zram->stats.compr_data_size);
|
||||
atomic64_dec(&zram->stats.pages_stored);
|
||||
|
||||
meta->table[index].handle = 0;
|
||||
meta->table[index].size = 0;
|
||||
zram_set_obj_size(meta, index, 0);
|
||||
}
|
||||
|
||||
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
|
||||
@ -337,14 +355,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
|
||||
unsigned char *cmem;
|
||||
struct zram_meta *meta = zram->meta;
|
||||
unsigned long handle;
|
||||
u16 size;
|
||||
size_t size;
|
||||
|
||||
read_lock(&meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
handle = meta->table[index].handle;
|
||||
size = meta->table[index].size;
|
||||
size = zram_get_obj_size(meta, index);
|
||||
|
||||
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
|
||||
read_unlock(&meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
clear_page(mem);
|
||||
return 0;
|
||||
}
|
||||
@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
|
||||
else
|
||||
ret = zcomp_decompress(zram->comp, cmem, size, mem);
|
||||
zs_unmap_object(meta->mem_pool, handle);
|
||||
read_unlock(&meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
|
||||
/* Should NEVER happen. Return bio error if it does. */
|
||||
if (unlikely(ret)) {
|
||||
@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
|
||||
struct zram_meta *meta = zram->meta;
|
||||
page = bvec->bv_page;
|
||||
|
||||
read_lock(&meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
if (unlikely(!meta->table[index].handle) ||
|
||||
zram_test_flag(meta, index, ZRAM_ZERO)) {
|
||||
read_unlock(&meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
handle_zero_page(bvec);
|
||||
return 0;
|
||||
}
|
||||
read_unlock(&meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
|
||||
if (is_partial_io(bvec))
|
||||
/* Use a temporary buffer to decompress the page */
|
||||
@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
||||
if (page_zero_filled(uncmem)) {
|
||||
kunmap_atomic(user_mem);
|
||||
/* Free memory associated with this sector now. */
|
||||
write_lock(&zram->meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
zram_free_page(zram, index);
|
||||
zram_set_flag(meta, index, ZRAM_ZERO);
|
||||
write_unlock(&zram->meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
|
||||
atomic64_inc(&zram->stats.zero_pages);
|
||||
ret = 0;
|
||||
@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
|
||||
* Free memory associated with this sector
|
||||
* before overwriting unused sectors.
|
||||
*/
|
||||
write_lock(&zram->meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
zram_free_page(zram, index);
|
||||
|
||||
meta->table[index].handle = handle;
|
||||
meta->table[index].size = clen;
|
||||
write_unlock(&zram->meta->tb_lock);
|
||||
zram_set_obj_size(meta, index, clen);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
|
||||
/* Update stats */
|
||||
atomic64_add(clen, &zram->stats.compr_data_size);
|
||||
@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
|
||||
int offset, struct bio *bio)
|
||||
{
|
||||
size_t n = bio->bi_iter.bi_size;
|
||||
struct zram_meta *meta = zram->meta;
|
||||
|
||||
/*
|
||||
* zram manages data in physical block size units. Because logical block
|
||||
@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
|
||||
}
|
||||
|
||||
while (n >= PAGE_SIZE) {
|
||||
/*
|
||||
* Discard request can be large so the lock hold times could be
|
||||
* lengthy. So take the lock once per page.
|
||||
*/
|
||||
write_lock(&zram->meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
zram_free_page(zram, index);
|
||||
write_unlock(&zram->meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
index++;
|
||||
n -= PAGE_SIZE;
|
||||
}
|
||||
@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
|
||||
zram = bdev->bd_disk->private_data;
|
||||
meta = zram->meta;
|
||||
|
||||
write_lock(&meta->tb_lock);
|
||||
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
zram_free_page(zram, index);
|
||||
write_unlock(&meta->tb_lock);
|
||||
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
|
||||
atomic64_inc(&zram->stats.notify_free);
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
|
||||
/*-- End of configurable params */
|
||||
|
||||
#define SECTOR_SHIFT 9
|
||||
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
|
||||
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
|
||||
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
|
||||
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
|
||||
@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
|
||||
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
|
||||
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
|
||||
|
||||
/* Flags for zram pages (table[page_no].flags) */
|
||||
|
||||
/*
|
||||
* The lower ZRAM_FLAG_SHIFT bits of table.value is for
|
||||
* object size (excluding header), the higher bits is for
|
||||
* zram_pageflags.
|
||||
*
|
||||
* zram is mainly used for memory efficiency so we want to keep memory
|
||||
* footprint small so we can squeeze size and flags into a field.
|
||||
* The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header),
|
||||
* the higher bits is for zram_pageflags.
|
||||
*/
|
||||
#define ZRAM_FLAG_SHIFT 24
|
||||
|
||||
/* Flags for zram pages (table[page_no].value) */
|
||||
enum zram_pageflags {
|
||||
/* Page consists entirely of zeros */
|
||||
ZRAM_ZERO,
|
||||
ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
|
||||
ZRAM_ACCESS, /* page is now accessed */
|
||||
|
||||
__NR_ZRAM_PAGEFLAGS,
|
||||
};
|
||||
@@ -62,11 +75,10 @@ enum zram_pageflags {
/*-- Data structures */

/* Allocated for each disk page */
- struct table {
+ struct zram_table_entry {
unsigned long handle;
- u16 size; /* object size (excluding header) */
- u8 flags;
- } __aligned(4);
+ unsigned long value;
+ };
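
With struct zram_table_entry the object size and the page flags share the single value word: the low ZRAM_FLAG_SHIFT bits hold the size, and the bits at and above ZRAM_FLAG_SHIFT hold zram_pageflags (which is why ZRAM_ZERO now starts at ZRAM_FLAG_SHIFT + 1). A rough sketch of the accessors this packing implies, consistent with the zram_set_obj_size() call seen earlier in this diff; the example_ names are illustrative only:

/* Illustrative accessors for the packed value field. */
static unsigned long example_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void example_set_obj_size(struct zram_meta *meta, u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	/* Keep the flag bits, rewrite the size bits. */
	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}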
|
||||
|
||||
struct zram_stats {
|
||||
atomic64_t compr_data_size; /* compressed size of pages stored */
|
||||
@ -81,8 +93,7 @@ struct zram_stats {
|
||||
};
|
||||
|
||||
struct zram_meta {
|
||||
rwlock_t tb_lock; /* protect table */
|
||||
struct table *table;
|
||||
struct zram_table_entry *table;
|
||||
struct zs_pool *mem_pool;
|
||||
};
|
||||
|
||||
|
@ -286,7 +286,11 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
|
||||
{
|
||||
struct firmware_map_entry *entry;
|
||||
|
||||
entry = firmware_map_find_entry_bootmem(start, end, type);
|
||||
entry = firmware_map_find_entry(start, end - 1, type);
|
||||
if (entry)
|
||||
return 0;
|
||||
|
||||
entry = firmware_map_find_entry_bootmem(start, end - 1, type);
|
||||
if (!entry) {
|
||||
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
|
||||
if (!entry)
|
||||
|
@@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = &entry->head;
}
if (parent) {
- hlist_add_after_rcu(parent, &item->head);
+ hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
@@ -688,7 +688,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
atk_debugfs_gitm_get,
NULL,
- "0x%08llx\n")
+ "0x%08llx\n");

static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
{
@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
|
||||
static __init int map_switcher(void)
|
||||
{
|
||||
int i, err;
|
||||
struct page **pagep;
|
||||
|
||||
/*
|
||||
* Map the Switcher in to high memory.
|
||||
@ -110,11 +109,9 @@ static __init int map_switcher(void)
|
||||
* This code actually sets up the pages we've allocated to appear at
|
||||
* switcher_addr. map_vm_area() takes the vma we allocated above, the
|
||||
* kind of pages we're mapping (kernel pages), and a pointer to our
|
||||
* array of struct pages. It increments that pointer, but we don't
|
||||
* care.
|
||||
* array of struct pages.
|
||||
*/
|
||||
pagep = lg_switcher_pages;
|
||||
err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
|
||||
err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
|
||||
if (err) {
|
||||
printk("lguest: map_vm_area failed: %i\n", err);
|
||||
goto free_vma;
|
||||
|
@ -1948,7 +1948,7 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
|
||||
|
||||
/* add filter to the list */
|
||||
if (parent)
|
||||
hlist_add_after(&parent->fdir_node, &input->fdir_node);
|
||||
hlist_add_behind(&input->fdir_node, &parent->fdir_node);
|
||||
else
|
||||
hlist_add_head(&input->fdir_node,
|
||||
&pf->fdir_filter_list);
|
||||
|
@ -2517,7 +2517,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
|
||||
|
||||
/* add filter to the list */
|
||||
if (parent)
|
||||
hlist_add_after(&parent->fdir_node, &input->fdir_node);
|
||||
hlist_add_behind(&input->fdir_node, &parent->fdir_node);
|
||||
else
|
||||
hlist_add_head(&input->fdir_node,
|
||||
&adapter->fdir_filter_list);
|
||||
|
@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
|
||||
|
||||
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
|
||||
int ret;
|
||||
struct page **page_array_ptr;
|
||||
|
||||
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
|
||||
|
||||
@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
|
||||
}
|
||||
tmp_area.addr = page_addr;
|
||||
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
|
||||
page_array_ptr = page;
|
||||
ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
|
||||
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
|
||||
if (ret) {
|
||||
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
|
||||
proc->pid, page_addr);
|
||||
|
@ -351,7 +351,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
|
||||
cfs_hash_dhead_t, dh_head);
|
||||
|
||||
if (dh->dh_tail != NULL) /* not empty */
|
||||
hlist_add_after(dh->dh_tail, hnode);
|
||||
hlist_add_behind(hnode, dh->dh_tail);
|
||||
else /* empty list */
|
||||
hlist_add_head(hnode, &dh->dh_head);
|
||||
dh->dh_tail = hnode;
|
||||
@ -406,7 +406,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
|
||||
cfs_hash_dhead_dep_t, dd_head);
|
||||
|
||||
if (dh->dd_tail != NULL) /* not empty */
|
||||
hlist_add_after(dh->dd_tail, hnode);
|
||||
hlist_add_behind(hnode, dh->dd_tail);
|
||||
else /* empty list */
|
||||
hlist_add_head(hnode, &dh->dd_head);
|
||||
dh->dd_tail = hnode;
|
||||
|
@@ -355,7 +355,7 @@ static struct sysrq_key_op sysrq_term_op = {

static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(first_online_node, GFP_KERNEL), GFP_KERNEL,
+ out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
0, NULL, true);
}
@ -67,7 +67,7 @@ static int fscache_max_active_sysctl(struct ctl_table *table, int write,
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct ctl_table fscache_sysctls[] = {
|
||||
static struct ctl_table fscache_sysctls[] = {
|
||||
{
|
||||
.procname = "object_max_active",
|
||||
.data = &fscache_object_max_active,
|
||||
@ -87,7 +87,7 @@ struct ctl_table fscache_sysctls[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
struct ctl_table fscache_sysctls_root[] = {
|
||||
static struct ctl_table fscache_sysctls_root[] = {
|
||||
{
|
||||
.procname = "fscache",
|
||||
.mode = 0555,
|
||||
|
@ -1019,11 +1019,11 @@ static int __logfs_is_valid_block(struct inode *inode, u64 bix, u64 ofs)
|
||||
/**
|
||||
* logfs_is_valid_block - check whether this block is still valid
|
||||
*
|
||||
* @sb - superblock
|
||||
* @ofs - block physical offset
|
||||
* @ino - block inode number
|
||||
* @bix - block index
|
||||
* @level - block level
|
||||
* @sb: superblock
|
||||
* @ofs: block physical offset
|
||||
* @ino: block inode number
|
||||
* @bix: block index
|
||||
* @gc_level: block level
|
||||
*
|
||||
* Returns 0 if the block is invalid, 1 if it is valid and 2 if it will
|
||||
* become invalid once the journal is written.
|
||||
@ -2226,10 +2226,9 @@ void btree_write_block(struct logfs_block *block)
|
||||
*
|
||||
* @inode: parent inode (ifile or directory)
|
||||
* @buf: object to write (inode or dentry)
|
||||
* @n: object size
|
||||
* @_pos: object number (file position in blocks/objects)
|
||||
* @count: object size
|
||||
* @bix: block index
|
||||
* @flags: write flags
|
||||
* @lock: 0 if write lock is already taken, 1 otherwise
|
||||
* @shadow_tree: shadow below this inode
|
||||
*
|
||||
* FIXME: All caller of this put a 200-300 byte variable on the stack,
|
||||
|
@ -798,7 +798,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
|
||||
list_splice(&head, n->list.prev);
|
||||
|
||||
if (shadows)
|
||||
hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
|
||||
hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
|
||||
else
|
||||
hlist_add_head_rcu(&mnt->mnt_hash,
|
||||
m_hash(&parent->mnt, mnt->mnt_mountpoint));
|
||||
|
@ -70,8 +70,15 @@ static int fanotify_get_response(struct fsnotify_group *group,
|
||||
wait_event(group->fanotify_data.access_waitq, event->response ||
|
||||
atomic_read(&group->fanotify_data.bypass_perm));
|
||||
|
||||
if (!event->response) /* bypass_perm set */
|
||||
if (!event->response) { /* bypass_perm set */
|
||||
/*
|
||||
* Event was canceled because group is being destroyed. Remove
|
||||
* it from group's event list because we are responsible for
|
||||
* freeing the permission event.
|
||||
*/
|
||||
fsnotify_remove_event(group, &event->fae.fse);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* userspace responded, convert to something usable */
|
||||
switch (event->response) {
|
||||
@ -210,7 +217,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
|
||||
return -ENOMEM;
|
||||
|
||||
fsn_event = &event->fse;
|
||||
ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
|
||||
ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
|
||||
if (ret) {
|
||||
/* Permission events shouldn't be merged */
|
||||
BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
|
||||
|
@ -66,7 +66,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
|
||||
|
||||
/* held the notification_mutex the whole time, so this is the
|
||||
* same event we peeked above */
|
||||
return fsnotify_remove_notify_event(group);
|
||||
return fsnotify_remove_first_event(group);
|
||||
}
|
||||
|
||||
static int create_fd(struct fsnotify_group *group,
|
||||
@ -359,6 +359,11 @@ static int fanotify_release(struct inode *ignored, struct file *file)
|
||||
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
|
||||
struct fanotify_perm_event_info *event, *next;
|
||||
|
||||
/*
|
||||
* There may be still new events arriving in the notification queue
|
||||
* but since userspace cannot use fanotify fd anymore, no event can
|
||||
* enter or leave access_list by now.
|
||||
*/
|
||||
spin_lock(&group->fanotify_data.access_lock);
|
||||
|
||||
atomic_inc(&group->fanotify_data.bypass_perm);
|
||||
@ -373,6 +378,13 @@ static int fanotify_release(struct inode *ignored, struct file *file)
|
||||
}
|
||||
spin_unlock(&group->fanotify_data.access_lock);
|
||||
|
||||
/*
|
||||
* Since bypass_perm is set, newly queued events will not wait for
|
||||
* access response. Wake up the already sleeping ones now.
|
||||
* synchronize_srcu() in fsnotify_destroy_group() will wait for all
|
||||
* processes sleeping in fanotify_handle_event() waiting for access
|
||||
* response and thus also for all permission events to be freed.
|
||||
*/
|
||||
wake_up(&group->fanotify_data.access_waitq);
|
||||
#endif
|
||||
|
||||
|
@ -232,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
|
||||
|
||||
BUG_ON(last == NULL);
|
||||
/* mark should be the last entry. last is the current last entry */
|
||||
hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
|
||||
hlist_add_behind_rcu(&mark->i.i_list, &last->i.i_list);
|
||||
out:
|
||||
fsnotify_recalc_inode_mask_locked(inode);
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
@ -108,7 +108,7 @@ int inotify_handle_event(struct fsnotify_group *group,
|
||||
if (len)
|
||||
strcpy(event->name, file_name);
|
||||
|
||||
ret = fsnotify_add_notify_event(group, fsn_event, inotify_merge);
|
||||
ret = fsnotify_add_event(group, fsn_event, inotify_merge);
|
||||
if (ret) {
|
||||
/* Our event wasn't used in the end. Free it. */
|
||||
fsnotify_destroy_event(group, fsn_event);
|
||||
|
@ -149,7 +149,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
|
||||
if (fsnotify_notify_queue_is_empty(group))
|
||||
return NULL;
|
||||
|
||||
event = fsnotify_peek_notify_event(group);
|
||||
event = fsnotify_peek_first_event(group);
|
||||
|
||||
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
|
||||
|
||||
@ -159,7 +159,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
|
||||
|
||||
/* held the notification_mutex the whole time, so this is the
|
||||
* same event we peeked above */
|
||||
fsnotify_remove_notify_event(group);
|
||||
fsnotify_remove_first_event(group);
|
||||
|
||||
return event;
|
||||
}
|
||||
|
@ -73,7 +73,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
|
||||
/* Overflow events are per-group and we don't want to free them */
|
||||
if (!event || event->mask == FS_Q_OVERFLOW)
|
||||
return;
|
||||
|
||||
/* If the event is still queued, we have a problem... */
|
||||
WARN_ON(!list_empty(&event->list));
|
||||
group->ops->free_event(event);
|
||||
}
|
||||
|
||||
@ -83,10 +84,10 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
|
||||
* added to the queue, 1 if the event was merged with some other queued event,
|
||||
* 2 if the queue of events has overflown.
|
||||
*/
|
||||
int fsnotify_add_notify_event(struct fsnotify_group *group,
|
||||
struct fsnotify_event *event,
|
||||
int (*merge)(struct list_head *,
|
||||
struct fsnotify_event *))
|
||||
int fsnotify_add_event(struct fsnotify_group *group,
|
||||
struct fsnotify_event *event,
|
||||
int (*merge)(struct list_head *,
|
||||
struct fsnotify_event *))
|
||||
{
|
||||
int ret = 0;
|
||||
struct list_head *list = &group->notification_list;
|
||||
@@ -124,11 +125,26 @@ queue:
return ret;
}

+ /*
+ * Remove @event from group's notification queue. It is the responsibility of
+ * the caller to destroy the event.
+ */
+ void fsnotify_remove_event(struct fsnotify_group *group,
+ 			   struct fsnotify_event *event)
+ {
+ mutex_lock(&group->notification_mutex);
+ if (!list_empty(&event->list)) {
+ list_del_init(&event->list);
+ group->q_len--;
+ }
+ mutex_unlock(&group->notification_mutex);
+ }
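
fsnotify_remove_event() lets a backend pull a still-queued event off the notification list when it has to dispose of the event itself; the fanotify hunk earlier in this diff uses it for permission events that are bypassed while the group is being torn down. A hedged sketch of that caller pattern (the wrapper name is illustrative):

/* Sketch: cancel an event the caller now owns and must free. */
static void example_cancel_event(struct fsnotify_group *group,
				 struct fsnotify_event *event)
{
	/* Take the event off the queue (harmless if already dequeued)... */
	fsnotify_remove_event(group, event);
	/* ...then free it, since the queue no longer owns it. */
	fsnotify_destroy_event(group, event);
}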
|
||||
|
||||
/*
|
||||
* Remove and return the first event from the notification list. It is the
|
||||
* responsibility of the caller to destroy the obtained event
|
||||
*/
|
||||
struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
|
||||
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
|
||||
{
|
||||
struct fsnotify_event *event;
|
||||
|
||||
@ -140,7 +156,7 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
|
||||
struct fsnotify_event, list);
|
||||
/*
|
||||
* We need to init list head for the case of overflow event so that
|
||||
* check in fsnotify_add_notify_events() works
|
||||
* check in fsnotify_add_event() works
|
||||
*/
|
||||
list_del_init(&event->list);
|
||||
group->q_len--;
|
||||
@ -149,9 +165,10 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
|
||||
}
|
||||
|
||||
/*
|
||||
* This will not remove the event, that must be done with fsnotify_remove_notify_event()
|
||||
* This will not remove the event, that must be done with
|
||||
* fsnotify_remove_first_event()
|
||||
*/
|
||||
struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
|
||||
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
|
||||
{
|
||||
BUG_ON(!mutex_is_locked(&group->notification_mutex));
|
||||
|
||||
@ -169,7 +186,7 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
|
||||
|
||||
mutex_lock(&group->notification_mutex);
|
||||
while (!fsnotify_notify_queue_is_empty(group)) {
|
||||
event = fsnotify_remove_notify_event(group);
|
||||
event = fsnotify_remove_first_event(group);
|
||||
fsnotify_destroy_event(group, event);
|
||||
}
|
||||
mutex_unlock(&group->notification_mutex);
|
||||
|
@ -191,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
|
||||
|
||||
BUG_ON(last == NULL);
|
||||
/* mark should be the last entry. last is the current last entry */
|
||||
hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
|
||||
hlist_add_behind_rcu(&mark->m.m_list, &last->m.m_list);
|
||||
out:
|
||||
fsnotify_recalc_vfsmount_mask_locked(mnt);
|
||||
spin_unlock(&mnt->mnt_root->d_lock);
|
||||
|
@ -74,8 +74,6 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
|
||||
* ntfs_attr_extend_initialized - extend the initialized size of an attribute
|
||||
* @ni: ntfs inode of the attribute to extend
|
||||
* @new_init_size: requested new initialized size in bytes
|
||||
* @cached_page: store any allocated but unused page here
|
||||
* @lru_pvec: lru-buffering pagevec of the caller
|
||||
*
|
||||
* Extend the initialized size of an attribute described by the ntfs inode @ni
|
||||
* to @new_init_size bytes. This involves zeroing any non-sparse space between
|
||||
@ -395,7 +393,6 @@ static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
|
||||
* @nr_pages: number of page cache pages to obtain
|
||||
* @pages: array of pages in which to return the obtained page cache pages
|
||||
* @cached_page: allocated but as yet unused page
|
||||
* @lru_pvec: lru-buffering pagevec of caller
|
||||
*
|
||||
* Obtain @nr_pages locked page cache pages from the mapping @mapping and
|
||||
* starting at index @index.
|
||||
|
@ -4961,6 +4961,15 @@ leftright:
|
||||
|
||||
el = path_leaf_el(path);
|
||||
split_index = ocfs2_search_extent_list(el, cpos);
|
||||
if (split_index == -1) {
|
||||
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
|
||||
"Owner %llu has an extent at cpos %u "
|
||||
"which can no longer be found.\n",
|
||||
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
|
||||
cpos);
|
||||
ret = -EROFS;
|
||||
goto out;
|
||||
}
|
||||
goto leftright;
|
||||
}
|
||||
out:
|
||||
@ -5135,7 +5144,7 @@ int ocfs2_change_extent_flag(handle_t *handle,
|
||||
el = path_leaf_el(left_path);
|
||||
|
||||
index = ocfs2_search_extent_list(el, cpos);
|
||||
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
|
||||
if (index == -1) {
|
||||
ocfs2_error(sb,
|
||||
"Owner %llu has an extent at cpos %u which can no "
|
||||
"longer be found.\n",
|
||||
@ -5491,7 +5500,7 @@ int ocfs2_remove_extent(handle_t *handle,
|
||||
|
||||
el = path_leaf_el(path);
|
||||
index = ocfs2_search_extent_list(el, cpos);
|
||||
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
|
||||
if (index == -1) {
|
||||
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
|
||||
"Owner %llu has an extent at cpos %u which can no "
|
||||
"longer be found.\n",
|
||||
@ -5557,7 +5566,7 @@ int ocfs2_remove_extent(handle_t *handle,
|
||||
|
||||
el = path_leaf_el(path);
|
||||
index = ocfs2_search_extent_list(el, cpos);
|
||||
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
|
||||
if (index == -1) {
|
||||
ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
|
||||
"Owner %llu: split at cpos %u lost record.",
|
||||
(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
|
||||
|
@ -1923,12 +1923,11 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
if (total_backoff >
|
||||
msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) {
|
||||
if (total_backoff > DLM_JOIN_TIMEOUT_MSECS) {
|
||||
status = -ERESTARTSYS;
|
||||
mlog(ML_NOTICE, "Timed out joining dlm domain "
|
||||
"%s after %u msecs\n", dlm->name,
|
||||
jiffies_to_msecs(total_backoff));
|
||||
total_backoff);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
|
@@ -2405,6 +2405,10 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
if (res->state & DLM_LOCK_RES_MIGRATING)
return 0;

+ /* delay migration when the lockres is in RECOVERING state */
+ if (res->state & DLM_LOCK_RES_RECOVERING)
+ return 0;
+
if (res->owner != dlm->node_num)
return 0;
|
||||
|
||||
|
@ -98,7 +98,7 @@ static int __ocfs2_move_extent(handle_t *handle,
|
||||
el = path_leaf_el(path);
|
||||
|
||||
index = ocfs2_search_extent_list(el, cpos);
|
||||
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
|
||||
if (index == -1) {
|
||||
ocfs2_error(inode->i_sb,
|
||||
"Inode %llu has an extent at cpos %u which can no "
|
||||
"longer be found.\n",
|
||||
|
@ -3109,7 +3109,7 @@ static int ocfs2_clear_ext_refcount(handle_t *handle,
|
||||
el = path_leaf_el(path);
|
||||
|
||||
index = ocfs2_search_extent_list(el, cpos);
|
||||
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
|
||||
if (index == -1) {
|
||||
ocfs2_error(sb,
|
||||
"Inode %llu has an extent at cpos %u which can no "
|
||||
"longer be found.\n",
|
||||
|
@ -382,7 +382,7 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
|
||||
|
||||
trace_ocfs2_map_slot_buffers(bytes, si->si_blocks);
|
||||
|
||||
si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks,
|
||||
si->si_bh = kcalloc(si->si_blocks, sizeof(struct buffer_head *),
|
||||
GFP_KERNEL);
|
||||
if (!si->si_bh) {
|
||||
status = -ENOMEM;
|
||||
|
@ -168,7 +168,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
|
||||
K(global_page_state(NR_WRITEBACK)),
|
||||
K(global_page_state(NR_ANON_PAGES)),
|
||||
K(global_page_state(NR_FILE_MAPPED)),
|
||||
K(global_page_state(NR_SHMEM)),
|
||||
K(i.sharedram),
|
||||
K(global_page_state(NR_SLAB_RECLAIMABLE) +
|
||||
global_page_state(NR_SLAB_UNRECLAIMABLE)),
|
||||
K(global_page_state(NR_SLAB_RECLAIMABLE)),
|
||||
|
@ -925,15 +925,30 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
|
||||
struct mm_walk *walk)
|
||||
{
|
||||
struct pagemapread *pm = walk->private;
|
||||
unsigned long addr;
|
||||
unsigned long addr = start;
|
||||
int err = 0;
|
||||
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
|
||||
|
||||
for (addr = start; addr < end; addr += PAGE_SIZE) {
|
||||
err = add_to_pagemap(addr, &pme, pm);
|
||||
if (err)
|
||||
break;
|
||||
while (addr < end) {
|
||||
struct vm_area_struct *vma = find_vma(walk->mm, addr);
|
||||
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
|
||||
unsigned long vm_end;
|
||||
|
||||
if (!vma) {
|
||||
vm_end = end;
|
||||
} else {
|
||||
vm_end = min(end, vma->vm_end);
|
||||
if (vma->vm_flags & VM_SOFTDIRTY)
|
||||
pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
|
||||
}
|
||||
|
||||
for (; addr < vm_end; addr += PAGE_SIZE) {
|
||||
err = add_to_pagemap(addr, &pme, pm);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
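
With the pagemap_pte_hole() change above, a hole in the address space now reports the soft-dirty status of a VM_SOFTDIRTY vma even though no pages are present. From userspace that shows up in /proc/pid/pagemap, where soft-dirty is bit 55 of each 64-bit entry; a minimal reader sketch (the path and bit layout are the standard pagemap ABI, not something added by this patch, and the sample address is arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

/* Print the soft-dirty bit (bit 55) for one virtual address of a process. */
int main(int argc, char **argv)
{
	char path[64];
	uint64_t entry, vaddr = 0x400000;	/* example address */
	int fd;

	snprintf(path, sizeof(path), "/proc/%s/pagemap", argc > 1 ? argv[1] : "self");
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, &entry, sizeof(entry), (vaddr / 4096) * 8) == sizeof(entry))
		printf("soft-dirty: %d\n", (int)((entry >> 55) & 1));
	close(fd);
	return 0;
}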
|
||||
|
||||
|
@ -44,7 +44,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
|
||||
|
||||
pages = end_index - start_index + 1;
|
||||
|
||||
page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
|
||||
page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
|
||||
if (page == NULL)
|
||||
return res;
|
||||
|
||||
|
@ -27,6 +27,8 @@
|
||||
* the filesystem.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/slab.h>
|
||||
@ -448,8 +450,7 @@ static int __init init_squashfs_fs(void)
|
||||
return err;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "squashfs: version 4.0 (2009/01/31) "
|
||||
"Phillip Lougher\n");
|
||||
pr_info("version 4.0 (2009/01/31) Phillip Lougher\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -88,32 +88,32 @@
|
||||
* lib/bitmap.c provides these functions:
|
||||
*/
|
||||
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, int bits);
|
||||
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
|
||||
extern int __bitmap_equal(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int bits);
|
||||
unsigned int nbits);
|
||||
extern void __bitmap_shift_right(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern void __bitmap_shift_left(unsigned long *dst,
|
||||
const unsigned long *src, int shift, int bits);
|
||||
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_intersects(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_subset(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, int bits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
|
||||
const unsigned long *bitmap2, unsigned int nbits);
|
||||
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
|
||||
|
||||
extern void bitmap_set(unsigned long *map, int i, int len);
|
||||
extern void bitmap_clear(unsigned long *map, int start, int nr);
|
||||
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
|
||||
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
|
||||
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
|
||||
unsigned long size,
|
||||
unsigned long start,
|
||||
@ -140,9 +140,9 @@ extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
|
||||
const unsigned long *relmap, int bits);
|
||||
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
|
||||
int sz, int bits);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
|
||||
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
|
||||
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
|
||||
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
|
||||
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
|
||||
|
||||
@ -188,15 +188,15 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
|
||||
}
|
||||
|
||||
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & *src2) != 0;
|
||||
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_and(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 | *src2;
|
||||
@ -205,7 +205,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = *src1 ^ *src2;
|
||||
@ -214,24 +214,24 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return (*dst = *src1 & ~(*src2)) != 0;
|
||||
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
return __bitmap_andnot(dst, src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
int nbits)
|
||||
unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
|
||||
*dst = ~(*src);
|
||||
else
|
||||
__bitmap_complement(dst, src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_equal(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@ -240,7 +240,7 @@ static inline int bitmap_equal(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_intersects(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
|
||||
@ -249,7 +249,7 @@ static inline int bitmap_intersects(const unsigned long *src1,
|
||||
}
|
||||
|
||||
static inline int bitmap_subset(const unsigned long *src1,
|
||||
const unsigned long *src2, int nbits)
|
||||
const unsigned long *src2, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@ -257,7 +257,7 @@ static inline int bitmap_subset(const unsigned long *src1,
|
||||
return __bitmap_subset(src1, src2, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@ -265,7 +265,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits)
|
||||
return __bitmap_empty(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@ -273,7 +273,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits)
|
||||
return __bitmap_full(src, nbits);
|
||||
}
|
||||
|
||||
static inline int bitmap_weight(const unsigned long *src, int nbits)
|
||||
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
|
||||
{
|
||||
if (small_const_nbits(nbits))
|
||||
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
|
||||
@@ -284,7 +284,7 @@ static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
- *dst = *src >> n;
+ *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
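
The bitmap.h changes in this series switch the length parameters from int to unsigned int nbits; existing callers keep working apart from sign conversions. A small usage sketch against the new prototypes (the helper name is illustrative):

#include <linux/bitmap.h>
#include <linux/printk.h>

static void example_bitmap_usage(void)
{
	DECLARE_BITMAP(mask, 128);
	unsigned int nbits = 128;	/* lengths are now unsigned int */

	bitmap_zero(mask, nbits);
	bitmap_set(mask, 0, 16);	/* set bits 0..15 */
	pr_info("weight=%d\n", bitmap_weight(mask, nbits));
}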
|
||||
|
@@ -2,7 +2,7 @@
#define _LINUX_BYTEORDER_GENERIC_H

/*
- * linux/byteorder_generic.h
+ * linux/byteorder/generic.h
* Generic Byte-reordering support
*
* The "... p" macros, like le64_to_cpup, can be used with pointers
include/linux/cma.h (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
#ifndef __CMA_H__
|
||||
#define __CMA_H__
|
||||
|
||||
/*
|
||||
* There is always at least global CMA area and a few optional
|
||||
* areas configured in kernel .config.
|
||||
*/
|
||||
#ifdef CONFIG_CMA_AREAS
|
||||
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
|
||||
|
||||
#else
|
||||
#define MAX_CMA_AREAS (0)
|
||||
|
||||
#endif
|
||||
|
||||
struct cma;
|
||||
|
||||
extern phys_addr_t cma_get_base(struct cma *cma);
|
||||
extern unsigned long cma_get_size(struct cma *cma);
|
||||
|
||||
extern int __init cma_declare_contiguous(phys_addr_t size,
|
||||
phys_addr_t base, phys_addr_t limit,
|
||||
phys_addr_t alignment, unsigned int order_per_bit,
|
||||
bool fixed, struct cma **res_cma);
|
||||
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
|
||||
extern bool cma_release(struct cma *cma, struct page *pages, int count);
|
||||
#endif
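
The new include/linux/cma.h exposes the CMA allocator directly, where it was previously reachable only through the dma-contiguous layer. A hedged sketch of how a caller might use the prototypes above; the reservation would have to run early in boot while memblock is still available, and error handling is abbreviated:

#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *example_cma;

/* Reserve a 16 MB region; base/limit of 0 let the allocator choose. */
static int __init example_cma_reserve(void)
{
	return cma_declare_contiguous(SZ_16M, 0, 0, 0, 0, false, &example_cma);
}

/* Later: hand out and return page ranges from that region. */
static struct page *example_cma_get(int nr_pages)
{
	return cma_alloc(example_cma, nr_pages, 0);
}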
|
@ -53,18 +53,13 @@
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
struct cma;
|
||||
struct page;
|
||||
struct device;
|
||||
|
||||
#ifdef CONFIG_DMA_CMA
|
||||
|
||||
/*
|
||||
* There is always at least global CMA area and a few optional device
|
||||
* private areas configured in kernel .config.
|
||||
*/
|
||||
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
|
||||
|
||||
extern struct cma *dma_contiguous_default_area;
|
||||
|
||||
static inline struct cma *dev_get_cma_area(struct device *dev)
|
||||
@ -123,8 +118,6 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
|
||||
|
||||
#else
|
||||
|
||||
#define MAX_CMA_AREAS (0)
|
||||
|
||||
static inline struct cma *dev_get_cma_area(struct device *dev)
|
||||
{
|
||||
return NULL;
|
||||
|
@ -2688,7 +2688,7 @@ static const struct file_operations __fops = { \
|
||||
.read = simple_attr_read, \
|
||||
.write = simple_attr_write, \
|
||||
.llseek = generic_file_llseek, \
|
||||
};
|
||||
}
|
||||
|
||||
static inline __printf(1, 2)
|
||||
void __simple_attr_check_format(const char *fmt, ...)
|
||||
|
@ -322,16 +322,18 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
|
||||
extern void fsnotify_destroy_event(struct fsnotify_group *group,
|
||||
struct fsnotify_event *event);
|
||||
/* attach the event to the group notification queue */
|
||||
extern int fsnotify_add_notify_event(struct fsnotify_group *group,
|
||||
struct fsnotify_event *event,
|
||||
int (*merge)(struct list_head *,
|
||||
struct fsnotify_event *));
|
||||
extern int fsnotify_add_event(struct fsnotify_group *group,
|
||||
struct fsnotify_event *event,
|
||||
int (*merge)(struct list_head *,
|
||||
struct fsnotify_event *));
|
||||
/* Remove passed event from groups notification queue */
|
||||
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
|
||||
/* true if the group notification queue is empty */
|
||||
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
|
||||
/* return, but do not dequeue the first event on the notification queue */
|
||||
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
|
||||
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
|
||||
/* return AND dequeue the first event on the notification queue */
|
||||
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
|
||||
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
|
||||
|
||||
/* functions used to manipulate the marks attached to inodes */
|
||||
|
||||
|
@ -360,7 +360,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
|
||||
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
|
||||
void free_pages_exact(void *virt, size_t size);
|
||||
/* This is different from alloc_pages_exact_node !!! */
|
||||
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
|
||||
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
|
||||
|
||||
#define __get_free_page(gfp_mask) \
|
||||
__get_free_pages((gfp_mask), 0)
|
||||
|
include/linux/glob.h (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
#ifndef _LINUX_GLOB_H
|
||||
#define _LINUX_GLOB_H
|
||||
|
||||
#include <linux/types.h> /* For bool */
|
||||
#include <linux/compiler.h> /* For __pure */
|
||||
|
||||
bool __pure glob_match(char const *pat, char const *str);
|
||||
|
||||
#endif /* _LINUX_GLOB_H */
|
@ -93,7 +93,7 @@ static inline int kmap_atomic_idx_push(void)
|
||||
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
WARN_ON_ONCE(in_irq() && !irqs_disabled());
|
||||
BUG_ON(idx > KM_TYPE_NR);
|
||||
BUG_ON(idx >= KM_TYPE_NR);
|
||||
#endif
|
||||
return idx;
|
||||
}
|
||||
|
@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
|
||||
#endif /* CONFIG_DEBUG_VM */
|
||||
|
||||
extern unsigned long transparent_hugepage_flags;
|
||||
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||
pmd_t *dst_pmd, pmd_t *src_pmd,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long end);
|
||||
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
|
||||
static inline int split_huge_page(struct page *page)
|
||||
{
|
||||
|
@ -87,7 +87,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
|
||||
#endif
|
||||
|
||||
extern unsigned long hugepages_treat_as_movable;
|
||||
extern const unsigned long hugetlb_zero, hugetlb_infinity;
|
||||
extern int sysctl_hugetlb_shm_group;
|
||||
extern struct list_head huge_boot_pages;
|
||||
|
||||
|
@ -493,11 +493,6 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
|
||||
return buf;
|
||||
}
|
||||
|
||||
static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
|
||||
{
|
||||
return hex_byte_pack(buf, byte);
|
||||
}
|
||||
|
||||
extern int hex_to_bin(char ch);
|
||||
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
|
||||
|
||||
|
@ -44,7 +44,7 @@ struct klist_node {
|
||||
|
||||
extern void klist_add_tail(struct klist_node *n, struct klist *k);
|
||||
extern void klist_add_head(struct klist_node *n, struct klist *k);
|
||||
extern void klist_add_after(struct klist_node *n, struct klist_node *pos);
|
||||
extern void klist_add_behind(struct klist_node *n, struct klist_node *pos);
|
||||
extern void klist_add_before(struct klist_node *n, struct klist_node *pos);
|
||||
|
||||
extern void klist_del(struct klist_node *n);
|
||||
|
@@ -654,15 +654,15 @@ static inline void hlist_add_before(struct hlist_node *n,
*(n->pprev) = n;
}

- static inline void hlist_add_after(struct hlist_node *n,
- 				    struct hlist_node *next)
+ static inline void hlist_add_behind(struct hlist_node *n,
+ 				     struct hlist_node *prev)
{
- next->next = n->next;
- n->next = next;
- next->pprev = &n->next;
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;

- if(next->next)
- next->next->pprev = &next->next;
+ if (n->next)
+ n->next->pprev = &n->next;
}
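
hlist_add_behind() is hlist_add_after() with the arguments swapped into the (new element, existing element) order used by the other hlist helpers; that is the conversion all the driver and filesystem hunks in this series perform. A small usage sketch with an illustrative struct:

#include <linux/list.h>

struct item {
	int key;
	struct hlist_node node;
};

/* Insert 'add' into a bucket kept sorted by key (illustrative). */
static void example_sorted_insert(struct hlist_head *bucket, struct item *add)
{
	struct item *cur, *prev = NULL;

	hlist_for_each_entry(cur, bucket, node) {
		if (cur->key > add->key)
			break;
		prev = cur;
	}
	if (prev)
		hlist_add_behind(&add->node, &prev->node);	/* new goes after prev */
	else
		hlist_add_head(&add->node, bucket);
}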
|
||||
|
||||
/* after that we'll appear to be on some hlist and hlist_del will work */
|
||||
|
@ -249,7 +249,7 @@ phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
|
||||
/*
|
||||
* Set the allocation direction to bottom-up or top-down.
|
||||
*/
|
||||
static inline void memblock_set_bottom_up(bool enable)
|
||||
static inline void __init memblock_set_bottom_up(bool enable)
|
||||
{
|
||||
memblock.bottom_up = enable;
|
||||
}
|
||||
@ -264,7 +264,7 @@ static inline bool memblock_bottom_up(void)
|
||||
return memblock.bottom_up;
|
||||
}
|
||||
#else
|
||||
static inline void memblock_set_bottom_up(bool enable) {}
|
||||
static inline void __init memblock_set_bottom_up(bool enable) {}
|
||||
static inline bool memblock_bottom_up(void) { return false; }
|
||||
#endif
|
||||
|
||||
|
@@ -26,11 +26,12 @@ enum {
MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

- /* Types for control the zone type of onlined memory */
+ /* Types for control the zone type of onlined and offlined memory */
enum {
- ONLINE_KEEP,
- ONLINE_KERNEL,
- ONLINE_MOVABLE,
+ MMOP_OFFLINE = -1,
+ MMOP_ONLINE_KEEP,
+ MMOP_ONLINE_KERNEL,
+ MMOP_ONLINE_MOVABLE,
};
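
The MMOP_ names turn the online/offline request into one explicit value, with MMOP_OFFLINE = -1 so that "offline" can travel through the same code paths as the three online modes. The memory block sysfs "state" file maps its strings onto these values; roughly like the sketch below, which is illustrative rather than the exact driver code:

#include <linux/string.h>
#include <linux/errno.h>

/* Sketch: map the sysfs "state" strings to MMOP_* requests. */
static int example_parse_state(const char *buf)
{
	if (sysfs_streq(buf, "online_kernel"))
		return MMOP_ONLINE_KERNEL;
	if (sysfs_streq(buf, "online_movable"))
		return MMOP_ONLINE_MOVABLE;
	if (sysfs_streq(buf, "online"))
		return MMOP_ONLINE_KEEP;
	if (sysfs_streq(buf, "offline"))
		return MMOP_OFFLINE;
	return -EINVAL;
}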
|
||||
|
||||
/*
|
||||
@ -258,6 +259,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
|
||||
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
|
||||
void *arg, int (*func)(struct memory_block *, void *));
|
||||
extern int add_memory(int nid, u64 start, u64 size);
|
||||
extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
|
||||
extern int arch_add_memory(int nid, u64 start, u64 size);
|
||||
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
|
||||
extern bool is_memblock_offlined(struct memory_block *mem);
|
||||
|
@ -20,11 +20,13 @@ extern void dump_page_badflags(struct page *page, const char *reason,
|
||||
} while (0)
|
||||
#define VM_WARN_ON(cond) WARN_ON(cond)
|
||||
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
|
||||
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
|
||||
#else
|
||||
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
|
||||
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_VIRTUAL
|
||||
|
@ -170,6 +170,8 @@ extern int __mmu_notifier_register(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm);
|
||||
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm);
|
||||
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm);
|
||||
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
|
||||
extern void __mmu_notifier_release(struct mm_struct *mm);
|
||||
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
|
||||
@ -288,6 +290,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
|
||||
set_pte_at(___mm, ___address, __ptep, ___pte); \
|
||||
})
|
||||
|
||||
extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
|
||||
void (*func)(struct rcu_head *rcu));
|
||||
extern void mmu_notifier_synchronize(void);
|
||||
|
||||
#else /* CONFIG_MMU_NOTIFIER */
|
||||
|
||||
static inline void mmu_notifier_release(struct mm_struct *mm)
|
||||
|
@ -143,6 +143,7 @@ enum zone_stat_item {
|
||||
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
|
||||
NR_DIRTIED, /* page dirtyings since bootup */
|
||||
NR_WRITTEN, /* page writings since bootup */
|
||||
NR_PAGES_SCANNED, /* pages scanned since last reclaim */
|
||||
#ifdef CONFIG_NUMA
|
||||
NUMA_HIT, /* allocated in intended node */
|
||||
NUMA_MISS, /* allocated in non intended node */
|
||||
@ -324,18 +325,11 @@ enum zone_type {
|
||||
#ifndef __GENERATING_BOUNDS_H
|
||||
|
||||
struct zone {
|
||||
/* Fields commonly accessed by the page allocator */
|
||||
/* Read-mostly fields */
|
||||
|
||||
/* zone watermarks, access with *_wmark_pages(zone) macros */
|
||||
unsigned long watermark[NR_WMARK];
|
||||
|
||||
/*
|
||||
* When free pages are below this point, additional steps are taken
|
||||
* when reading the number of free pages to avoid per-cpu counter
|
||||
* drift allowing watermarks to be breached
|
||||
*/
|
||||
unsigned long percpu_drift_mark;
|
||||
|
||||
/*
|
||||
* We don't know if the memory that we're going to allocate will be freeable
|
||||
* or/and it will be released eventually, so to avoid totally wasting several
|
||||
@ -344,7 +338,20 @@ struct zone {
|
||||
* on the higher zones). This array is recalculated at runtime if the
|
||||
* sysctl_lowmem_reserve_ratio sysctl changes.
|
||||
*/
|
||||
unsigned long lowmem_reserve[MAX_NR_ZONES];
|
||||
long lowmem_reserve[MAX_NR_ZONES];
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
int node;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
|
||||
* this zone's LRU. Maintained by the pageout code.
|
||||
*/
|
||||
unsigned int inactive_ratio;
|
||||
|
||||
struct pglist_data *zone_pgdat;
|
||||
struct per_cpu_pageset __percpu *pageset;
|
||||
|
||||
/*
|
||||
* This is a per-zone reserve of pages that should not be
|
||||
@ -352,34 +359,6 @@ struct zone {
|
||||
*/
|
||||
unsigned long dirty_balance_reserve;
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
int node;
|
||||
/*
|
||||
* zone reclaim becomes active if more unmapped pages exist.
|
||||
*/
|
||||
unsigned long min_unmapped_pages;
|
||||
unsigned long min_slab_pages;
|
||||
#endif
|
||||
struct per_cpu_pageset __percpu *pageset;
|
||||
/*
|
||||
* free areas of different sizes
|
||||
*/
|
||||
spinlock_t lock;
|
||||
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
||||
/* Set to true when the PG_migrate_skip bits should be cleared */
|
||||
bool compact_blockskip_flush;
|
||||
|
||||
/* pfn where compaction free scanner should start */
|
||||
unsigned long compact_cached_free_pfn;
|
||||
/* pfn where async and sync compaction migration scanner should start */
|
||||
unsigned long compact_cached_migrate_pfn[2];
|
||||
#endif
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
/* see spanned/present_pages for more description */
|
||||
seqlock_t span_seqlock;
|
||||
#endif
|
||||
struct free_area free_area[MAX_ORDER];
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM
|
||||
/*
|
||||
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
|
||||
@ -388,74 +367,14 @@ struct zone {
|
||||
unsigned long *pageblock_flags;
|
||||
#endif /* CONFIG_SPARSEMEM */
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
#ifdef CONFIG_NUMA
|
||||
/*
|
||||
* On compaction failure, 1<<compact_defer_shift compactions
|
||||
* are skipped before trying again. The number attempted since
|
||||
* last failure is tracked with compact_considered.
|
||||
* zone reclaim becomes active if more unmapped pages exist.
|
||||
*/
|
||||
unsigned int compact_considered;
|
||||
unsigned int compact_defer_shift;
|
||||
int compact_order_failed;
|
||||
#endif
|
||||
unsigned long min_unmapped_pages;
|
||||
unsigned long min_slab_pages;
|
||||
#endif /* CONFIG_NUMA */
|
||||
|
||||
ZONE_PADDING(_pad1_)
|
||||
|
||||
/* Fields commonly accessed by the page reclaim scanner */
|
||||
spinlock_t lru_lock;
|
||||
struct lruvec lruvec;
|
||||
|
||||
/* Evictions & activations on the inactive file list */
|
||||
atomic_long_t inactive_age;
|
||||
|
||||
unsigned long pages_scanned; /* since last reclaim */
|
||||
unsigned long flags; /* zone flags, see below */
|
||||
|
||||
/* Zone statistics */
|
||||
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
||||
|
||||
/*
|
||||
* The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
|
||||
* this zone's LRU. Maintained by the pageout code.
|
||||
*/
|
||||
unsigned int inactive_ratio;
|
||||
|
||||
|
||||
ZONE_PADDING(_pad2_)
|
||||
/* Rarely used or read-mostly fields */
|
||||
|
||||
/*
|
||||
* wait_table -- the array holding the hash table
|
||||
* wait_table_hash_nr_entries -- the size of the hash table array
|
||||
* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
|
||||
*
|
||||
* The purpose of all these is to keep track of the people
|
||||
* waiting for a page to become available and make them
|
||||
* runnable again when possible. The trouble is that this
|
||||
* consumes a lot of space, especially when so few things
|
||||
* wait on pages at a given time. So instead of using
|
||||
* per-page waitqueues, we use a waitqueue hash table.
|
||||
*
|
||||
* The bucket discipline is to sleep on the same queue when
|
||||
* colliding and wake all in that wait queue when removing.
|
||||
* When something wakes, it must check to be sure its page is
|
||||
* truly available, a la thundering herd. The cost of a
|
||||
* collision is great, but given the expected load of the
|
||||
* table, they should be so rare as to be outweighed by the
|
||||
* benefits from the saved space.
|
||||
*
|
||||
* __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
|
||||
* primary users of these fields, and in mm/page_alloc.c
|
||||
* free_area_init_core() performs the initialization of them.
|
||||
*/
|
||||
wait_queue_head_t * wait_table;
|
||||
unsigned long wait_table_hash_nr_entries;
|
||||
unsigned long wait_table_bits;
|
||||
|
||||
/*
|
||||
* Discontig memory support fields.
|
||||
*/
|
||||
struct pglist_data *zone_pgdat;
|
||||
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
|
||||
unsigned long zone_start_pfn;
|
||||
|
||||
@ -500,9 +419,11 @@ struct zone {
|
||||
* adjust_managed_page_count() should be used instead of directly
|
||||
* touching zone->managed_pages and totalram_pages.
|
||||
*/
|
||||
unsigned long managed_pages;
|
||||
unsigned long spanned_pages;
|
||||
unsigned long present_pages;
|
||||
unsigned long managed_pages;
|
||||
|
||||
const char *name;
|
||||
|
||||
/*
|
||||
* Number of MIGRATE_RESEVE page block. To maintain for just
|
||||
@ -510,10 +431,94 @@ struct zone {
|
||||
*/
|
||||
int nr_migrate_reserve_block;
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
/* see spanned/present_pages for more description */
|
||||
seqlock_t span_seqlock;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* rarely used fields:
|
||||
* wait_table -- the array holding the hash table
|
||||
* wait_table_hash_nr_entries -- the size of the hash table array
|
||||
* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
|
||||
*
|
||||
* The purpose of all these is to keep track of the people
|
||||
* waiting for a page to become available and make them
|
||||
* runnable again when possible. The trouble is that this
|
||||
* consumes a lot of space, especially when so few things
|
||||
* wait on pages at a given time. So instead of using
|
||||
* per-page waitqueues, we use a waitqueue hash table.
|
||||
*
|
||||
* The bucket discipline is to sleep on the same queue when
|
||||
* colliding and wake all in that wait queue when removing.
|
||||
* When something wakes, it must check to be sure its page is
|
||||
* truly available, a la thundering herd. The cost of a
|
||||
* collision is great, but given the expected load of the
|
||||
* table, they should be so rare as to be outweighed by the
|
||||
* benefits from the saved space.
|
||||
*
|
||||
* __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
|
||||
* primary users of these fields, and in mm/page_alloc.c
|
||||
* free_area_init_core() performs the initialization of them.
|
||||
*/
|
||||
const char *name;
|
||||
wait_queue_head_t *wait_table;
|
||||
unsigned long wait_table_hash_nr_entries;
|
||||
unsigned long wait_table_bits;
|
||||
|
||||
ZONE_PADDING(_pad1_)
|
||||
|
||||
/* Write-intensive fields used from the page allocator */
|
||||
spinlock_t lock;
|
||||
|
||||
/* free areas of different sizes */
|
||||
struct free_area free_area[MAX_ORDER];
|
||||
|
||||
/* zone flags, see below */
|
||||
unsigned long flags;
|
||||
|
||||
ZONE_PADDING(_pad2_)
|
||||
|
||||
/* Write-intensive fields used by page reclaim */
|
||||
|
||||
/* Fields commonly accessed by the page reclaim scanner */
|
||||
spinlock_t lru_lock;
|
||||
struct lruvec lruvec;
|
||||
|
||||
/* Evictions & activations on the inactive file list */
|
||||
atomic_long_t inactive_age;
|
||||
|
||||
/*
|
||||
* When free pages are below this point, additional steps are taken
|
||||
* when reading the number of free pages to avoid per-cpu counter
|
||||
* drift allowing watermarks to be breached
|
||||
*/
|
||||
unsigned long percpu_drift_mark;
|
||||
|
||||
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
||||
/* pfn where compaction free scanner should start */
|
||||
unsigned long compact_cached_free_pfn;
|
||||
/* pfn where async and sync compaction migration scanner should start */
|
||||
unsigned long compact_cached_migrate_pfn[2];
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_COMPACTION
|
||||
/*
|
||||
* On compaction failure, 1<<compact_defer_shift compactions
|
||||
* are skipped before trying again. The number attempted since
|
||||
* last failure is tracked with compact_considered.
|
||||
*/
|
||||
unsigned int compact_considered;
|
||||
unsigned int compact_defer_shift;
|
||||
int compact_order_failed;
|
||||
#endif
|
||||
|
||||
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
||||
/* Set to true when the PG_migrate_skip bits should be cleared */
|
||||
bool compact_blockskip_flush;
|
||||
#endif
|
||||
|
||||
ZONE_PADDING(_pad3_)
|
||||
/* Zone statistics */
|
||||
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
||||
} ____cacheline_internodealigned_in_smp;
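
The struct zone reordering above groups fields by access pattern: read-mostly fields first, then the rarely used wait tables, then the write-intensive allocator and reclaim fields, with ZONE_PADDING() markers so each group starts on its own internode cacheline. ZONE_PADDING is, roughly, an empty cacheline-aligned struct; the sketch below shows the idea rather than quoting the header verbatim:

/* Roughly what the padding marker expands to on SMP builds. */
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;

/* On !SMP builds it compiles away to nothing:  #define ZONE_PADDING(name) */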
|
||||
|
||||
typedef enum {
|
||||
@ -529,6 +534,7 @@ typedef enum {
|
||||
ZONE_WRITEBACK, /* reclaim scanning has recently found
|
||||
* many pages under writeback
|
||||
*/
|
||||
ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
|
||||
} zone_flags_t;
|
||||
|
||||
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
|
||||
@ -566,6 +572,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
|
||||
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
|
||||
}
|
||||
|
||||
static inline int zone_is_fair_depleted(const struct zone *zone)
|
||||
{
|
||||
return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
|
||||
}
|
||||
|
||||
static inline int zone_is_oom_locked(const struct zone *zone)
|
||||
{
|
||||
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
|
||||
@ -872,6 +883,8 @@ static inline int zone_movable_is_highmem(void)
|
||||
{
|
||||
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
|
||||
return movable_zone == ZONE_HIGHMEM;
|
||||
#elif defined(CONFIG_HIGHMEM)
|
||||
return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
|
@ -430,7 +430,15 @@ static inline int num_node_state(enum node_states state)
|
||||
for_each_node_mask((__node), node_states[__state])
|
||||
|
||||
#define first_online_node first_node(node_states[N_ONLINE])
|
||||
#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
|
||||
#define first_memory_node first_node(node_states[N_MEMORY])
|
||||
static inline int next_online_node(int nid)
|
||||
{
|
||||
return next_node(nid, node_states[N_ONLINE]);
|
||||
}
|
||||
static inline int next_memory_node(int nid)
|
||||
{
|
||||
return next_node(nid, node_states[N_MEMORY]);
|
||||
}
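
first_memory_node and next_memory_node walk node_states[N_MEMORY], i.e. only nodes that actually have memory; that is why the sysrq OOM hunk earlier in this diff switches away from first_online_node, which could name a memoryless node. A small iteration sketch (the function name is illustrative):

/* Visit every node that has memory. */
static void example_walk_memory_nodes(void)
{
	int nid;

	for (nid = first_memory_node; nid < MAX_NUMNODES; nid = next_memory_node(nid))
		pr_info("node %d has memory\n", nid);
}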
|
||||
|
||||
extern int nr_node_ids;
|
||||
extern int nr_online_nodes;
|
||||
@ -471,6 +479,7 @@ static inline int num_node_state(enum node_states state)
|
||||
for ( (node) = 0; (node) == 0; (node) = 1)
|
||||
|
||||
#define first_online_node 0
|
||||
#define first_memory_node 0
|
||||
#define next_online_node(nid) (MAX_NUMNODES)
|
||||
#define nr_node_ids 1
|
||||
#define nr_online_nodes 1
|
||||
|
@ -55,8 +55,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
||||
const char *message);
|
||||
|
||||
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||
extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
|
||||
|
||||
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
|
||||
int order, const nodemask_t *nodemask);
|
||||
|
@ -171,13 +171,12 @@ static inline int __TestClearPage##uname(struct page *page) \
#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define PAGEFLAG_FALSE(uname) \
static inline int Page##uname(const struct page *page) \
{ return 0; }

#define TESTSCFLAG(uname, lname) \
TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)

#define TESTPAGEFLAG_FALSE(uname) \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname) \
static inline void SetPage##uname(struct page *page) { }

@ -187,12 +186,21 @@ static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname) \
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname) \
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname) \
TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

struct page; /* forward declaration */

TESTPAGEFLAG(Locked, locked)

@ -248,7 +256,6 @@ PAGEFLAG_FALSE(HighMem)
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)
SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)

@ -258,8 +265,8 @@ PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@ -484,6 +484,9 @@ static inline int lock_page_killable(struct page *page)
/*
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_sem implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
@ -31,7 +31,7 @@ static inline const char *printk_skip_level(const char *buffer)
}

/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT

/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
@ -432,9 +432,9 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
}

/**
* hlist_add_after_rcu
* @prev: the existing element to add the new element after.
* hlist_add_behind_rcu
* @n: the new element to add to the hash list.
* @prev: the existing element to add the new element after.
*
* Description:
* Adds the specified element to the specified hlist

@ -449,8 +449,8 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;
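For illustration only (not part of this diff), a minimal sketch of the renamed helper's call side, assuming a hypothetical hash-chain element type and an external lock serializing writers; readers would walk the chain concurrently with hlist_for_each_entry_rcu().

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_item {
	int key;
	struct hlist_node node;
};

/* Insert @new directly behind @old; concurrent RCU readers stay safe. */
static void demo_insert_behind(struct demo_item *new, struct demo_item *old,
			       spinlock_t *lock)
{
	spin_lock(lock);
	hlist_add_behind_rcu(&new->node, &old->node);
	spin_unlock(lock);
}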
@ -311,7 +311,6 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void init_page_accessed(struct page *page);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
@ -113,7 +113,7 @@ extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
struct page **pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
@ -11,7 +11,7 @@ struct zbud_ops {

struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
void zbud_destroy_pool(struct zbud_pool *pool);
int zbud_alloc(struct zbud_pool *pool, unsigned int size, gfp_t gfp,
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void zbud_free(struct zbud_pool *pool, unsigned long handle);
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
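A minimal sketch (not part of this diff) of an allocation round-trip against the updated zbud_alloc() prototype; the pool is assumed to have been created elsewhere with zbud_create_pool(), and the 100-byte size is arbitrary.

#include <linux/gfp.h>
#include <linux/zbud.h>

static int zbud_demo(struct zbud_pool *pool)
{
	unsigned long handle;
	int ret;

	/* size is now a size_t; an opaque handle comes back on success */
	ret = zbud_alloc(pool, 100, GFP_KERNEL, &handle);
	if (ret)
		return ret;

	zbud_free(pool, handle);
	return 0;
}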
@ -493,64 +493,6 @@ extern int deflateInit2 (z_streamp strm,
method). msg is set to null if there is no error message. deflateInit2 does
not perform any compression: this will be done by deflate().
*/

#if 0
extern int zlib_deflateSetDictionary (z_streamp strm,
const Byte *dictionary,
uInt dictLength);
#endif
/*
Initializes the compression dictionary from the given byte sequence
without producing any compressed output. This function must be called
immediately after deflateInit, deflateInit2 or deflateReset, before any
call of deflate. The compressor and decompressor must use exactly the same
dictionary (see inflateSetDictionary).

The dictionary should consist of strings (byte sequences) that are likely
to be encountered later in the data to be compressed, with the most commonly
used strings preferably put towards the end of the dictionary. Using a
dictionary is most useful when the data to be compressed is short and can be
predicted with good accuracy; the data can then be compressed better than
with the default empty dictionary.

Depending on the size of the compression data structures selected by
deflateInit or deflateInit2, a part of the dictionary may in effect be
discarded, for example if the dictionary is larger than the window size in
deflate or deflate2. Thus the strings most likely to be useful should be
put at the end of the dictionary, not at the front.

Upon return of this function, strm->adler is set to the Adler32 value
of the dictionary; the decompressor may later use this value to determine
which dictionary has been used by the compressor. (The Adler32 value
applies to the whole dictionary even if only a subset of the dictionary is
actually used by the compressor.)

deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
parameter is invalid (such as NULL dictionary) or the stream state is
inconsistent (for example if deflate has already been called for this stream
or if the compression method is bsort). deflateSetDictionary does not
perform any compression: this will be done by deflate().
*/

#if 0
extern int zlib_deflateCopy (z_streamp dest, z_streamp source);
#endif

/*
Sets the destination stream as a complete copy of the source stream.

This function can be useful when several compression strategies will be
tried, for example when there are several ways of pre-processing the input
data with a filter. The streams that will be discarded should then be freed
by calling deflateEnd. Note that deflateCopy duplicates the internal
compression state which can be quite large, so this strategy is slow and
can consume lots of memory.

deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
(such as zalloc being NULL). msg is left unchanged in both source and
destination.
*/

extern int zlib_deflateReset (z_streamp strm);
/*

@ -568,27 +510,6 @@ static inline unsigned long deflateBound(unsigned long s)
return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}
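As a worked example of the deflateBound() helper above (illustration only, the 4 KB input size is an assumption): 4096 + ((4096 + 7) >> 3) + ((4096 + 63) >> 6) + 11 = 4096 + 512 + 64 + 11 = 4683 bytes, which is how a caller would size a worst-case output buffer.

#include <linux/zlib.h>

/* Hypothetical helper: size a destination buffer for the worst case. */
static unsigned long demo_worst_case(unsigned long src_len)
{
	return deflateBound(src_len);	/* 4096 -> 4683 */
}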
#if 0
extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
#endif
/*
Dynamically update the compression level and compression strategy. The
interpretation of level and strategy is as in deflateInit2. This can be
used to switch between compression and straight copy of the input data, or
to switch to a different kind of input data requiring a different
strategy. If the compression level is changed, the input available so far
is compressed with the old level (and may be flushed); the new level will
take effect only at the next call of deflate().

Before the call of deflateParams, the stream state must be set as for
a call of deflate(), since the currently available input may have to
be compressed and flushed. In particular, strm->avail_out must be non-zero.

deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
if strm->avail_out was zero.
*/

/*
extern int inflateInit2 (z_streamp strm, int windowBits);

@ -631,45 +552,6 @@ extern int inflateInit2 (z_streamp strm, int windowBits);
and avail_out are unchanged.)
*/

extern int zlib_inflateSetDictionary (z_streamp strm,
const Byte *dictionary,
uInt dictLength);
/*
Initializes the decompression dictionary from the given uncompressed byte
sequence. This function must be called immediately after a call of inflate,
if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
can be determined from the adler32 value returned by that call of inflate.
The compressor and decompressor must use exactly the same dictionary (see
deflateSetDictionary). For raw inflate, this function can be called
immediately after inflateInit2() or inflateReset() and before any call of
inflate() to set the dictionary. The application must insure that the
dictionary that was used for compression is provided.

inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
parameter is invalid (such as NULL dictionary) or the stream state is
inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
expected one (incorrect adler32 value). inflateSetDictionary does not
perform any decompression: this will be done by subsequent calls of
inflate().
*/

#if 0
extern int zlib_inflateSync (z_streamp strm);
#endif
/*
Skips invalid compressed data until a full flush point (see above the
description of deflate with Z_FULL_FLUSH) can be found, or until all
available input is skipped. No output is provided.

inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
if no more input was provided, Z_DATA_ERROR if no flush point has been found,
or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
case, the application may save the current current value of total_in which
indicates where valid compressed data was found. In the error case, the
application may repeatedly call inflateSync, providing more input each time,
until success or end of the input data.
*/

extern int zlib_inflateReset (z_streamp strm);
/*
This function is equivalent to inflateEnd followed by inflateInit,
106 include/linux/zpool.h Normal file

@ -0,0 +1,106 @@
/*
* zpool memory storage api
*
* Copyright (C) 2014 Dan Streetman
*
* This is a common frontend for the zbud and zsmalloc memory
* storage pool implementations. Typically, this is used to
* store compressed memory.
*/

#ifndef _ZPOOL_H_
#define _ZPOOL_H_

struct zpool;

struct zpool_ops {
int (*evict)(struct zpool *pool, unsigned long handle);
};

/*
* Control how a handle is mapped. It will be ignored if the
* implementation does not support it. Its use is optional.
* Note that this does not refer to memory protection, it
* refers to how the memory will be copied in/out if copying
* is necessary during mapping; read-write is the safest as
* it copies the existing memory in on map, and copies the
* changed memory back out on unmap. Write-only does not copy
* in the memory and should only be used for initialization.
* If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
*/
enum zpool_mapmode {
ZPOOL_MM_RW, /* normal read-write mapping */
ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
ZPOOL_MM_WO, /* write-only (no copy-in at map time) */

ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};

struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops);

char *zpool_get_type(struct zpool *pool);

void zpool_destroy_pool(struct zpool *pool);

int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
unsigned long *handle);

void zpool_free(struct zpool *pool, unsigned long handle);

int zpool_shrink(struct zpool *pool, unsigned int pages,
unsigned int *reclaimed);

void *zpool_map_handle(struct zpool *pool, unsigned long handle,
enum zpool_mapmode mm);

void zpool_unmap_handle(struct zpool *pool, unsigned long handle);

u64 zpool_get_total_size(struct zpool *pool);


/**
* struct zpool_driver - driver implementation for zpool
* @type: name of the driver.
* @list: entry in the list of zpool drivers.
* @create: create a new pool.
* @destroy: destroy a pool.
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
* @shrink: shrink the pool.
* @map: map a handle.
* @unmap: unmap a handle.
* @total_size: get total size of a pool.
*
* This is created by a zpool implementation and registered
* with zpool.
*/
struct zpool_driver {
char *type;
struct module *owner;
atomic_t refcount;
struct list_head list;

void *(*create)(gfp_t gfp, struct zpool_ops *ops);
void (*destroy)(void *pool);

int (*malloc)(void *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);

int (*shrink)(void *pool, unsigned int pages,
unsigned int *reclaimed);

void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
void (*unmap)(void *pool, unsigned long handle);

u64 (*total_size)(void *pool);
};

void zpool_register_driver(struct zpool_driver *driver);

int zpool_unregister_driver(struct zpool_driver *driver);

int zpool_evict(void *pool, unsigned long handle);

#endif
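For illustration only (not part of this patch): a sketch of how a caller might drive the consumer half of the API declared above, assuming the "zbud" backend is available; the size, payload and error handling are made up.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int zpool_demo(void)
{
	static char type[] = "zbud";	/* assumed backend */
	struct zpool *pool;
	unsigned long handle;
	char *buf;
	int ret;

	pool = zpool_create_pool(type, GFP_KERNEL, NULL);
	if (!pool)
		return -ENOMEM;

	ret = zpool_malloc(pool, 64, GFP_KERNEL, &handle);
	if (ret)
		goto out;

	/* write-only mapping: nothing is copied in on map */
	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memcpy(buf, "hello", 6);
	zpool_unmap_handle(pool, handle);

	zpool_free(pool, handle);
out:
	zpool_destroy_pool(pool);
	return ret;
}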
@ -17,6 +17,7 @@
{MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
{MR_SYSCALL, "syscall_or_cpuset"}, \
{MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
{MR_NUMA_MISPLACED, "numa_misplaced"}, \
{MR_CMA, "cma"}

TRACE_EVENT(mm_migrate_pages,
@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion,

TP_PROTO(
struct page *page,
unsigned long pfn,
int lru,
unsigned long flags
int lru
),

TP_ARGS(page, pfn, lru, flags),
TP_ARGS(page, lru),

TP_STRUCT__entry(
__field(struct page *, page )

@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion,

TP_fast_assign(
__entry->page = page;
__entry->pfn = pfn;
__entry->pfn = page_to_pfn(page);
__entry->lru = lru;
__entry->flags = flags;
__entry->flags = trace_pagemap_flags(page);
),

/* Flag format is based on page-types.c formatting for pagemap */

@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion,

TRACE_EVENT(mm_lru_activate,

TP_PROTO(struct page *page, unsigned long pfn),
TP_PROTO(struct page *page),

TP_ARGS(page, pfn),
TP_ARGS(page),

TP_STRUCT__entry(
__field(struct page *, page )

@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate,

TP_fast_assign(
__entry->page = page;
__entry->pfn = pfn;
__entry->pfn = page_to_pfn(page);
),

/* Flag format is based on page-types.c formatting for pagemap */
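Sketch of the corresponding call sites after this change (assumed context only; the real callers live elsewhere in mm/ and are not shown in this hunk): the pfn and pagemap flags are now derived inside the tracepoints, so callers pass less.

#include <trace/events/pagemap.h>

static inline void demo_trace_lru(struct page *page, int lru)
{
	trace_mm_lru_insertion(page, lru);	/* was (page, pfn, lru, flags) */
	trace_mm_lru_activate(page);		/* was (page, pfn) */
}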
46 init/Kconfig

@ -807,15 +807,53 @@ config LOG_BUF_SHIFT
range 12 21
default 17
help
Select kernel log buffer size as a power of 2.
Select the minimal kernel log buffer size as a power of 2.
The final size is affected by the LOG_CPU_MAX_BUF_SHIFT config
parameter, see below. Any higher size also might be forced
by the "log_buf_len" boot parameter.

Examples:
17 => 128 KB
17 => 128 KB
16 => 64 KB
15 => 32 KB
14 => 16 KB
15 => 32 KB
14 => 16 KB
13 => 8 KB
12 => 4 KB

config LOG_CPU_MAX_BUF_SHIFT
int "CPU kernel log buffer size contribution (13 => 8 KB, 17 => 128KB)"
range 0 21
default 12 if !BASE_SMALL
default 0 if BASE_SMALL
help
This option allows increasing the default ring buffer size
according to the number of CPUs. The value defines the contribution
of each CPU as a power of 2. The used space is typically only a few
lines, however it might be much more when problems are reported,
e.g. backtraces.

The increased size means that a new buffer has to be allocated and
the original static one is unused. It makes sense only on systems
with more CPUs. Therefore this value is used only when the sum of
contributions is greater than half of the default kernel ring
buffer as defined by LOG_BUF_SHIFT. The default values are set
so that more than 64 CPUs are needed to trigger the allocation.

Also this option is ignored when the "log_buf_len" kernel parameter is
used, as it forces an exact (power of two) size of the ring buffer.

The number of possible CPUs is used for this computation, ignoring
hotplugging, making the computation optimal for the worst case
scenario while allowing a simple algorithm to be used from bootup.

Example shift values and their meaning:
17 => 128 KB for each CPU
16 => 64 KB for each CPU
15 => 32 KB for each CPU
14 => 16 KB for each CPU
13 => 8 KB for each CPU
12 => 4 KB for each CPU

#
# Architectures with an unreliable sched_clock() should select this:
#
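A worked example of the sizing rule described above (the configuration values and CPU count are assumptions, not from this patch):

/* Assumed configuration, for illustration:
 *   LOG_BUF_SHIFT         = 17  ->  128 KB static buffer
 *   LOG_CPU_MAX_BUF_SHIFT = 12  ->    4 KB per possible CPU
 *   num_possible_cpus()   = 128
 *
 * cpu_extra = (128 - 1) * 4 KB = 508 KB, which exceeds 128 KB / 2,
 * so the boot-time buffer becomes
 * roundup_pow_of_two(128 KB + 508 KB) = 1024 KB (1 MB);
 * a small SMP box keeps the static 128 KB buffer.
 */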
@ -106,7 +106,7 @@ static inline struct audit_entry *audit_init_entry(u32 field_count)
if (unlikely(!entry))
return NULL;

fields = kzalloc(sizeof(*fields) * field_count, GFP_KERNEL);
fields = kcalloc(field_count, sizeof(*fields), GFP_KERNEL);
if (unlikely(!fields)) {
kfree(entry);
return NULL;

@ -160,7 +160,7 @@ static __u32 *classes[AUDIT_SYSCALL_CLASSES];

int __init audit_register_class(int class, unsigned *list)
{
__u32 *p = kzalloc(AUDIT_BITMASK_SIZE * sizeof(__u32), GFP_KERNEL);
__u32 *p = kcalloc(AUDIT_BITMASK_SIZE, sizeof(__u32), GFP_KERNEL);
if (!p)
return -ENOMEM;
while (*list != ~0U) {
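The same conversion pattern, shown as a free-standing sketch (the helper name is hypothetical): kcalloc(n, size, flags) zeroes the allocation like kzalloc(n * size, flags), but returns NULL instead of silently truncating if n * size would overflow.

#include <linux/slab.h>
#include <linux/types.h>

static __u32 *demo_alloc_bitmask(size_t words)
{
	/* overflow-checked equivalent of kzalloc(words * sizeof(__u32), ...) */
	return kcalloc(words, sizeof(__u32), GFP_KERNEL);
}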
@ -455,6 +455,7 @@ static void exit_mm(struct task_struct * tsk)
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
clear_thread_flag(TIF_MEMDIE);
}

/*
@ -45,6 +45,7 @@
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/utsname.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>

@ -56,7 +57,7 @@

int console_printk[4] = {
CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
};

@ -113,9 +114,9 @@ static int __down_trylock_console_sem(unsigned long ip)
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
* definitely not the perfect debug tool (we don't know if _WE_
* hold it are racing, but it helps tracking those weird code
* path in the console code where we end up in places I want
* locked without the console sempahore held
* hold it and are racing, but it helps tracking those weird code
* paths in the console code where we end up in places I want
* locked without the console sempahore held).
*/
static int console_locked, console_suspended;

@ -146,8 +147,8 @@ static int console_may_schedule;
* the overall length of the record.
*
* The heads to the first and last entry in the buffer, as well as the
* sequence numbers of these both entries are maintained when messages
* are stored..
* sequence numbers of these entries are maintained when messages are
* stored.
*
* If the heads indicate available messages, the length in the header
* tells the start next message. A length == 0 for the next message

@ -257,7 +258,7 @@ static u64 clear_seq;
static u32 clear_idx;

#define PREFIX_MAX 32
#define LOG_LINE_MAX 1024 - PREFIX_MAX
#define LOG_LINE_MAX (1024 - PREFIX_MAX)

/* record buffer */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)

@ -266,6 +267,7 @@ static u32 clear_idx;
#define LOG_ALIGN __alignof__(struct printk_log)
#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

@ -344,7 +346,7 @@ static int log_make_free_space(u32 msg_size)
while (log_first_seq < log_next_seq) {
if (logbuf_has_space(msg_size, false))
return 0;
/* drop old messages until we have enough continuous space */
/* drop old messages until we have enough contiguous space */
log_first_idx = log_next(log_first_idx);
log_first_seq++;
}

@ -453,11 +455,7 @@ static int log_store(int facility, int level,
return msg->text_len;
}

#ifdef CONFIG_SECURITY_DMESG_RESTRICT
int dmesg_restrict = 1;
#else
int dmesg_restrict;
#endif
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
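The dmesg_restrict change above is an instance of a general pattern; a sketch with a hypothetical CONFIG_FOO option (not from this diff): IS_ENABLED() evaluates to 1 at compile time when the option is y (or m for tristates) and 0 otherwise, so the #ifdef/#else block collapses into a single initializer.

#include <linux/kconfig.h>

/* before:					after:
 * #ifdef CONFIG_FOO
 * static int foo_enabled = 1;			static int foo_enabled =
 * #else						IS_ENABLED(CONFIG_FOO);
 * static int foo_enabled;
 * #endif
 */
static int foo_enabled = IS_ENABLED(CONFIG_FOO);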
@ -828,34 +826,74 @@ void log_buf_kexec_setup(void)
/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(unsigned size)
{
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
new_log_buf_len = size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
unsigned size = memparse(str, &str);

if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
new_log_buf_len = size;
log_buf_len_update(size);

return 0;
}
early_param("log_buf_len", log_buf_len_setup);

static void __init log_buf_add_cpu(void)
{
unsigned int cpu_extra;

/*
* archs should set up cpu_possible_bits properly with
* set_cpu_possible() after setup_arch() but just in
* case lets ensure this is valid.
*/
if (num_possible_cpus() == 1)
return;

cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

/* by default this will only continue through for large > 64 CPUs */
if (cpu_extra <= __LOG_BUF_LEN / 2)
return;

pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
__LOG_CPU_MAX_BUF_LEN);
pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
cpu_extra);
pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}

void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
int free;

if (log_buf != __log_buf)
return;

if (!early && !new_log_buf_len)
log_buf_add_cpu();

if (!new_log_buf_len)
return;

if (early) {
new_log_buf =
memblock_virt_alloc(new_log_buf_len, PAGE_SIZE);
memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
} else {
new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len, 0);
new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
LOG_ALIGN);
}

if (unlikely(!new_log_buf)) {

@ -872,7 +910,7 @@ void __init setup_log_buf(int early)
memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);

pr_info("log_buf_len: %d\n", log_buf_len);
pr_info("log_buf_len: %d bytes\n", log_buf_len);
pr_info("early log buf free: %d(%d%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
}
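Usage note on the path above (the value is an assumed example): log_buf_len_setup() parses the boot parameter with memparse(), so the usual K/M/G suffixes work, and log_buf_len_update() rounds the result up to a power of two; an explicit length also bypasses the per-CPU scaling, because setup_log_buf() only calls log_buf_add_cpu() when no length was requested.

/* Example (assumed value): booting with log_buf_len=3M
 *   memparse("3M")          = 3145728
 *   roundup_pow_of_two(...) = 4194304  -> 4 MB ring buffer
 */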
@ -881,7 +919,7 @@ static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
ignore_loglevel = 1;
ignore_loglevel = true;
pr_info("debug: ignoring loglevel setting.\n");

return 0;

@ -947,11 +985,7 @@ static inline void boot_delay_msec(int level)
}
#endif

#if defined(CONFIG_PRINTK_TIME)
static bool printk_time = 1;
#else
static bool printk_time;
#endif
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_time(u64 ts, char *buf)

@ -1310,7 +1344,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
* for pending data, not the size; return the count of
* records, not the length.
*/
error = log_next_idx - syslog_idx;
error = log_next_seq - syslog_seq;
} else {
u64 seq = syslog_seq;
u32 idx = syslog_idx;

@ -1416,10 +1450,9 @@ static int have_callable_console(void)
/*
* Can we actually use the console at this time on this cpu?
*
* Console drivers may assume that per-cpu resources have
* been allocated. So unless they're explicitly marked as
* being able to cope (CON_ANYTIME) don't call them until
* this CPU is officially up.
* Console drivers may assume that per-cpu resources have been allocated. So
* unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
* call them until this CPU is officially up.
*/
static inline int can_use_console(unsigned int cpu)
{

@ -1432,8 +1465,10 @@ static inline int can_use_console(unsigned int cpu)
* console_lock held, and 'console_locked' set) if it
* is successful, false otherwise.
*/
static int console_trylock_for_printk(unsigned int cpu)
static int console_trylock_for_printk(void)
{
unsigned int cpu = smp_processor_id();

if (!console_trylock())
return 0;
/*

@ -1476,7 +1511,7 @@ static struct cont {
struct task_struct *owner; /* task of first print*/
u64 ts_nsec; /* time of first print */
u8 level; /* log level of first message */
u8 facility; /* log level of first message */
u8 facility; /* log facility of first message */
enum log_flags flags; /* prefix, newline flags */
bool flushed:1; /* buffer sealed and committed */
} cont;

@ -1608,7 +1643,8 @@ asmlinkage int vprintk_emit(int facility, int level,
*/
if (!oops_in_progress && !lockdep_recursing(current)) {
recursion_bug = 1;
goto out_restore_irqs;
local_irq_restore(flags);
return 0;
}
zap_locks();
}

@ -1716,21 +1752,30 @@ asmlinkage int vprintk_emit(int facility, int level,

logbuf_cpu = UINT_MAX;
raw_spin_unlock(&logbuf_lock);
lockdep_on();
local_irq_restore(flags);

/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
lockdep_off();
/*
* Disable preemption to avoid being preempted while holding
* console_sem which would prevent anyone from printing to
* console
*/
preempt_disable();

/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers and wake up
* /dev/kmsg and syslog() users.
*/
if (console_trylock_for_printk(this_cpu))
if (console_trylock_for_printk())
console_unlock();
preempt_enable();
lockdep_on();
}

lockdep_on();
out_restore_irqs:
local_irq_restore(flags);
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);

@ -1802,7 +1847,7 @@ EXPORT_SYMBOL(printk);

#define LOG_LINE_MAX 0
#define PREFIX_MAX 0
#define LOG_LINE_MAX 0

static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;

@ -1881,11 +1926,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
return 0;
}
/*
* Set up a list of consoles. Called from init/main.c
* Set up a console. Called via do_early_param() in init/main.c
* for each "console=" parameter in the boot command line.
*/
static int __init console_setup(char *str)
{
char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
char *s, *options, *brl_options = NULL;
int idx;

@ -1902,7 +1948,8 @@ static int __init console_setup(char *str)
strncpy(buf, str, sizeof(buf) - 1);
}
buf[sizeof(buf) - 1] = 0;
if ((options = strchr(str, ',')) != NULL)
options = strchr(str, ',');
if (options)
*(options++) = 0;
#ifdef __sparc__
if (!strcmp(str, "ttya"))

@ -1911,7 +1958,7 @@ static int __init console_setup(char *str)
strcpy(buf, "ttyS1");
#endif
for (s = buf; *s; s++)
if ((*s >= '0' && *s <= '9') || *s == ',')
if (isdigit(*s) || *s == ',')
break;
idx = simple_strtoul(s, NULL, 10);
*s = 0;

@ -1950,7 +1997,6 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
i++, c++)
if (strcmp(c->name, name) == 0 && c->index == idx) {
strlcpy(c->name, name_new, sizeof(c->name));
c->name[sizeof(c->name) - 1] = 0;
c->options = options;
c->index = idx_new;
return i;

@ -1959,12 +2005,12 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
return -1;
}

bool console_suspend_enabled = 1;
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);

static int __init console_suspend_disable(char *str)
{
console_suspend_enabled = 0;
console_suspend_enabled = false;
return 1;
}
__setup("no_console_suspend", console_suspend_disable);

@ -2045,8 +2091,8 @@ EXPORT_SYMBOL(console_lock);
/**
* console_trylock - try to lock the console system for exclusive use.
*
* Tried to acquire a lock which guarantees that the caller has
* exclusive access to the console system and the console_drivers list.
* Try to acquire a lock which guarantees that the caller has exclusive
* access to the console system and the console_drivers list.
*
* returns 1 on success, and 0 on failure to acquire the lock.
*/

@ -2618,14 +2664,13 @@ EXPORT_SYMBOL(__printk_ratelimit);
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msecs)
{
if (*caller_jiffies == 0
|| !time_in_range(jiffies, *caller_jiffies,
*caller_jiffies
+ msecs_to_jiffies(interval_msecs))) {
*caller_jiffies = jiffies;
return true;
}
return false;
unsigned long elapsed = jiffies - *caller_jiffies;

if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
return false;

*caller_jiffies = jiffies;
return true;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
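Typical caller pattern for the simplified helper above (illustration only; the function name and message are made up): emit a warning at most once per second.

#include <linux/printk.h>

static void demo_report_overrun(void)
{
	static unsigned long last_warned;

	if (printk_timed_ratelimit(&last_warned, 1000))	/* 1000 ms */
		pr_warn("demo: buffer overrun detected\n");
}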
@ -670,7 +670,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
if (cond_func(cpu, info)) {
ret = smp_call_function_single(cpu, func,
info, wait);
WARN_ON_ONCE(!ret);
WARN_ON_ONCE(ret);
}
preempt_enable();
}