5a0e3ad6af
percpu.h is included by sched.h and module.h, and thus ends up being included
when building most .c files. percpu.h includes slab.h, which in turn includes
gfp.h, making everything defined by the two files universally available and
complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed. Prepare for this
change by updating users of gfp and slab facilities to include those headers
directly instead of assuming their availability. As this conversion needs to
touch a large number of source files, the following script was used as the
basis of the conversion.

	http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following.

* Scan files for gfp and slab usages and update the includes such that only
  the necessary includes are there, ie. if only gfp is used, gfp.h; if slab
  is used, slab.h.

* When the script inserts a new include, it looks at the include blocks and
  tries to place the new include so that its order conforms to its
  surroundings. It is put in the include block which contains core kernel
  includes, in the same order that the rest are ordered - alphabetical,
  Christmas tree, rev-Xmas-tree, or at the end if there doesn't seem to be
  any matching order.

* If the script can't find a place to put a new include (mostly because the
  file doesn't have a fitting include block), it prints out an error message
  indicating which .h file needs to be added to the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly over
   4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000
   slab.h inclusions. The script emitted errors for ~400 files.

2. Each error was manually checked. Some didn't need the inclusion, some
   needed manual addition, and for others adding it to the implementation .h
   or embedding .c file was more appropriate. This step added inclusions to
   around 150 files.

3. The script was run again and the output was compared to the edits from #2
   to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed, e.g.
   lib/decompress_*.c used malloc/free() wrappers around slab APIs, requiring
   slab.h to be added manually.

5. The script was run on all .h files, but without automatically editing
   them, as sprinkling gfp.h and slab.h inclusions around .h files could
   easily lead to inclusion dependency hell. Most gfp.h inclusion directives
   were ignored, as stuff from gfp.h was usually widely available and often
   used in preprocessor macros. Each slab.h inclusion directive was examined
   and added manually as necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures were
   fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed
   build env didn't work with gcov compiles) and a few more options had to be
   turned off depending on the arch to make things build (like ipr on
   powerpc/64, which failed due to missing writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that they could be applied as a
   separate patch and serve as a bisection point.

Given that I had only a couple of failures from the tests in step 7, I'm
fairly confident about the coverage of this conversion patch. If there is a
breakage, it's likely to be something in one of the arch headers, which
should be easily discoverable on most builds of the specific arch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
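For illustration, here is a minimal sketch of the shape such a sweep script
could take. This is not the actual slabh-sweep.py linked above: the usage
regexes and the alphabetical-only ordering heuristic are simplified stand-ins,
and the real script also handled Christmas-tree and rev-Xmas-tree include
orders.

#!/usr/bin/env python
# Illustrative sketch only -- not the actual slabh-sweep.py.  Usage
# detection and include ordering are deliberately simplified.
import re
import sys

# Simplified stand-ins for real gfp/slab usage detection.
GFP_RE = re.compile(r'\bGFP_[A-Z_]+\b|\balloc_pages?\b')
SLAB_RE = re.compile(r'\bk[mzc]alloc\b|\bkfree\b|\bkmem_cache_\w+\b')
INCLUDE_RE = re.compile(r'^#include <(linux/[\w./-]+)>')

def needed_headers(text):
    """Return the headers the file's code actually uses."""
    need = set()
    if GFP_RE.search(text):
        need.add('linux/gfp.h')
    if SLAB_RE.search(text):
        need.add('linux/slab.h')
    return need

def sweep(path):
    with open(path) as f:
        lines = f.readlines()
    have = set()
    for line in lines:
        m = INCLUDE_RE.match(line)
        if m:
            have.add(m.group(1))
    missing = needed_headers(''.join(lines)) - have
    if not missing:
        return
    for hdr in sorted(missing):
        idx = [i for i, l in enumerate(lines) if INCLUDE_RE.match(l)]
        if not idx:
            # No fitting include block: report instead of editing.
            print('%s: add #include <%s> manually' % (path, hdr))
            continue
        # Keep the <linux/...> block alphabetical: insert before the
        # first include that sorts after hdr, else after the last one.
        pos = idx[-1] + 1
        for i in idx:
            if INCLUDE_RE.match(lines[i]).group(1) > hdr:
                pos = i
                break
        lines.insert(pos, '#include <%s>\n' % hdr)
    with open(path, 'w') as f:
        f.writelines(lines)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        sweep(path)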
516 lines
13 KiB
C
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>  /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru(zone, page);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
        free_hot_cold_page(page, 0);
}

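/*
 * A compound page's refcount is carried by its head page; drop that
 * reference and, when it reaches zero, free the whole compound page
 * via its destructor.
 */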
static void put_compound_page(struct page *page)
{
        page = compound_head(page);
        if (put_page_testzero(page)) {
                compound_page_dtor *dtor;

                dtor = get_compound_page_dtor(page);
                (*dtor)(page);
        }
}

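/*
 * Drop a reference to @page; once the refcount reaches zero the page
 * is handed back to the page allocator.
 */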
void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int i;
        int pgmoved = 0;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock(&zone->lru_lock);
                }
                if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                        int lru = page_lru_base_type(page);
                        list_move_tail(&page->lru, &zone->lru[lru].list);
                        pgmoved++;
                }
        }
        if (zone)
                spin_unlock(&zone->lru_lock);
        __count_vm_events(PGROTATED, pgmoved);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

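/*
 * Update the zone's reclaim statistics for @page, and mirror the
 * update into its memory cgroup's statistics when one is present.
 */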
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
        struct zone_reclaim_stat *memcg_reclaim_stat;

        memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;

        if (!memcg_reclaim_stat)
                return;

        memcg_reclaim_stat->recent_scanned[file]++;
        if (rotated)
                memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
                del_page_from_lru_list(zone, page, lru);

                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);

                update_page_reclaim_stat(zone, page, file, 1);
        }
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced   -> active,unreferenced
 * active,unreferenced   -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}

EXPORT_SYMBOL(mark_page_accessed);

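/*
 * Queue @page for addition to the LRU list @lru via the current CPU's
 * lru_add pagevec; the pagevec is flushed to the zone lists once full.
 */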
void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                ____pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        ____pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

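/*
 * Drain the calling CPU's pagevecs; preemption is disabled only for
 * the duration of the drain.
 */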
void lru_add_drain(void)
{
        drain_cpu_pagevecs(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                               flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
                }

                if (!pagevec_add(&pages_to_free, page)) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        __pagevec_free(&pages_to_free);
                        pagevec_reinit(&pages_to_free);
                }
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        int i;
        struct zone *zone = NULL;

        VM_BUG_ON(is_unevictable_lru(lru));

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);
                int file;
                int active;

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                VM_BUG_ON(PageActive(page));
                VM_BUG_ON(PageUnevictable(page));
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
                active = is_active_lru(lru);
                file = is_file_lru(lru);
                if (active)
                        SetPageActive(page);
                update_page_reclaim_stat(zone, page, file, active);
                add_page_to_lru_list(zone, page, lru);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (page_has_private(page) && trylock_page(page)) {
                        if (page_has_private(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

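/*
 * Like pagevec_lookup(), but only returns pages tagged with @tag in
 * the mapping's radix tree, starting at *@index; *@index is updated
 * to continue the search after the last page returned.
 */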
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                      nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}