mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-24 20:54:10 +08:00)
memcg: rename high level charging functions
mem_cgroup_newpage_charge is used only for charging anonymous memory so
it is better to rename it to mem_cgroup_charge_anon.

mem_cgroup_cache_charge is used for file backed memory so rename it to
mem_cgroup_charge_file.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d715ae08f2
parent 6d1fdc4893
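At a glance, the rename splits the high level charging API by the type of
memory being charged. A minimal sketch of the two call patterns, pieced
together from the hunks below (the error labels are illustrative, not taken
from any single call site):

	/* Anonymous memory: charged at new page fault and Copy-On-Write. */
	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
		goto oom;		/* the memcg is at its limit */

	/* File backed memory: charged when the page enters the page cache. */
	error = mem_cgroup_charge_file(page, current->mm,
				       gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;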
@@ -24,7 +24,7 @@ Please note that implementation details can be changed.
 
    a page/swp_entry may be charged (usage += PAGE_SIZE) at
 
-	mem_cgroup_newpage_charge()
+	mem_cgroup_charge_anon()
 	  Called at new page fault and Copy-On-Write.
 
 	mem_cgroup_try_charge_swapin()
@@ -32,7 +32,7 @@ Please note that implementation details can be changed.
 	  Followed by charge-commit-cancel protocol. (With swap accounting)
 	  At commit, a charge recorded in swap_cgroup is removed.
 
-	mem_cgroup_cache_charge()
+	mem_cgroup_charge_file()
 	  Called at add_to_page_cache()
 
 	mem_cgroup_cache_charge_swapin()
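The charge-commit-cancel protocol mentioned above is carried by three swap-in
helpers that this commit does not rename; their declarations appear as context
in the memcontrol.h hunks below. A rough sketch of how a swap-in path drives
the protocol, assuming the signatures of this kernel era (the control flow is
illustrative, not the exact do_swap_page() code):

	struct mem_cgroup *memcg;

	/* Reserve the charge before the page is mapped anywhere. */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		goto out_page;				/* charge failed */

	/* ... try to install the pte ... */
	if (pte_install_failed)				/* illustrative condition */
		mem_cgroup_cancel_charge_swapin(memcg);	/* drop the reservation */
	else
		mem_cgroup_commit_charge_swapin(page, memcg);	/* commit; swap_cgroup record removed */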
@@ -65,7 +65,7 @@ struct mem_cgroup_reclaim_cookie {
  * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
  */
 
-extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_anon(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -74,7 +74,7 @@ extern void mem_cgroup_commit_charge_swapin(struct page *page,
 					struct mem_cgroup *memcg);
 extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
 
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
@@ -233,13 +233,13 @@ void mem_cgroup_print_bad_page(struct page *page);
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
-static inline int mem_cgroup_newpage_charge(struct page *page,
+static inline int mem_cgroup_charge_anon(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline int mem_cgroup_cache_charge(struct page *page,
+static inline int mem_cgroup_charge_file(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
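Note that the !CONFIG_MEMCG stubs above return 0, i.e. the charge always
"succeeds" when memcg is compiled out. That is what lets every call site in
the hunks below charge unconditionally, with no #ifdef guards, e.g. (from the
do_anonymous_page() hunk further down):

	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
		goto oom_free_page;	/* only reachable with CONFIG_MEMCG=y */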
@@ -563,7 +563,7 @@ static int __add_to_page_cache_locked(struct page *page,
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
-	error = mem_cgroup_cache_charge(page, current->mm,
+	error = mem_cgroup_charge_file(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		return error;
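The gfp_mask & GFP_RECLAIM_MASK above passes only the reclaim-related GFP bits
to the charge; page-placement flags meant for the page allocator never reach
memcg. A one-line illustration (GFP_RECLAIM_MASK lives in mm/internal.h; the
exact bit set is version dependent):

	gfp_t charge_gfp = gfp_mask & GFP_RECLAIM_MASK;	/* e.g. __GFP_HIGHMEM is masked off */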
@@ -827,7 +827,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -968,7 +968,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
-			     mem_cgroup_newpage_charge(pages[i], mm,
+			     mem_cgroup_charge_anon(pages[i], mm,
 						       GFP_KERNEL))) {
 			if (pages[i])
 				put_page(pages[i]);
@@ -1101,7 +1101,7 @@ alloc:
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
@@ -2359,7 +2359,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	if (!new_page)
 		return;
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
+	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
 		return;
 
 	/*
@@ -3818,7 +3818,7 @@ out:
 	return ret;
 }
 
-int mem_cgroup_newpage_charge(struct page *page,
+int mem_cgroup_charge_anon(struct page *page,
 			      struct mm_struct *mm, gfp_t gfp_mask)
 {
 	unsigned int nr_pages = 1;
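The nr_pages = 1 default visible above hints that mem_cgroup_charge_anon()
also charges THP pages in one go. A sketch of the usual pattern in
memcontrol.c of this era (an assumption about the part of the body the hunk
truncates):

	unsigned int nr_pages = 1;

	if (PageTransHuge(page))
		nr_pages <<= compound_order(page);	/* one 2MB THP charges 512 pages at once */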
@@ -3954,7 +3954,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page,
 					  MEM_CGROUP_CHARGE_TYPE_ANON);
 }
 
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
@@ -2828,7 +2828,7 @@ gotten:
 	}
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	mmun_start  = address & PAGE_MASK;
@@ -3281,7 +3281,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -3537,7 +3537,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) {
+	if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) {
 		page_cache_release(new_page);
 		return VM_FAULT_OOM;
 	}
@@ -683,7 +683,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
 	 * Charged back to the user (not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+	error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
 	if (error)
 		goto out;
 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
@@ -1080,7 +1080,7 @@ repeat:
 			goto failed;
 		}
 
-		error = mem_cgroup_cache_charge(page, current->mm,
+		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
@@ -1134,7 +1134,7 @@ repeat:
 
 		SetPageSwapBacked(page);
 		__set_page_locked(page);
-		error = mem_cgroup_cache_charge(page, current->mm,
+		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
 		if (error)
 			goto decused;