mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-28 14:44:10 +08:00
mm: remove unused VM_<READfoo> macros and expand others in-place
These VM_<READfoo> macros aren't used very often and three of them aren't used at all. Expand the ones that are used in-place, and remove all the now unused #define VM_<foo> macros. VM_READHINTMASK, VM_NormalReadHint and VM_ClearReadHint were added just before 2.4 and appear to have never been used. Signed-off-by: Joe Perches <joe@perches.com> Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
73b44ff43c
commit
64363aad5f
@ -151,12 +151,6 @@ extern unsigned int kobjsize(const void *objp);
|
||||
#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
|
||||
#endif
|
||||
|
||||
#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
|
||||
#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
|
||||
#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
|
||||
#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
|
||||
#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
|
||||
|
||||
/*
|
||||
* Special vmas that are non-mergable, non-mlock()able.
|
||||
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
|
||||
|
@ -1539,12 +1539,12 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
|
||||
struct address_space *mapping = file->f_mapping;
|
||||
|
||||
/* If we don't want any read-ahead, don't bother */
|
||||
if (VM_RandomReadHint(vma))
|
||||
if (vma->vm_flags & VM_RAND_READ)
|
||||
return;
|
||||
if (!ra->ra_pages)
|
||||
return;
|
||||
|
||||
if (VM_SequentialReadHint(vma)) {
|
||||
if (vma->vm_flags & VM_SEQ_READ) {
|
||||
page_cache_sync_readahead(mapping, ra, file, offset,
|
||||
ra->ra_pages);
|
||||
return;
|
||||
@ -1584,7 +1584,7 @@ static void do_async_mmap_readahead(struct vm_area_struct *vma,
|
||||
struct address_space *mapping = file->f_mapping;
|
||||
|
||||
/* If we don't want any read-ahead, don't bother */
|
||||
if (VM_RandomReadHint(vma))
|
||||
if (vma->vm_flags & VM_RAND_READ)
|
||||
return;
|
||||
if (ra->mmap_miss > 0)
|
||||
ra->mmap_miss--;
|
||||
|
@ -1150,7 +1150,7 @@ again:
|
||||
if (pte_dirty(ptent))
|
||||
set_page_dirty(page);
|
||||
if (pte_young(ptent) &&
|
||||
likely(!VM_SequentialReadHint(vma)))
|
||||
likely(!(vma->vm_flags & VM_SEQ_READ)))
|
||||
mark_page_accessed(page);
|
||||
rss[MM_FILEPAGES]--;
|
||||
}
|
||||
|
@ -720,7 +720,7 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
|
||||
* mapping is already gone, the unmap path will have
|
||||
* set PG_referenced or activated the page.
|
||||
*/
|
||||
if (likely(!VM_SequentialReadHint(vma)))
|
||||
if (likely(!(vma->vm_flags & VM_SEQ_READ)))
|
||||
referenced++;
|
||||
}
|
||||
pte_unmap_unlock(pte, ptl);
|
||||
|
Loading…
Reference in New Issue
Block a user