mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-22 20:23:57 +08:00
f8891e5e1f
The remaining counters in page_state after the zoned VM counter patches have been applied are all just for show in /proc/vmstat. They have no essential function for the VM.

We use a simple increment of per-cpu variables. In order to avoid the most severe races we disable preempt. Preempt does not prevent the race between an increment and an interrupt handler incrementing the same statistics counter. However, that race is exceedingly rare; we may only lose an increment or so, and there is no requirement (at least not in kernel) that the VM event counters have to be accurate.

In the non-preempt case this results in a simple increment for each counter. For many architectures this will be reduced by the compiler to a single instruction. This single instruction is atomic for i386 and x86_64, and therefore even the rare race condition in an interrupt is avoided for both architectures in most cases.

The patchset also adds an off switch for embedded systems that allows building Linux kernels without these counters.

The implementation of these counters is through inline code that hopefully results in only a single increment instruction being emitted (i386, x86_64) or in the increment being hidden through instruction concurrency (EPIC architectures such as ia64 can get that done).

Benefits:
- VM event counter operations usually reduce to a single inline instruction on i386 and x86_64.
- No interrupt disable; only preempt disable for the preempt case. Preempt disable can also be avoided by moving the counter into a spinlock.
- Handling is similar to zoned VM counters.
- Simple and easily extendable.
- Can be omitted to reduce memory use for embedded use.

References:
RFC     http://marc.theaimsgroup.com/?l=linux-kernel&m=113512330605497&w=2
RFC     http://marc.theaimsgroup.com/?l=linux-kernel&m=114988082814934&w=2
local_t http://marc.theaimsgroup.com/?l=linux-kernel&m=114991748606690&w=2
V2      http://marc.theaimsgroup.com/?t=115014808400007&r=1&w=2
V3      http://marc.theaimsgroup.com/?l=linux-kernel&m=115024767022346&w=2
V4      http://marc.theaimsgroup.com/?l=linux-kernel&m=115047968808926&w=2

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
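As a concrete illustration, here is a minimal sketch of the increment path the message describes, written in the style of the patchset. The struct layout and helper names below are an approximation of the patch, not its exact contents; NR_VM_EVENT_ITEMS and the vm_event_item enum (which holds items such as PGMAJFAULT) are assumed to be defined elsewhere.

        /* Sketch only: lightweight per-cpu event counters as described above. */
        struct vm_event_state {
                unsigned long event[NR_VM_EVENT_ITEMS];
        };

        DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

        static inline void count_vm_event(enum vm_event_item item)
        {
                get_cpu_var(vm_event_states).event[item]++; /* disables preemption */
                put_cpu();                                  /* re-enables preemption */
        }

        /* Variant for callers that already run with preemption (or interrupts) off. */
        static inline void __count_vm_event(enum vm_event_item item)
        {
                __get_cpu_var(vm_event_states).event[item]++;
        }

Note that the increment itself is a plain, non-atomic read-modify-write; as the message explains, a lost increment from a racing interrupt is accepted as harmless for these statistics-only counters.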
129 lines
2.9 KiB
C
/*
 *  mmap.c
 *
 *  Copyright (C) 1995, 1996 by Volker Lendecke
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */

#include <linux/stat.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/ncp_fs.h>

#include "ncplib_kernel.h"
#include <asm/uaccess.h>
#include <asm/system.h>

/*
 * Fill in the supplied page for mmap
 */
static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
                                         unsigned long address, int *type)
{
        struct file *file = area->vm_file;
        struct dentry *dentry = file->f_dentry;
        struct inode *inode = dentry->d_inode;
        struct page* page;
        char *pg_addr;
        unsigned int already_read;
        unsigned int count;
        int bufsize;
        int pos;

        page = alloc_page(GFP_HIGHUSER); /* ncpfs has nothing against high pages
                                            as long as recvmsg and memset work on it */
        if (!page)
                return page;
        pg_addr = kmap(page);
        address &= PAGE_MASK;
        pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);

        count = PAGE_SIZE;
        if (address + PAGE_SIZE > area->vm_end) {
                count = area->vm_end - address;
        }
        /* what we can read in one go */
        bufsize = NCP_SERVER(inode)->buffer_size;

        already_read = 0;
        if (ncp_make_open(inode, O_RDONLY) >= 0) {
                while (already_read < count) {
                        int read_this_time;
                        int to_read;

                        to_read = bufsize - (pos % bufsize);

                        to_read = min_t(unsigned int, to_read, count - already_read);

                        if (ncp_read_kernel(NCP_SERVER(inode),
                                            NCP_FINFO(inode)->file_handle,
                                            pos, to_read,
                                            pg_addr + already_read,
                                            &read_this_time) != 0) {
                                read_this_time = 0;
                        }
                        pos += read_this_time;
                        already_read += read_this_time;

                        if (read_this_time < to_read) {
                                break;
                        }
                }
                ncp_inode_close(inode);
        }

        if (already_read < PAGE_SIZE)
                memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
        flush_dcache_page(page);
        kunmap(page);

        /*
         * If I understand ncp_read_kernel() properly, the above always
         * fetches from the network, here the analogue of disk.
         * -- wli
         */
        if (type)
                *type = VM_FAULT_MAJOR;
        count_vm_event(PGMAJFAULT);
        return page;
}

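/*
 * Note: ncp_file_mmap_nopage() is not called directly by ncpfs. The VM
 * core invokes the ->nopage method (via do_no_page() in this kernel era)
 * the first time a process touches a mapped address with no page present,
 * and wires the returned struct page into the process page tables.
 */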
static struct vm_operations_struct ncp_file_mmap =
{
        .nopage = ncp_file_mmap_nopage,
};


/* This is used for a general mmap of a ncp file */
int ncp_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_dentry->d_inode;

        DPRINTK("ncp_mmap: called\n");

        if (!ncp_conn_valid(NCP_SERVER(inode)))
                return -EIO;

        /* only PAGE_COW or read-only supported now */
        if (vma->vm_flags & VM_SHARED)
                return -EINVAL;
        /* we do not support files bigger than 4GB... for now,
           4GB is the limit... */
        if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
           > (1U << (32 - PAGE_SHIFT)))
                return -EFBIG;

        vma->vm_ops = &ncp_file_mmap;
        file_accessed(file);
        return 0;
}
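For context, ncp_mmap() is likewise not invoked directly; it is installed as the .mmap method in ncpfs's file_operations so that the generic mmap() syscall path reaches it. A trimmed sketch of that hookup, assuming the fs/ncpfs/file.c layout of this era (the surrounding methods are shown for orientation only):

        struct file_operations ncp_file_operations =
        {
                .read    = ncp_file_read,
                .write   = ncp_file_write,
                .mmap    = ncp_mmap,     /* installs ncp_file_mmap as vma->vm_ops */
                .release = ncp_release,
        };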