Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2025-01-06 13:55:08 +08:00)

Merge branch 'master' into upstream

commit 10e299fc6d
@@ -29,6 +29,10 @@ config GENERIC_HARDIRQS
 	bool
 	default n
 
+config GENERIC_TIME
+	bool
+	default y
+
 config TIME_LOW_RES
 	bool
 	default y
@@ -32,8 +32,6 @@
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-extern unsigned long wall_jiffies;
-
 unsigned long __nongprelbss __clkin_clock_speed_HZ;
 unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
 unsigned long __nongprelbss __res_bus_clock_speed_HZ;
@@ -144,85 +142,6 @@ void time_init(void)
 	time_divisor_init();
 }
 
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick;
-
-	do {
-		unsigned long lost;
-
-		seq = read_seqbegin(&xtime_lock);
-
-		usec = 0;
-		lost = jiffies - wall_jiffies;
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0)) {
-			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-			usec = min(usec, max_ntp_tick);
-
-			if (lost)
-				usec += lost * max_ntp_tick;
-		}
-		else if (unlikely(lost))
-			usec += lost * (USEC_PER_SEC / HZ);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry(&xtime_lock, seq));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	write_seqlock_irq(&xtime_lock);
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time. Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= 0 * NSEC_PER_USEC;
-	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
-
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -1605,8 +1605,8 @@ sys_call_table:
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
 	data8 sys_splice
-	data8 sys_set_robust_list
-	data8 sys_get_robust_list
+	data8 sys_ni_syscall			// reserved for set_robust_list
+	data8 sys_ni_syscall			// reserved for get_robust_list
 	data8 sys_sync_file_range		// 1300
 	data8 sys_tee
 	data8 sys_vmsplice
@@ -197,6 +197,11 @@ start_ap:
 	;;
 	srlz.i
 	;;
+	{
+	flushrs				// must be first insn in group
+	srlz.i
+	}
+	;;
 	/*
 	 * Save the region registers, predicate before they get clobbered
 	 */
@@ -4936,13 +4936,15 @@ abort_locked:
 	if (likely(ctx)) {
 		DPRINT(("context unlocked\n"));
 		UNPROTECT_CTX(ctx, flags);
-		fput(file);
 	}
 
 	/* copy argument back to user, if needed */
 	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
 
 error_args:
+	if (file)
+		fput(file);
+
 	kfree(args_k);
 
 	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
@@ -163,10 +163,25 @@ sys_pipe (void)
 	return retval;
 }
 
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags)
+{
+	unsigned long roff;
+
+	/*
+	 * Don't permit mappings into unmapped space, the virtual page table
+	 * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
+	 * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
+	 */
+	roff = REGION_OFFSET(addr);
+	if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
+		return -EINVAL;
+	return 0;
+}
+
 static inline unsigned long
 do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
 {
-	unsigned long roff;
 	struct file *file = NULL;
 
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
@@ -188,17 +203,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
 		goto out;
 	}
 
-	/*
-	 * Don't permit mappings into unmapped space, the virtual page table of a region,
-	 * or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
-	 * (for some integer n <= 61) and len > 0.
-	 */
-	roff = REGION_OFFSET(addr);
-	if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
-		addr = -EINVAL;
-		goto out;
-	}
-
 	down_write(&current->mm->mmap_sem);
 	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	up_write(&current->mm->mmap_sem);
@@ -565,7 +565,7 @@ static void __init sn_init_pdas(char **cmdline_p)
  * Also sets up a few fields in the nodepda. Also known as
  * platform_cpu_init() by the ia64 machvec code.
  */
-void __init sn_cpu_init(void)
+void __cpuinit sn_cpu_init(void)
 {
 	int cpuid;
 	int cpuphyid;
@@ -219,6 +219,21 @@ out:
 	return err;
 }
 
+int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+{
+	if (ARCH_SUN4C_SUN4 &&
+	    (len > 0x20000000 ||
+	     ((flags & MAP_FIXED) &&
+	      addr < 0xe0000000 && addr + len > 0x20000000)))
+		return -EINVAL;
+
+	/* See asm-sparc/uaccess.h */
+	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* Linux version of mmap */
 static unsigned long do_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
@@ -233,25 +248,13 @@ static unsigned long do_mmap2(unsigned long addr, unsigned long len,
 		goto out;
 	}
 
-	retval = -EINVAL;
 	len = PAGE_ALIGN(len);
-	if (ARCH_SUN4C_SUN4 &&
-	    (len > 0x20000000 ||
-	     ((flags & MAP_FIXED) &&
-	      addr < 0xe0000000 && addr + len > 0x20000000)))
-		goto out_putf;
-
-	/* See asm-sparc/uaccess.h */
-	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
-		goto out_putf;
-
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
 	down_write(&current->mm->mmap_sem);
 	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 	up_write(&current->mm->mmap_sem);
 
-out_putf:
 	if (file)
 		fput(file);
 out:
@@ -548,6 +548,26 @@ asmlinkage long sparc64_personality(unsigned long personality)
 	return ret;
 }
 
+int sparc64_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags)
+{
+	if (test_thread_flag(TIF_32BIT)) {
+		if (len >= STACK_TOP32)
+			return -EINVAL;
+
+		if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
+			return -EINVAL;
+	} else {
+		if (len >= VA_EXCLUDE_START)
+			return -EINVAL;
+
+		if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* Linux version of mmap */
 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,
@@ -563,27 +583,11 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
 	}
 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 	len = PAGE_ALIGN(len);
-	retval = -EINVAL;
-
-	if (test_thread_flag(TIF_32BIT)) {
-		if (len >= STACK_TOP32)
-			goto out_putf;
-
-		if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
-			goto out_putf;
-	} else {
-		if (len >= VA_EXCLUDE_START)
-			goto out_putf;
-
-		if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
-			goto out_putf;
-	}
 
 	down_write(&current->mm->mmap_sem);
 	retval = do_mmap(file, addr, len, prot, flags, off);
 	up_write(&current->mm->mmap_sem);
 
-out_putf:
 	if (file)
 		fput(file);
 out:
@@ -376,6 +376,8 @@ static int proc_ide_read_media
 			break;
 	case ide_floppy:media = "floppy\n";
 			break;
+	case ide_optical:media = "optical\n";
+			break;
 	default:	media = "UNKNOWN\n";
 			break;
 	}
@@ -86,6 +86,8 @@ static const struct {
 	u8 chipset_family;
 	u8 flags;
 } SiSHostChipInfo[] = {
+	{ "SiS968",	PCI_DEVICE_ID_SI_968,	ATA_133 },
+	{ "SiS966",	PCI_DEVICE_ID_SI_966,	ATA_133 },
 	{ "SiS965",	PCI_DEVICE_ID_SI_965,	ATA_133 },
 	{ "SiS745",	PCI_DEVICE_ID_SI_745,	ATA_100 },
 	{ "SiS735",	PCI_DEVICE_ID_SI_735,	ATA_100 },
@@ -1009,11 +1009,14 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
 	buffer_trace_init(&dummy.b_history);
 	err = ext3_get_blocks_handle(handle, inode, block, 1,
 					&dummy, create, 1);
-	if (err == 1) {
+	/*
+	 * ext3_get_blocks_handle() returns number of blocks
+	 * mapped. 0 in case of a HOLE.
+	 */
+	if (err > 0) {
+		if (err > 1)
+			WARN_ON(1);
 		err = 0;
-	} else if (err >= 0) {
-		WARN_ON(1);
-		err = -EIO;
 	}
 	*errp = err;
 	if (!err && buffer_mapped(&dummy)) {
@@ -100,25 +100,6 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 	return atomic_dec_and_test(&dreq->io_count);
 }
 
-/*
- * "size" is never larger than rsize or wsize.
- */
-static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
-{
-	int page_count;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-	BUG_ON(page_count < 0);
-
-	return page_count;
-}
-
-static inline unsigned int nfs_max_pages(unsigned int size)
-{
-	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
-
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
@@ -276,28 +257,24 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int rpages = nfs_max_pages(rsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(rsize,count);
+
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(rpages);
+		data = nfs_readdata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = rsize;
-		if (count < rsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 1, 0, data->pagevec, NULL);
@@ -344,8 +321,10 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+		/* FIXME: Remove this unnecessary math from final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
@@ -524,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-	dreq->commit_data = nfs_commit_alloc(0);
+	dreq->commit_data = nfs_commit_alloc();
 	if (dreq->commit_data != NULL)
 		dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -605,28 +584,24 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int wpages = nfs_max_pages(wsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(wsize,count);
+
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(wpages);
+		data = nfs_writedata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = wsize;
-		if (count < wsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 0, 0, data->pagevec, NULL);
@@ -676,8 +651,11 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+
+		/* FIXME: Remove this useless math from the final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
|
@ -43,13 +43,15 @@ static mempool_t *nfs_rdata_mempool;
|
|||||||
|
|
||||||
#define MIN_POOL_READ (32)
|
#define MIN_POOL_READ (32)
|
||||||
|
|
||||||
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
|
struct nfs_read_data *nfs_readdata_alloc(size_t len)
|
||||||
{
|
{
|
||||||
|
unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||||
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
|
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
|
||||||
|
|
||||||
if (p) {
|
if (p) {
|
||||||
memset(p, 0, sizeof(*p));
|
memset(p, 0, sizeof(*p));
|
||||||
INIT_LIST_HEAD(&p->pages);
|
INIT_LIST_HEAD(&p->pages);
|
||||||
|
p->npages = pagecount;
|
||||||
if (pagecount <= ARRAY_SIZE(p->page_array))
|
if (pagecount <= ARRAY_SIZE(p->page_array))
|
||||||
p->pagevec = p->page_array;
|
p->pagevec = p->page_array;
|
||||||
else {
|
else {
|
||||||
@@ -140,7 +142,7 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
 	int result;
 	struct nfs_read_data *rdata;
 
-	rdata = nfs_readdata_alloc(1);
+	rdata = nfs_readdata_alloc(count);
 	if (!rdata)
 		return -ENOMEM;
 
@@ -336,25 +338,25 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
 	struct nfs_page *req = nfs_list_entry(head->next);
 	struct page *page = req->wb_page;
 	struct nfs_read_data *data;
-	unsigned int rsize = NFS_SERVER(inode)->rsize;
-	unsigned int nbytes, offset;
+	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
+	unsigned int offset;
 	int requests = 0;
 	LIST_HEAD(list);
 
 	nfs_list_remove_request(req);
 
 	nbytes = req->wb_bytes;
-	for(;;) {
-		data = nfs_readdata_alloc(1);
+	do {
+		size_t len = min(nbytes,rsize);
+
+		data = nfs_readdata_alloc(len);
 		if (!data)
 			goto out_bad;
 		INIT_LIST_HEAD(&data->pages);
 		list_add(&data->pages, &list);
 		requests++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
+		nbytes -= len;
+	} while(nbytes != 0);
 	atomic_set(&req->wb_complete, requests);
 
 	ClearPageError(page);
@@ -402,7 +404,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode)
 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
 		return nfs_pagein_multi(head, inode);
 
-	data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages);
+	data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
 	if (!data)
 		goto out_bad;
 
@@ -90,22 +90,13 @@ static mempool_t *nfs_commit_mempool;
 
 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
 
-struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_commit_alloc(void)
 {
 	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->pages);
-		if (pagecount <= ARRAY_SIZE(p->page_array))
-			p->pagevec = p->page_array;
-		else {
-			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
-			if (!p->pagevec) {
-				mempool_free(p, nfs_commit_mempool);
-				p = NULL;
-			}
-		}
 	}
 	return p;
 }
@@ -117,13 +108,15 @@ void nfs_commit_free(struct nfs_write_data *p)
 	mempool_free(p, nfs_commit_mempool);
 }
 
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
+	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->pages);
+		p->npages = pagecount;
 		if (pagecount <= ARRAY_SIZE(p->page_array))
 			p->pagevec = p->page_array;
 		else {
@@ -208,7 +201,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
 	int result, written = 0;
 	struct nfs_write_data *wdata;
 
-	wdata = nfs_writedata_alloc(1);
+	wdata = nfs_writedata_alloc(wsize);
 	if (!wdata)
 		return -ENOMEM;
 
@@ -999,24 +992,24 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
 	struct nfs_page *req = nfs_list_entry(head->next);
 	struct page *page = req->wb_page;
 	struct nfs_write_data *data;
-	unsigned int wsize = NFS_SERVER(inode)->wsize;
-	unsigned int nbytes, offset;
+	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
+	unsigned int offset;
 	int requests = 0;
 	LIST_HEAD(list);
 
 	nfs_list_remove_request(req);
 
 	nbytes = req->wb_bytes;
-	for (;;) {
-		data = nfs_writedata_alloc(1);
+	do {
+		size_t len = min(nbytes, wsize);
+
+		data = nfs_writedata_alloc(len);
 		if (!data)
 			goto out_bad;
 		list_add(&data->pages, &list);
 		requests++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
+		nbytes -= len;
+	} while (nbytes != 0);
 	atomic_set(&req->wb_complete, requests);
 
 	ClearPageError(page);
@@ -1070,7 +1063,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 	struct nfs_write_data *data;
 	unsigned int count;
 
-	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
+	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
 	if (!data)
 		goto out_bad;
 
@@ -1378,7 +1371,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 	struct nfs_write_data *data;
 	struct nfs_page *req;
 
-	data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
+	data = nfs_commit_alloc();
 
 	if (!data)
 		goto out_bad;
@@ -49,6 +49,7 @@ DEFINE_SPINLOCK(sb_lock);
 
 /**
  *	alloc_super	-	create new superblock
+ *	@type:	filesystem type superblock should belong to
  *
  *	Allocates and initializes a new &struct super_block.  alloc_super()
  *	returns a pointer new superblock or %NULL if allocation had failed.
@@ -1,5 +1,5 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += boot.h cpufeature.h debugreg.h ldt.h setup.h ucontext.h
+header-y += boot.h debugreg.h ldt.h setup.h ucontext.h
 
 unifdef-y += mtrr.h vm86.h
@@ -22,4 +22,12 @@
 #define MCL_CURRENT	1		/* lock all current mappings */
 #define MCL_FUTURE	2		/* lock all future mappings */
 
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#define arch_mmap_check	ia64_mmap_check
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags);
+#endif
+#endif
+
 #endif /* _ASM_IA64_MMAN_H */
@@ -286,8 +286,7 @@
 /* 1294, 1295 reserved for pselect/ppoll */
 #define __NR_unshare			1296
 #define __NR_splice			1297
-#define __NR_set_robust_list		1298
-#define __NR_get_robust_list		1299
+/* 1298, 1299 reserved for set_robust_list/get_robust_list */
 #define __NR_sync_file_range		1300
 #define __NR_tee			1301
 #define __NR_vmsplice			1302
@@ -104,7 +104,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 /* PFN start number, because of __MEMORY_START */
 #define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
-#define ARCH_PFN_OFFSET		(FPN_START)
+#define ARCH_PFN_OFFSET		(PFN_START)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_valid(pfn)		(((pfn) - PFN_START) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
@@ -35,4 +35,12 @@
 
 #define MADV_FREE	0x5		/* (Solaris) contents can be freed */
 
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#define arch_mmap_check	sparc_mmap_check
+int sparc_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags);
+#endif
+#endif
+
 #endif /* __SPARC_MMAN_H__ */
@@ -35,4 +35,12 @@
 
 #define MADV_FREE	0x5		/* (Solaris) contents can be freed */
 
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#define arch_mmap_check	sparc64_mmap_check
+int sparc64_mmap_check(unsigned long addr, unsigned long len,
+		unsigned long flags);
+#endif
+#endif
+
 #endif /* __SPARC64_MMAN_H__ */
@@ -7,7 +7,6 @@
 #define LINUX_ATMDEV_H
 
 
-#include <linux/device.h>
 #include <linux/atmapi.h>
 #include <linux/atm.h>
 #include <linux/atmioc.h>
@@ -210,6 +209,7 @@ struct atm_cirange {
 
 #ifdef __KERNEL__
 
+#include <linux/device.h>
 #include <linux/wait.h> /* wait_queue_head_t */
 #include <linux/time.h> /* struct timeval */
 #include <linux/net.h>
@@ -80,6 +80,7 @@ struct hrtimer_sleeper {
  * @get_softirq_time: function to retrieve the current time from the softirq
  * @curr_timer: the timer which is executing a callback right now
  * @softirq_time: the time when running the hrtimer queue in the softirq
+ * @lock_key: the lock_class_key for use with lockdep
  */
 struct hrtimer_base {
 	clockid_t		index;
@@ -56,7 +56,8 @@ typedef union {
 #endif
 } ktime_t;
 
-#define KTIME_MAX		(~((u64)1 << 63))
+#define KTIME_MAX		((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX		(KTIME_MAX / NSEC_PER_SEC)
 
 /*
  * ktime_t definitions when using the 64-bit scalar representation:
@@ -73,6 +74,10 @@ typedef union {
  */
 static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
 {
+#if (BITS_PER_LONG == 64)
+	if (unlikely(secs >= KTIME_SEC_MAX))
+		return (ktime_t){ .tv64 = KTIME_MAX };
+#endif
 	return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
 }
 
@@ -427,7 +427,7 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
 extern void nfs_writedata_release(void *);
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount);
+struct nfs_write_data *nfs_commit_alloc(void);
 void nfs_commit_free(struct nfs_write_data *p);
 #endif
 
@@ -478,7 +478,7 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
 /*
  * Allocate nfs_write_data structures
  */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
+extern struct nfs_write_data *nfs_writedata_alloc(size_t len);
 
 /*
  * linux/fs/nfs/read.c
@@ -492,7 +492,7 @@ extern void nfs_readdata_release(void *data);
 /*
  * Allocate nfs_read_data structures
  */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
+extern struct nfs_read_data *nfs_readdata_alloc(size_t len);
 
 /*
  * linux/fs/nfs3proc.c
@@ -729,7 +729,7 @@ struct nfs_read_data {
 	struct list_head	pages;	/* Coalesced read requests */
 	struct nfs_page		*req;	/* multi ops per nfs_page */
 	struct page		**pagevec;
-	unsigned int		npages;	/* active pages in pagevec */
+	unsigned int		npages;	/* Max length of pagevec */
 	struct nfs_readargs args;
 	struct nfs_readres  res;
 #ifdef CONFIG_NFS_V4
@@ -748,7 +748,7 @@ struct nfs_write_data {
 	struct list_head	pages;		/* Coalesced requests we wish to flush */
 	struct nfs_page		*req;		/* multi ops per nfs_page */
 	struct page		**pagevec;
-	unsigned int		npages;		/* active pages in pagevec */
+	unsigned int		npages;		/* Max length of pagevec */
 	struct nfs_writeargs	args;		/* argument struct */
 	struct nfs_writeres	res;		/* result struct */
 #ifdef CONFIG_NFS_V4
@@ -648,6 +648,8 @@
 #define PCI_DEVICE_ID_SI_962		0x0962
 #define PCI_DEVICE_ID_SI_963		0x0963
 #define PCI_DEVICE_ID_SI_965		0x0965
+#define PCI_DEVICE_ID_SI_966		0x0966
+#define PCI_DEVICE_ID_SI_968		0x0968
 #define PCI_DEVICE_ID_SI_5511		0x5511
 #define PCI_DEVICE_ID_SI_5513		0x5513
 #define PCI_DEVICE_ID_SI_5517		0x5517
@@ -1120,9 +1120,10 @@ static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
-			    struct hrtimer_sleeper *to)
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+			 long nsec, int trylock)
 {
+	struct hrtimer_sleeper timeout, *to = NULL;
 	struct task_struct *curr = current;
 	struct futex_hash_bucket *hb;
 	u32 uval, newval, curval;
@@ -1132,6 +1133,13 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (refill_pi_state_cache())
 		return -ENOMEM;
 
+	if (sec != MAX_SCHEDULE_TIMEOUT) {
+		to = &timeout;
+		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+		hrtimer_init_sleeper(to, current);
+		to->timer.expires = ktime_set(sec, nsec);
+	}
+
 	q.pi_state = NULL;
  retry:
 	down_read(&curr->mm->mmap_sem);
@@ -1307,7 +1315,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (!detect && ret == -EDEADLK && 0)
 		force_sig(SIGKILL, current);
 
-	return ret;
+	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
  out_unlock_release_sem:
 	queue_unlock(&q, hb);
@@ -1341,76 +1349,6 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	return ret;
 }
 
-/*
- * Restart handler
- */
-static long futex_lock_pi_restart(struct restart_block *restart)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	int ret;
-
-	restart->fn = do_no_restart_syscall;
-
-	if (restart->arg2 || restart->arg3) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
-			(u64) restart->arg0;
-	}
-
-	pr_debug("lock_pi restart: %p, %d (%d)\n",
-		 (u32 __user *)restart->arg0, current->pid);
-
-	ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
-			       0, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	restart->fn = futex_lock_pi_restart;
-
-	/* The other values are filled in */
-	return -ERESTART_RESTARTBLOCK;
-}
-
-/*
- * Called from the syscall entry below.
- */
-static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
-			 long nsec, int trylock)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	struct restart_block *restart;
-	int ret;
-
-	if (sec != MAX_SCHEDULE_TIMEOUT) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires = ktime_set(sec, nsec);
-	}
-
-	ret = do_futex_lock_pi(uaddr, detect, trylock, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
-
-	restart = &current_thread_info()->restart_block;
-	restart->fn = futex_lock_pi_restart;
-	restart->arg0 = (unsigned long) uaddr;
-	restart->arg1 = detect;
-	if (to) {
-		restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
-		restart->arg3 = to->timer.expires.tv64 >> 32;
-	} else
-		restart->arg2 = restart->arg3 = 0;
-
-	return -ERESTART_RESTARTBLOCK;
-}
-
 /*
  * Userspace attempted a TID -> 0 atomic transition, and failed.
  * This is the in-kernel slowpath: we look up the PI state (if any),
@@ -173,7 +173,7 @@ const char *print_tainted(void)
 
 void add_taint(unsigned flag)
 {
-	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
+	debug_locks = 0; /* can't trust the integrity of the kernel anymore */
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
@@ -56,7 +56,7 @@ config PM_TRACE
 
 config SOFTWARE_SUSPEND
 	bool "Software Suspend"
-	depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
+	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
 	---help---
 	  Enable the possibility of suspending the machine.
 	  It doesn't need ACPI or APM.
@@ -78,6 +78,10 @@ config SOFTWARE_SUSPEND
 
 	  For more information take a look at <file:Documentation/power/swsusp.txt>.
 
+	  (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386.
+	  we need identity mapping for resume to work, and that is trivial
+	  to get with 4MB pages, but less than trivial on PAE).
+
 config PM_STD_PARTITION
 	string "Default resume partition"
 	depends on SOFTWARE_SUSPEND
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(_write_trylock);
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_PROVE_LOCKING)
+	defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {

mm/mmap.c (17 lines changed)
@@ -30,6 +30,10 @@
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)	(0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -913,6 +917,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	if (!len)
 		return -EINVAL;
 
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE)
@@ -1859,6 +1867,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	unsigned long flags;
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
+	int error;
 
 	len = PAGE_ALIGN(len);
 	if (!len)
@@ -1867,6 +1876,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
 		return -EINVAL;
 
+	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+	error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
 	/*
 	 * mlock MCL_FUTURE?
 	 */
@@ -1907,8 +1922,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (security_vm_enough_memory(len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
 	/* Can we just expand an old private anonymous mapping? */
 	if (vma_merge(mm, prev, addr, addr + len, flags,
 					NULL, NULL, pgoff, NULL))
@@ -68,10 +68,10 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 		return 0;
 
 	write_lock_irq(&mapping->tree_lock);
-	if (PageDirty(page)) {
-		write_unlock_irq(&mapping->tree_lock);
-		return 0;
-	}
+	if (PageDirty(page))
+		goto failed;
+	if (page_count(page) != 2)	/* caller's ref + pagecache ref */
+		goto failed;
 
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
|
|||||||
ClearPageUptodate(page);
|
ClearPageUptodate(page);
|
||||||
page_cache_release(page); /* pagecache ref */
|
page_cache_release(page); /* pagecache ref */
|
||||||
return 1;
|
return 1;
|
||||||
|
failed:
|
||||||
|
write_unlock_irq(&mapping->tree_lock);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|