/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992 Rick Sladkey
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/nfs_fs.h>
#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#ifdef CONFIG_NFS_V4_2
#include "nfs42.h"
#endif

#define NFSDBG_FACILITY NFSDBG_FILE
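
/*
 * Open a file that the VFS found through a cached positive dentry.
 * Negative or missing dentries are handled by ->lookup()/->create(),
 * so this path issues the NFSv4 OPEN itself and returns -EOPENSTALE
 * if the resulting inode no longer matches the dentry.
 */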
static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
	struct nfs_open_context *ctx;
	struct dentry *dentry = filp->f_path.dentry;
	struct dentry *parent = NULL;
	struct inode *dir;
	unsigned openflags = filp->f_flags;
	struct iattr attr;
	int err;

	/*
	 * If no cached dentry exists or if it's negative, NFSv4 handled the
	 * opens in ->lookup() or ->create().
	 *
	 * We only get this far for a cached positive dentry. We skipped
	 * revalidation, so handle it here by dropping the dentry and returning
	 * -EOPENSTALE. The VFS will retry the lookup/create/open.
	 */

	dprintk("NFS: open file(%pd2)\n", dentry);

	err = nfs_check_flags(openflags);
	if (err)
		return err;

	if ((openflags & O_ACCMODE) == 3)
		openflags--;

	/* We can't create new files here */
	openflags &= ~(O_CREAT|O_EXCL);

	parent = dget_parent(dentry);
	dir = d_inode(parent);

	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;

	attr.ia_valid = ATTR_OPEN;
	if (openflags & O_TRUNC) {
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
		nfs_sync_inode(inode);
	}

	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		switch (err) {
		case -EPERM:
		case -EACCES:
		case -EDQUOT:
		case -ENOSPC:
		case -EROFS:
			goto out_put_ctx;
		default:
			goto out_drop;
		}
	}
	if (inode != d_inode(dentry))
		goto out_drop;

	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	nfs_file_set_open_context(filp, ctx);
	nfs_fscache_open_file(inode, filp);
	err = 0;

out_put_ctx:
	put_nfs_open_context(ctx);
out:
	dput(parent);
	return err;

out_drop:
	d_drop(dentry);
	err = -EOPENSTALE;
	goto out_put_ctx;
}

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs4_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/*
	 * If we're holding a write delegation, then check if we're required
	 * to flush the i/o on close. If not, then just start the i/o now.
	 */
	if (!nfs4_delegation_flush_on_close(inode))
		return filemap_fdatawrite(file->f_mapping);

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
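
/*
 * Flush dirty pages and commit them to the server. If
 * nfs_file_fsync_commit() reports a server reboot with -EAGAIN, retry
 * over the whole file so that all dirty pages are resent.
 */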
static int
nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int ret;
	struct inode *inode = file_inode(file);

	trace_nfs_fsync_enter(inode);

	nfs_inode_dio_wait(inode);
	do {
		ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
		if (ret != 0)
			break;
		inode_lock(inode);
		ret = nfs_file_fsync_commit(file, start, end, datasync);
		if (!ret)
			ret = pnfs_sync_inode(inode, !!datasync);
		inode_unlock(inode);
		/*
		 * If nfs_file_fsync_commit detected a server reboot, then
		 * resend all dirty pages that might have been covered by
		 * the NFS_CONTEXT_RESEND_WRITES flag
		 */
		start = 0;
		end = LLONG_MAX;
	} while (ret == -EAGAIN);

	trace_nfs_fsync_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_NFS_V4_2
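/*
 * SEEK_HOLE and SEEK_DATA are mapped onto the NFSv4.2 SEEK operation;
 * if the server does not support it, or for any other whence value,
 * fall back to the generic NFS llseek implementation.
 */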
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_HOLE:
	case SEEK_DATA:
		ret = nfs42_proc_llseek(filep, offset, whence);
		if (ret != -ENOTSUPP)
			return ret;
	default:
		return nfs_file_llseek(filep, offset, whence);
	}
}
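
/*
 * fallocate() support: plain preallocation maps to the NFSv4.2 ALLOCATE
 * operation, and FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE maps to
 * DEALLOCATE. Any other mode combination is rejected with -EOPNOTSUPP.
 */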
static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	long ret;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)))
		return -EOPNOTSUPP;

	ret = inode_newsize_ok(inode, offset + len);
	if (ret < 0)
		return ret;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return nfs42_proc_deallocate(filep, offset, len);
	return nfs42_proc_allocate(filep, offset, len);
}
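
/*
 * Clone a range from src_file into dst_file using the NFSv4.2 CLONE
 * operation. Offsets and count must be aligned to the server's
 * clone_blksize (a short final block is allowed only when the clone
 * extends to EOF of the source). Both inodes are locked in a fixed
 * order, pending writes are flushed first, and the destination range
 * of the page cache is truncated afterwards so later reads fetch the
 * cloned data from the server.
 */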
static int nfs42_clone_file_range(struct file *src_file, loff_t src_off,
		struct file *dst_file, loff_t dst_off, u64 count)
{
	struct inode *dst_inode = file_inode(dst_file);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct inode *src_inode = file_inode(src_file);
	unsigned int bs = server->clone_blksize;
	bool same_inode = false;
	int ret;

	/* check alignment w.r.t. clone_blksize */
	ret = -EINVAL;
	if (bs) {
		if (!IS_ALIGNED(src_off, bs) || !IS_ALIGNED(dst_off, bs))
			goto out;
		if (!IS_ALIGNED(count, bs) && i_size_read(src_inode) != (src_off + count))
			goto out;
	}

	if (src_inode == dst_inode)
		same_inode = true;

	/* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
	if (same_inode) {
		inode_lock(src_inode);
	} else if (dst_inode < src_inode) {
		inode_lock_nested(dst_inode, I_MUTEX_PARENT);
		inode_lock_nested(src_inode, I_MUTEX_CHILD);
	} else {
		inode_lock_nested(src_inode, I_MUTEX_PARENT);
		inode_lock_nested(dst_inode, I_MUTEX_CHILD);
	}

	/* flush all pending writes on both src and dst so that server
	 * has the latest data */
	ret = nfs_sync_inode(src_inode);
	if (ret)
		goto out_unlock;
	ret = nfs_sync_inode(dst_inode);
	if (ret)
		goto out_unlock;

	ret = nfs42_proc_clone(src_file, dst_file, src_off, dst_off, count);

	/* truncate inode page cache of the dst range so that future reads can fetch
	 * new data from server */
	if (!ret)
		truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);

out_unlock:
	if (same_inode) {
		inode_unlock(src_inode);
	} else if (dst_inode < src_inode) {
		inode_unlock(src_inode);
		inode_unlock(dst_inode);
	} else {
		inode_unlock(dst_inode);
		inode_unlock(src_inode);
	}
out:
	return ret;
}
#endif /* CONFIG_NFS_V4_2 */
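
/*
 * NFSv4 file operations. NFSv4.2 builds additionally wire up llseek
 * (SEEK_HOLE/SEEK_DATA), fallocate and clone_file_range support.
 */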
const struct file_operations nfs4_file_operations = {
	.read_iter = nfs_file_read,
	.write_iter = nfs_file_write,
	.mmap = nfs_file_mmap,
	.open = nfs4_file_open,
	.flush = nfs4_file_flush,
	.release = nfs_file_release,
	.fsync = nfs4_file_fsync,
	.lock = nfs_lock,
	.flock = nfs_flock,
	.splice_read = nfs_file_splice_read,
	.splice_write = iter_file_splice_write,
	.check_flags = nfs_check_flags,
	.setlease = simple_nosetlease,
#ifdef CONFIG_NFS_V4_2
	.llseek = nfs4_file_llseek,
	.fallocate = nfs42_fallocate,
	.clone_file_range = nfs42_clone_file_range,
#else
	.llseek = nfs_file_llseek,
#endif
};