/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
2013-05-18 15:03:58 +08:00
|
|
|
static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
|
2013-01-15 11:23:28 +08:00
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(dir);
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(dir);
|
|
|
|
|
|
|
|
if (!fc->do_readdirplus)
|
|
|
|
return false;
|
2013-02-07 06:29:01 +08:00
|
|
|
if (!fc->readdirplus_auto)
|
|
|
|
return true;
|
2013-01-15 11:23:28 +08:00
|
|
|
if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
|
|
|
|
return true;
|
2013-05-18 15:03:58 +08:00
|
|
|
if (ctx->pos == 0)
|
2013-01-15 11:23:28 +08:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void fuse_advise_use_readdirplus(struct inode *dir)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(dir);
|
|
|
|
|
|
|
|
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
|
|
|
|
}
|
|
|
|
|
2006-07-30 18:04:10 +08:00
|
|
|
#if BITS_PER_LONG >= 64
/* On 64-bit archs d_time holds the full 64-bit jiffies expiry directly. */
static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_time = time;
}

static inline u64 fuse_dentry_time(struct dentry *entry)
{
	return entry->d_time;
}
#else
/*
 * On 32 bit archs store the high 32 bits of time in d_fsdata
 */
static void fuse_dentry_settime(struct dentry *entry, u64 time)
{
	/* d_time keeps the low 32 bits (assignment truncates)... */
	entry->d_time = time;
	/* ...and d_fsdata smuggles the high 32 bits as a pointer value */
	entry->d_fsdata = (void *) (unsigned long) (time >> 32);
}

static u64 fuse_dentry_time(struct dentry *entry)
{
	/* Reassemble the 64-bit expiry from the two 32-bit halves */
	return (u64) entry->d_time +
		((u64) (unsigned long) entry->d_fsdata << 32);
}
#endif
|
|
|
|
|
/*
 * FUSE caches dentries and attributes with separate timeout.  The
 * time in jiffies until the dentry/attributes are valid is stored in
 * dentry->d_time and fuse_inode->i_time respectively.
 */

/*
 * Calculate the time in jiffies until a dentry/attributes are valid
 */
|
|
|
static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
|
2005-09-10 04:10:28 +08:00
|
|
|
{
|
2006-07-30 18:04:08 +08:00
|
|
|
if (sec || nsec) {
|
|
|
|
struct timespec ts = {sec, nsec};
|
2006-07-30 18:04:10 +08:00
|
|
|
return get_jiffies_64() + timespec_to_jiffies(&ts);
|
2006-07-30 18:04:08 +08:00
|
|
|
} else
|
2006-07-30 18:04:10 +08:00
|
|
|
return 0;
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/*
|
|
|
|
* Set dentry and possibly attribute timeouts from the lookup/mk*
|
|
|
|
* replies
|
|
|
|
*/
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
static void fuse_change_entry_timeout(struct dentry *entry,
|
|
|
|
struct fuse_entry_out *o)
|
2006-01-06 16:19:34 +08:00
|
|
|
{
|
2006-07-30 18:04:10 +08:00
|
|
|
fuse_dentry_settime(entry,
|
|
|
|
time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static u64 attr_timeout(struct fuse_attr_out *o)
|
|
|
|
{
|
|
|
|
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
|
|
|
|
}
|
|
|
|
|
|
|
|
static u64 entry_attr_timeout(struct fuse_entry_out *o)
|
|
|
|
{
|
|
|
|
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
|
2006-01-06 16:19:38 +08:00
|
|
|
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/*
|
|
|
|
* Mark the attributes as stale, so that at the next call to
|
|
|
|
* ->getattr() they will be fetched from userspace
|
|
|
|
*/
|
2006-01-06 16:19:38 +08:00
|
|
|
void fuse_invalidate_attr(struct inode *inode)
|
|
|
|
{
|
2006-07-30 18:04:10 +08:00
|
|
|
get_fuse_inode(inode)->i_time = 0;
|
2006-01-06 16:19:38 +08:00
|
|
|
}
|
|
|
|
|
/**
 * Mark the attributes as stale due to an atime change.  Avoid the invalidate if
 * atime is not used.
 */
|
|
|
|
void fuse_invalidate_atime(struct inode *inode)
{
	/* Read-only mounts never update atime, so nothing to invalidate */
	if (IS_RDONLY(inode))
		return;

	fuse_invalidate_attr(inode);
}
|
|
|
|
|
/*
 * Just mark the entry as stale, so that a next attempt to look it up
 * will result in a new lookup call to userspace
 *
 * This is called when a dentry is about to become negative and the
 * timeout is unknown (unlink, rmdir, rename and in some cases
 * lookup)
 */
|
2008-07-25 16:49:00 +08:00
|
|
|
void fuse_invalidate_entry_cache(struct dentry *entry)
{
	/* Zero expiry == already timed out; next lookup goes to userspace */
	fuse_dentry_settime(entry, 0);
}
|
|
|
|
|
/*
 * Same as fuse_invalidate_entry_cache(), but also try to remove the
 * dentry from the hash
 */
|
2006-01-06 16:19:38 +08:00
|
|
|
static void fuse_invalidate_entry(struct dentry *entry)
{
	/* Drop the dentry from the dcache hash if possible... */
	d_invalidate(entry);
	/* ...and expire its timeout so a relookup hits userspace */
	fuse_invalidate_entry_cache(entry);
}
|
|
|
|
|
2008-07-25 16:49:01 +08:00
|
|
|
static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_req *req,
|
|
|
|
u64 nodeid, struct qstr *name,
|
2005-09-10 04:10:28 +08:00
|
|
|
struct fuse_entry_out *outarg)
|
|
|
|
{
|
2007-10-18 18:07:05 +08:00
|
|
|
memset(outarg, 0, sizeof(struct fuse_entry_out));
|
2005-09-10 04:10:28 +08:00
|
|
|
req->in.h.opcode = FUSE_LOOKUP;
|
2008-07-25 16:49:01 +08:00
|
|
|
req->in.h.nodeid = nodeid;
|
2005-09-10 04:10:28 +08:00
|
|
|
req->in.numargs = 1;
|
2008-07-25 16:49:01 +08:00
|
|
|
req->in.args[0].size = name->len + 1;
|
|
|
|
req->in.args[0].value = name->name;
|
2005-09-10 04:10:28 +08:00
|
|
|
req->out.numargs = 1;
|
2007-10-18 18:07:05 +08:00
|
|
|
if (fc->minor < 9)
|
|
|
|
req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
|
|
|
|
else
|
|
|
|
req->out.args[0].size = sizeof(struct fuse_entry_out);
|
2005-09-10 04:10:28 +08:00
|
|
|
req->out.args[0].value = outarg;
|
|
|
|
}
|
|
|
|
|
2008-04-30 15:54:43 +08:00
|
|
|
u64 fuse_get_attr_version(struct fuse_conn *fc)
|
2007-11-29 08:21:59 +08:00
|
|
|
{
|
|
|
|
u64 curr_version;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The spin lock isn't actually needed on 64bit archs, but we
|
|
|
|
* don't yet care too much about such optimizations.
|
|
|
|
*/
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
curr_version = fc->attr_version;
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
|
|
|
|
return curr_version;
|
|
|
|
}
|
|
|
|
|
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup.  If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more.  If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 */
|
2012-06-11 04:03:43 +08:00
|
|
|
static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
	struct inode *inode;
	struct dentry *parent;
	struct fuse_conn *fc;
	struct fuse_inode *fi;
	int ret;

	inode = ACCESS_ONCE(entry->d_inode);
	if (inode && is_bad_inode(inode))
		goto invalid;
	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64())) {
		/* Timeout expired: redo the lookup to userspace */
		int err;
		struct fuse_entry_out outarg;
		struct fuse_req *req;
		struct fuse_forget_link *forget;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			goto invalid;

		/* Can't send a request (may sleep) under RCU-walk */
		ret = -ECHILD;
		if (flags & LOOKUP_RCU)
			goto out;

		fc = get_fuse_conn(inode);
		req = fuse_get_req_nopages(fc);
		ret = PTR_ERR(req);
		if (IS_ERR(req))
			goto out;

		/*
		 * Pre-allocate a forget link: if the lookup returns a
		 * different nodeid, its lookup count must be given back.
		 */
		forget = fuse_alloc_forget();
		if (!forget) {
			fuse_put_request(fc, req);
			ret = -ENOMEM;
			goto out;
		}

		/* Snapshot before the request, to detect racing updates */
		attr_version = fuse_get_attr_version(fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
				 &entry->d_name, &outarg);
		fuse_request_send(fc, req);
		dput(parent);
		err = req->out.h.error;
		fuse_put_request(fc, req);
		/* Zero nodeid is same as -ENOENT */
		if (!err && !outarg.nodeid)
			err = -ENOENT;
		if (!err) {
			fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode)) {
				/* Name now refers to another inode: drop us */
				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
				goto invalid;
			}
			/* Same inode: account the extra lookup reference */
			spin_lock(&fc->lock);
			fi->nlookup++;
			spin_unlock(&fc->lock);
		}
		kfree(forget);
		/* A file-type change also invalidates the dentry */
		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
			goto invalid;

		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	} else if (inode) {
		/* Still valid; maybe advise readdirplus for the parent */
		fi = get_fuse_inode(inode);
		if (flags & LOOKUP_RCU) {
			/* Consuming the bit needs dget_parent: punt to ref-walk */
			if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
				return -ECHILD;
		} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
			parent = dget_parent(entry);
			fuse_advise_use_readdirplus(parent->d_inode);
			dput(parent);
		}
	}
	ret = 1;
out:
	return ret;

invalid:
	ret = 0;

	/* Can't drop a dentry hosting submounts; report valid instead */
	if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
		ret = 1;
	goto out;
}
|
|
|
|
|
2006-01-17 14:14:28 +08:00
|
|
|
/* A server-supplied nodeid must be non-zero and not the root's. */
static int invalid_nodeid(u64 nodeid)
{
	if (!nodeid)
		return 1;
	return nodeid == FUSE_ROOT_ID;
}
|
|
|
|
|
2009-02-20 13:59:13 +08:00
|
|
|
/* Dentry operations for FUSE: only revalidation needs special handling. */
const struct dentry_operations fuse_dentry_operations = {
	.d_revalidate	= fuse_dentry_revalidate,
};
|
|
|
|
|
2007-04-09 07:04:00 +08:00
|
|
|
int fuse_valid_type(int m)
|
2006-01-06 16:19:43 +08:00
|
|
|
{
|
|
|
|
return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
|
|
|
|
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
|
|
|
|
}
|
|
|
|
|
2008-07-25 16:49:01 +08:00
|
|
|
/*
 * Look up @name under directory @nodeid and instantiate an inode for
 * the result.
 *
 * On success *inode holds a reference to the found inode and @outarg
 * the raw reply (for entry/attr timeouts).  A reply with nodeid 0 is
 * treated as success with *inode == NULL ("negative, with valid
 * timeout").  Returns 0 or a negative errno; the pre-allocated forget
 * link is queued if the inode can't be instantiated, so the server's
 * lookup count stays balanced.
 */
int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	u64 attr_version;
	int err;

	*inode = NULL;
	err = -ENAMETOOLONG;
	if (name->len > FUSE_NAME_MAX)
		goto out;

	req = fuse_get_req_nopages(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget) {
		fuse_put_request(fc, req);
		goto out;
	}

	/* Snapshot before sending, to detect racing attribute updates */
	attr_version = fuse_get_attr_version(fc);

	fuse_lookup_init(fc, req, nodeid, name, outarg);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	/* Zero nodeid is same as -ENOENT, but with valid timeout */
	if (err || !outarg->nodeid)
		goto out_put_forget;

	/*
	 * Note: a second "!outarg->nodeid" check here would be dead code;
	 * the combined test above already handled it.
	 */
	err = -EIO;
	if (!fuse_valid_type(outarg->attr.mode))
		goto out_put_forget;

	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
			   &outarg->attr, entry_attr_timeout(outarg),
			   attr_version);
	err = -ENOMEM;
	if (!*inode) {
		/* Give the lookup count back to the server */
		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
		goto out;
	}
	err = 0;

 out_put_forget:
	kfree(forget);
 out:
	return err;
}
|
|
|
|
|
|
|
|
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
				  unsigned int flags)
{
	int err;
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *newent;
	bool outarg_valid = true;

	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
			       &outarg, &inode);
	/* -ENOENT becomes a negative dentry, not an error */
	if (err == -ENOENT) {
		outarg_valid = false;
		err = 0;
	}
	if (err)
		goto out_err;

	/* A second alias for the root would confuse nodeid accounting */
	err = -EIO;
	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
		goto out_iput;

	/* May return an existing alias for this inode instead of @entry */
	newent = d_materialise_unique(entry, inode);
	err = PTR_ERR(newent);
	if (IS_ERR(newent))
		goto out_err;

	entry = newent ? newent : entry;
	if (outarg_valid)
		fuse_change_entry_timeout(entry, &outarg);
	else
		/* negative: unknown timeout, force relookup next time */
		fuse_invalidate_entry_cache(entry);

	fuse_advise_use_readdirplus(dir);
	return newent;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
|
|
|
|
|
/*
 * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
 * 'mknod' + 'open' requests.
 */
|
2012-06-22 16:39:14 +08:00
|
|
|
static int fuse_create_open(struct inode *dir, struct dentry *entry,
			    struct file *file, unsigned flags,
			    umode_t mode, int *opened)
{
	int err;
	struct inode *inode;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req;
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out outopen;
	struct fuse_entry_out outentry;
	struct fuse_file *ff;

	/* Userspace expects S_IFREG in create mode */
	BUG_ON((mode & S_IFMT) != S_IFREG);

	/* Pre-allocate the forget link needed if fuse_iget() fails later */
	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget)
		goto out_err;

	req = fuse_get_req_nopages(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out_put_forget_req;

	err = -ENOMEM;
	ff = fuse_file_alloc(fc);
	if (!ff)
		goto out_put_request;

	/* Servers without FUSE_DONT_MASK expect the umask pre-applied */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_CREATE;
	req->in.h.nodeid = get_node_id(dir);
	req->in.numargs = 2;
	/* Pre-12 protocol minors used the shorter fuse_open_in layout */
	req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
						sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	req->out.numargs = 2;
	if (fc->minor < 9)
		req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
	else
		req->out.args[0].size = sizeof(outentry);
	req->out.args[0].value = &outentry;
	req->out.args[1].size = sizeof(outopen);
	req->out.args[1].value = &outopen;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err)
		goto out_free_ff;

	/* CREATE must yield a regular file with a sane nodeid */
	err = -EIO;
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
		goto out_free_ff;

	fuse_put_request(fc, req);
	ff->fh = outopen.fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopen.open_flags;
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, entry_attr_timeout(&outentry), 0);
	if (!inode) {
		/* Release the open file handle and the lookup count */
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(ff, flags);
		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
		err = -ENOMEM;
		goto out_err;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	fuse_change_entry_timeout(entry, &outentry);
	/* Parent's mtime/ctime changed */
	fuse_invalidate_attr(dir);
	err = finish_open(file, entry, generic_file_open, opened);
	if (err) {
		fuse_sync_release(ff, flags);
	} else {
		file->private_data = fuse_file_get(ff);
		fuse_finish_open(inode, file);
	}
	return err;

out_free_ff:
	fuse_file_free(ff);
out_put_request:
	fuse_put_request(fc, req);
out_put_forget_req:
	kfree(forget);
out_err:
	return err;
}
|
|
|
|
|
|
|
|
static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);

/*
 * Atomic lookup + open (+ optional create).
 *
 * Looks the dentry up if needed, then either opens the existing inode
 * or creates it via FUSE_CREATE, falling back to mknod+open when the
 * userspace filesystem does not implement CREATE (-ENOSYS).
 */
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
			    struct file *file, unsigned flags,
			    umode_t mode, int *opened)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct dentry *res = NULL;

	if (d_unhashed(entry)) {
		res = fuse_lookup(dir, entry, 0);
		if (IS_ERR(res))
			return PTR_ERR(res);

		if (res)
			entry = res;
	}

	if (!(flags & O_CREAT) || entry->d_inode)
		goto no_open;

	/* Only creates */
	*opened |= FILE_CREATED;

	if (fc->no_create)
		goto mknod;

	err = fuse_create_open(dir, entry, file, flags, mode, opened);
	if (err == -ENOSYS) {
		/* Remember that CREATE is unimplemented; use mknod+open */
		fc->no_create = 1;
		goto mknod;
	}
out_dput:
	dput(res);
	return err;

mknod:
	err = fuse_mknod(dir, entry, mode, 0);
	if (err)
		goto out_dput;
no_open:
	return finish_no_open(file, res);
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/*
|
|
|
|
* Code shared between mknod, mkdir, symlink and link
|
|
|
|
*/
|
2005-09-10 04:10:29 +08:00
|
|
|
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
|
|
|
|
struct inode *dir, struct dentry *entry,
|
2011-07-26 15:17:33 +08:00
|
|
|
umode_t mode)
|
2005-09-10 04:10:29 +08:00
|
|
|
{
|
|
|
|
struct fuse_entry_out outarg;
|
|
|
|
struct inode *inode;
|
|
|
|
int err;
|
2010-12-08 03:16:56 +08:00
|
|
|
struct fuse_forget_link *forget;
|
2006-11-26 03:09:20 +08:00
|
|
|
|
2010-12-08 03:16:56 +08:00
|
|
|
forget = fuse_alloc_forget();
|
|
|
|
if (!forget) {
|
2006-11-26 03:09:20 +08:00
|
|
|
fuse_put_request(fc, req);
|
2010-12-08 03:16:56 +08:00
|
|
|
return -ENOMEM;
|
2006-11-26 03:09:20 +08:00
|
|
|
}
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2007-10-18 18:07:05 +08:00
|
|
|
memset(&outarg, 0, sizeof(outarg));
|
2005-09-10 04:10:29 +08:00
|
|
|
req->in.h.nodeid = get_node_id(dir);
|
|
|
|
req->out.numargs = 1;
|
2007-10-18 18:07:05 +08:00
|
|
|
if (fc->minor < 9)
|
|
|
|
req->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
|
|
|
|
else
|
|
|
|
req->out.args[0].size = sizeof(outarg);
|
2005-09-10 04:10:29 +08:00
|
|
|
req->out.args[0].value = &outarg;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:29 +08:00
|
|
|
err = req->out.h.error;
|
2006-11-26 03:09:20 +08:00
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (err)
|
|
|
|
goto out_put_forget_req;
|
|
|
|
|
2006-01-06 16:19:43 +08:00
|
|
|
err = -EIO;
|
|
|
|
if (invalid_nodeid(outarg.nodeid))
|
2006-11-26 03:09:20 +08:00
|
|
|
goto out_put_forget_req;
|
2006-01-06 16:19:43 +08:00
|
|
|
|
|
|
|
if ((outarg.attr.mode ^ mode) & S_IFMT)
|
2006-11-26 03:09:20 +08:00
|
|
|
goto out_put_forget_req;
|
2006-01-06 16:19:43 +08:00
|
|
|
|
2005-09-10 04:10:29 +08:00
|
|
|
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
&outarg.attr, entry_attr_timeout(&outarg), 0);
|
2005-09-10 04:10:29 +08:00
|
|
|
if (!inode) {
|
2010-12-08 03:16:56 +08:00
|
|
|
fuse_queue_forget(fc, forget, outarg.nodeid, 1);
|
2005-09-10 04:10:29 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2010-12-08 03:16:56 +08:00
|
|
|
kfree(forget);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2013-10-01 22:44:54 +08:00
|
|
|
err = d_instantiate_no_diralias(entry, inode);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2005-09-10 04:10:29 +08:00
|
|
|
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
fuse_change_entry_timeout(entry, &outarg);
|
2005-09-10 04:10:29 +08:00
|
|
|
fuse_invalidate_attr(dir);
|
|
|
|
return 0;
|
2006-01-06 16:19:43 +08:00
|
|
|
|
2006-11-26 03:09:20 +08:00
|
|
|
out_put_forget_req:
|
2010-12-08 03:16:56 +08:00
|
|
|
kfree(forget);
|
2006-01-06 16:19:43 +08:00
|
|
|
return err;
|
2005-09-10 04:10:29 +08:00
|
|
|
}
|
|
|
|
|
2011-07-26 13:52:52 +08:00
|
|
|
static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
|
2005-09-10 04:10:29 +08:00
|
|
|
dev_t rdev)
|
|
|
|
{
|
|
|
|
struct fuse_mknod_in inarg;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(dir);
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2009-07-01 02:12:23 +08:00
|
|
|
if (!fc->dont_mask)
|
|
|
|
mode &= ~current_umask();
|
|
|
|
|
2005-09-10 04:10:29 +08:00
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
inarg.mode = mode;
|
|
|
|
inarg.rdev = new_encode_dev(rdev);
|
2009-07-01 02:12:23 +08:00
|
|
|
inarg.umask = current_umask();
|
2005-09-10 04:10:29 +08:00
|
|
|
req->in.h.opcode = FUSE_MKNOD;
|
|
|
|
req->in.numargs = 2;
|
2009-07-01 02:12:23 +08:00
|
|
|
req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
|
|
|
|
sizeof(inarg);
|
2005-09-10 04:10:29 +08:00
|
|
|
req->in.args[0].value = &inarg;
|
|
|
|
req->in.args[1].size = entry->d_name.len + 1;
|
|
|
|
req->in.args[1].value = entry->d_name.name;
|
|
|
|
return create_new_entry(fc, req, dir, entry, mode);
|
|
|
|
}
|
|
|
|
|
2011-07-26 13:42:34 +08:00
|
|
|
static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
|
2012-06-11 06:05:36 +08:00
|
|
|
bool excl)
|
2005-09-10 04:10:29 +08:00
|
|
|
{
|
|
|
|
return fuse_mknod(dir, entry, mode, 0);
|
|
|
|
}
|
|
|
|
|
2011-07-26 13:41:39 +08:00
|
|
|
/*
 * Create a directory with FUSE_MKDIR.
 */
static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
	struct fuse_mkdir_in inarg;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_req *req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Apply the umask here unless userspace asked to handle it itself */
	if (!fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.umask = current_umask();
	req->in.h.opcode = FUSE_MKDIR;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = entry->d_name.len + 1;
	req->in.args[1].value = entry->d_name.name;
	return create_new_entry(fc, req, dir, entry, S_IFDIR);
}
|
|
|
|
|
|
|
|
static int fuse_symlink(struct inode *dir, struct dentry *entry,
|
|
|
|
const char *link)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(dir);
|
|
|
|
unsigned len = strlen(link) + 1;
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
|
|
|
req->in.h.opcode = FUSE_SYMLINK;
|
|
|
|
req->in.numargs = 2;
|
|
|
|
req->in.args[0].size = entry->d_name.len + 1;
|
|
|
|
req->in.args[0].value = entry->d_name.name;
|
|
|
|
req->in.args[1].size = len;
|
|
|
|
req->in.args[1].value = link;
|
|
|
|
return create_new_entry(fc, req, dir, entry, S_IFLNK);
|
|
|
|
}
|
|
|
|
|
2014-04-28 20:19:24 +08:00
|
|
|
static inline void fuse_update_ctime(struct inode *inode)
|
|
|
|
{
|
|
|
|
if (!IS_NOCMTIME(inode)) {
|
|
|
|
inode->i_ctime = current_fs_time(inode->i_sb);
|
|
|
|
mark_inode_dirty_sync(inode);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-09-10 04:10:29 +08:00
|
|
|
static int fuse_unlink(struct inode *dir, struct dentry *entry)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(dir);
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
|
|
|
req->in.h.opcode = FUSE_UNLINK;
|
|
|
|
req->in.h.nodeid = get_node_id(dir);
|
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = entry->d_name.len + 1;
|
|
|
|
req->in.args[0].value = entry->d_name.name;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:29 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (!err) {
|
|
|
|
struct inode *inode = entry->d_inode;
|
2012-03-05 22:48:11 +08:00
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2012-03-05 22:48:11 +08:00
|
|
|
spin_lock(&fc->lock);
|
|
|
|
fi->attr_version = ++fc->attr_version;
|
2013-02-04 22:57:42 +08:00
|
|
|
/*
|
|
|
|
* If i_nlink == 0 then unlink doesn't make sense, yet this can
|
|
|
|
* happen if userspace filesystem is careless. It would be
|
|
|
|
* difficult to enforce correct nlink usage so just ignore this
|
|
|
|
* condition here
|
|
|
|
*/
|
|
|
|
if (inode->i_nlink > 0)
|
|
|
|
drop_nlink(inode);
|
2012-03-05 22:48:11 +08:00
|
|
|
spin_unlock(&fc->lock);
|
2005-09-10 04:10:29 +08:00
|
|
|
fuse_invalidate_attr(inode);
|
|
|
|
fuse_invalidate_attr(dir);
|
2006-01-06 16:19:38 +08:00
|
|
|
fuse_invalidate_entry_cache(entry);
|
2014-04-28 20:19:24 +08:00
|
|
|
fuse_update_ctime(inode);
|
2005-09-10 04:10:29 +08:00
|
|
|
} else if (err == -EINTR)
|
|
|
|
fuse_invalidate_entry(entry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_rmdir(struct inode *dir, struct dentry *entry)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(dir);
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
|
|
|
req->in.h.opcode = FUSE_RMDIR;
|
|
|
|
req->in.h.nodeid = get_node_id(dir);
|
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = entry->d_name.len + 1;
|
|
|
|
req->in.args[0].value = entry->d_name.name;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:29 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (!err) {
|
2006-10-01 14:29:06 +08:00
|
|
|
clear_nlink(entry->d_inode);
|
2005-09-10 04:10:29 +08:00
|
|
|
fuse_invalidate_attr(dir);
|
2006-01-06 16:19:38 +08:00
|
|
|
fuse_invalidate_entry_cache(entry);
|
2005-09-10 04:10:29 +08:00
|
|
|
} else if (err == -EINTR)
|
|
|
|
fuse_invalidate_entry(entry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-04-28 22:43:44 +08:00
|
|
|
/*
 * Shared implementation for FUSE_RENAME and FUSE_RENAME2.
 *
 * @opcode/@argsize select the request variant; fuse_rename2_in is a
 * superset of fuse_rename_in, so only @argsize bytes of it are sent.
 */
static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
			      struct inode *newdir, struct dentry *newent,
			      unsigned int flags, int opcode, size_t argsize)
{
	int err;
	struct fuse_rename2_in inarg;
	struct fuse_conn *fc = get_fuse_conn(olddir);
	struct fuse_req *req;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, argsize);
	inarg.newdir = get_node_id(newdir);
	inarg.flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(olddir);
	req->in.numargs = 3;
	req->in.args[0].size = argsize;
	req->in.args[0].value = &inarg;
	req->in.args[1].size = oldent->d_name.len + 1;
	req->in.args[1].value = oldent->d_name.name;
	req->in.args[2].size = newent->d_name.len + 1;
	req->in.args[2].value = newent->d_name.name;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err) {
		/* ctime changes */
		fuse_invalidate_attr(oldent->d_inode);
		fuse_update_ctime(oldent->d_inode);

		if (flags & RENAME_EXCHANGE) {
			fuse_invalidate_attr(newent->d_inode);
			fuse_update_ctime(newent->d_inode);
		}

		fuse_invalidate_attr(olddir);
		if (olddir != newdir)
			fuse_invalidate_attr(newdir);

		/* newent will end up negative */
		if (!(flags & RENAME_EXCHANGE) && newent->d_inode) {
			fuse_invalidate_attr(newent->d_inode);
			fuse_invalidate_entry_cache(newent);
			fuse_update_ctime(newent->d_inode);
		}
	} else if (err == -EINTR) {
		/* If request was interrupted, DEITY only knows if the
		   rename actually took place.  If the invalidation
		   fails (e.g. some process has CWD under the renamed
		   directory), then there can be inconsistency between
		   the dcache and the real filesystem.  Tough luck. */
		fuse_invalidate_entry(oldent);
		if (newent->d_inode)
			fuse_invalidate_entry(newent);
	}

	return err;
}
|
|
|
|
|
2014-04-28 22:43:44 +08:00
|
|
|
static int fuse_rename(struct inode *olddir, struct dentry *oldent,
|
|
|
|
struct inode *newdir, struct dentry *newent)
|
|
|
|
{
|
|
|
|
return fuse_rename_common(olddir, oldent, newdir, newent, 0,
|
|
|
|
FUSE_RENAME, sizeof(struct fuse_rename_in));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
|
|
|
|
struct inode *newdir, struct dentry *newent,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(olddir);
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (fc->no_rename2 || fc->minor < 23)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
|
|
|
|
FUSE_RENAME2, sizeof(struct fuse_rename2_in));
|
|
|
|
if (err == -ENOSYS) {
|
|
|
|
fc->no_rename2 = 1;
|
|
|
|
err = -EINVAL;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2005-09-10 04:10:29 +08:00
|
|
|
static int fuse_link(struct dentry *entry, struct inode *newdir,
|
|
|
|
struct dentry *newent)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct fuse_link_in inarg;
|
|
|
|
struct inode *inode = entry->d_inode;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
inarg.oldnodeid = get_node_id(inode);
|
|
|
|
req->in.h.opcode = FUSE_LINK;
|
|
|
|
req->in.numargs = 2;
|
|
|
|
req->in.args[0].size = sizeof(inarg);
|
|
|
|
req->in.args[0].value = &inarg;
|
|
|
|
req->in.args[1].size = newent->d_name.len + 1;
|
|
|
|
req->in.args[1].value = newent->d_name.name;
|
|
|
|
err = create_new_entry(fc, req, newdir, newent, inode->i_mode);
|
|
|
|
/* Contrary to "normal" filesystems it can happen that link
|
|
|
|
makes two "logical" inodes point to the same "physical"
|
|
|
|
inode. We invalidate the attributes of the old one, so it
|
|
|
|
will reflect changes in the backing inode (link count,
|
|
|
|
etc.)
|
|
|
|
*/
|
2012-03-05 22:48:11 +08:00
|
|
|
if (!err) {
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
fi->attr_version = ++fc->attr_version;
|
|
|
|
inc_nlink(inode);
|
|
|
|
spin_unlock(&fc->lock);
|
2005-09-10 04:10:29 +08:00
|
|
|
fuse_invalidate_attr(inode);
|
2014-04-28 20:19:24 +08:00
|
|
|
fuse_update_ctime(inode);
|
2012-03-05 22:48:11 +08:00
|
|
|
} else if (err == -EINTR) {
|
|
|
|
fuse_invalidate_attr(inode);
|
|
|
|
}
|
2005-09-10 04:10:29 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
|
|
|
|
struct kstat *stat)
|
|
|
|
{
|
2012-05-10 23:49:38 +08:00
|
|
|
unsigned int blkbits;
|
2013-10-10 21:10:46 +08:00
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
|
|
|
|
/* see the comment in fuse_change_attributes() */
|
2013-12-26 23:51:11 +08:00
|
|
|
if (fc->writeback_cache && S_ISREG(inode->i_mode)) {
|
2013-10-10 21:10:46 +08:00
|
|
|
attr->size = i_size_read(inode);
|
2013-12-26 23:51:11 +08:00
|
|
|
attr->mtime = inode->i_mtime.tv_sec;
|
|
|
|
attr->mtimensec = inode->i_mtime.tv_nsec;
|
2014-04-28 20:19:24 +08:00
|
|
|
attr->ctime = inode->i_ctime.tv_sec;
|
|
|
|
attr->ctimensec = inode->i_ctime.tv_nsec;
|
2013-12-26 23:51:11 +08:00
|
|
|
}
|
2012-05-10 23:49:38 +08:00
|
|
|
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
stat->dev = inode->i_sb->s_dev;
|
|
|
|
stat->ino = attr->ino;
|
|
|
|
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
|
|
|
|
stat->nlink = attr->nlink;
|
2012-02-08 08:26:03 +08:00
|
|
|
stat->uid = make_kuid(&init_user_ns, attr->uid);
|
|
|
|
stat->gid = make_kgid(&init_user_ns, attr->gid);
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
stat->rdev = inode->i_rdev;
|
|
|
|
stat->atime.tv_sec = attr->atime;
|
|
|
|
stat->atime.tv_nsec = attr->atimensec;
|
|
|
|
stat->mtime.tv_sec = attr->mtime;
|
|
|
|
stat->mtime.tv_nsec = attr->mtimensec;
|
|
|
|
stat->ctime.tv_sec = attr->ctime;
|
|
|
|
stat->ctime.tv_nsec = attr->ctimensec;
|
|
|
|
stat->size = attr->size;
|
|
|
|
stat->blocks = attr->blocks;
|
2012-05-10 23:49:38 +08:00
|
|
|
|
|
|
|
if (attr->blksize != 0)
|
|
|
|
blkbits = ilog2(attr->blksize);
|
|
|
|
else
|
|
|
|
blkbits = inode->i_sb->s_blocksize_bits;
|
|
|
|
|
|
|
|
stat->blksize = 1 << blkbits;
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
}
|
|
|
|
|
2007-10-18 18:06:59 +08:00
|
|
|
static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
|
|
|
|
struct file *file)
|
2005-09-10 04:10:28 +08:00
|
|
|
{
|
|
|
|
int err;
|
2007-10-18 18:06:59 +08:00
|
|
|
struct fuse_getattr_in inarg;
|
|
|
|
struct fuse_attr_out outarg;
|
2005-09-10 04:10:28 +08:00
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
struct fuse_req *req;
|
|
|
|
u64 attr_version;
|
|
|
|
|
2012-10-26 23:48:30 +08:00
|
|
|
req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:28 +08:00
|
|
|
|
2007-11-29 08:21:59 +08:00
|
|
|
attr_version = fuse_get_attr_version(fc);
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
|
2007-10-18 18:06:59 +08:00
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
2007-10-18 18:07:05 +08:00
|
|
|
memset(&outarg, 0, sizeof(outarg));
|
2007-10-18 18:06:59 +08:00
|
|
|
/* Directories have separate file-handle space */
|
|
|
|
if (file && S_ISREG(inode->i_mode)) {
|
|
|
|
struct fuse_file *ff = file->private_data;
|
|
|
|
|
|
|
|
inarg.getattr_flags |= FUSE_GETATTR_FH;
|
|
|
|
inarg.fh = ff->fh;
|
|
|
|
}
|
2005-09-10 04:10:28 +08:00
|
|
|
req->in.h.opcode = FUSE_GETATTR;
|
|
|
|
req->in.h.nodeid = get_node_id(inode);
|
2007-10-18 18:06:59 +08:00
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = sizeof(inarg);
|
|
|
|
req->in.args[0].value = &inarg;
|
2005-09-10 04:10:28 +08:00
|
|
|
req->out.numargs = 1;
|
2007-10-18 18:07:05 +08:00
|
|
|
if (fc->minor < 9)
|
|
|
|
req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
|
|
|
|
else
|
|
|
|
req->out.args[0].size = sizeof(outarg);
|
2007-10-18 18:06:59 +08:00
|
|
|
req->out.args[0].value = &outarg;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:28 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (!err) {
|
2007-10-18 18:06:59 +08:00
|
|
|
if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
|
2005-09-10 04:10:28 +08:00
|
|
|
make_bad_inode(inode);
|
|
|
|
err = -EIO;
|
|
|
|
} else {
|
2007-10-18 18:06:59 +08:00
|
|
|
fuse_change_attributes(inode, &outarg.attr,
|
|
|
|
attr_timeout(&outarg),
|
fuse: fix race between getattr and write
Getattr and lookup operations can be running in parallel to attribute changing
operations, such as write and setattr.
This means, that if for example getattr was slower than a write, the cached
size attribute could be set to a stale value.
To prevent this race, introduce a per-filesystem attribute version counter.
This counter is incremented whenever cached attributes are modified, and the
incremented value stored in the inode.
Before storing new attributes in the cache, getattr and lookup check, using
the version number, whether the attributes have been modified during the
request's lifetime. If so, the returned attributes are not cached, because
they might be stale.
Thanks to Jakub Bogusz for the bug report and test program.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Jakub Bogusz <jakub.bogusz@gemius.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-18 18:06:58 +08:00
|
|
|
attr_version);
|
|
|
|
if (stat)
|
2007-10-18 18:06:59 +08:00
|
|
|
fuse_fillattr(inode, &outarg.attr, stat);
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-11-29 08:21:59 +08:00
|
|
|
/*
 * Return attributes from the cache if still valid, otherwise refresh
 * them from userspace via fuse_do_getattr().
 *
 * @refreshed (optional) reports whether a round trip was performed.
 */
int fuse_update_attributes(struct inode *inode, struct kstat *stat,
			   struct file *file, bool *refreshed)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err;
	bool r;

	if (time_before64(fi->i_time, get_jiffies_64())) {
		/* Cache expired: ask the userspace filesystem */
		r = true;
		err = fuse_do_getattr(inode, stat, file);
	} else {
		r = false;
		err = 0;
		if (stat) {
			generic_fillattr(inode, stat);
			/* Report the mode/ino as originally returned by
			 * userspace, not the possibly munged inode values */
			stat->mode = fi->orig_i_mode;
			stat->ino = fi->orig_ino;
		}
	}

	if (refreshed != NULL)
		*refreshed = r;

	return err;
}
|
|
|
|
|
2009-05-31 23:13:57 +08:00
|
|
|
/*
 * Userspace-initiated invalidation of a directory entry.
 *
 * Looks up @name under @parent_nodeid and invalidates the dentry.  If
 * @child_nodeid is non-zero the entry is additionally deleted from the
 * dcache (behaving like an unlink/rmdir observed from outside), after
 * verifying the nodeid matches and the target is neither a mountpoint
 * nor a non-empty directory.
 */
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
	if (!parent)
		return -ENOENT;

	mutex_lock(&parent->i_mutex);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_invalidate_attr(parent);
	fuse_invalidate_entry(entry);

	if (child_nodeid != 0 && entry->d_inode) {
		mutex_lock(&entry->d_inode->i_mutex);
		if (get_node_id(entry->d_inode) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (S_ISDIR(entry->d_inode->i_mode)) {
			shrink_dcache_parent(entry);
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			entry->d_inode->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(entry->d_inode);
		err = 0;
 badentry:
		mutex_unlock(&entry->d_inode->i_mutex);
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}
	dput(entry);

 unlock:
	mutex_unlock(&parent->i_mutex);
	iput(parent);
	return err;
}
|
|
|
|
|
2005-09-10 04:10:34 +08:00
|
|
|
/*
|
|
|
|
* Calling into a user-controlled filesystem gives the filesystem
|
2013-01-15 14:30:00 +08:00
|
|
|
* daemon ptrace-like capabilities over the current process. This
|
2005-09-10 04:10:34 +08:00
|
|
|
* means, that the filesystem daemon is able to record the exact
|
|
|
|
* filesystem operations performed, and can also control the behavior
|
|
|
|
* of the requester process in otherwise impossible ways. For example
|
|
|
|
* it can delay the operation for arbitrary length of time allowing
|
|
|
|
* DoS against the requester.
|
|
|
|
*
|
|
|
|
* For this reason only those processes can call into the filesystem,
|
|
|
|
* for which the owner of the mount has ptrace privilege. This
|
|
|
|
* excludes processes started by other users, suid or sgid processes.
|
|
|
|
*/
|
2013-01-15 14:30:00 +08:00
|
|
|
int fuse_allow_current_process(struct fuse_conn *fc)
|
2005-09-10 04:10:34 +08:00
|
|
|
{
|
2008-11-14 07:39:19 +08:00
|
|
|
const struct cred *cred;
|
2005-09-10 04:10:34 +08:00
|
|
|
|
2008-11-14 07:39:19 +08:00
|
|
|
if (fc->flags & FUSE_ALLOW_OTHER)
|
2005-09-10 04:10:34 +08:00
|
|
|
return 1;
|
|
|
|
|
2013-01-15 14:30:00 +08:00
|
|
|
cred = current_cred();
|
2012-02-08 08:26:03 +08:00
|
|
|
if (uid_eq(cred->euid, fc->user_id) &&
|
|
|
|
uid_eq(cred->suid, fc->user_id) &&
|
|
|
|
uid_eq(cred->uid, fc->user_id) &&
|
|
|
|
gid_eq(cred->egid, fc->group_id) &&
|
|
|
|
gid_eq(cred->sgid, fc->group_id) &&
|
|
|
|
gid_eq(cred->gid, fc->group_id))
|
2013-01-15 14:30:00 +08:00
|
|
|
return 1;
|
2008-11-14 07:39:19 +08:00
|
|
|
|
2013-01-15 14:30:00 +08:00
|
|
|
return 0;
|
2005-09-10 04:10:34 +08:00
|
|
|
}
|
|
|
|
|
2005-11-07 16:59:50 +08:00
|
|
|
static int fuse_access(struct inode *inode, int mask)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_req *req;
|
|
|
|
struct fuse_access_in inarg;
|
|
|
|
int err;
|
|
|
|
|
2013-10-01 22:41:23 +08:00
|
|
|
BUG_ON(mask & MAY_NOT_BLOCK);
|
|
|
|
|
2005-11-07 16:59:50 +08:00
|
|
|
if (fc->no_access)
|
|
|
|
return 0;
|
|
|
|
|
2012-10-26 23:48:30 +08:00
|
|
|
req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-11-07 16:59:50 +08:00
|
|
|
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
2008-07-16 09:03:57 +08:00
|
|
|
inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
|
2005-11-07 16:59:50 +08:00
|
|
|
req->in.h.opcode = FUSE_ACCESS;
|
|
|
|
req->in.h.nodeid = get_node_id(inode);
|
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = sizeof(inarg);
|
|
|
|
req->in.args[0].value = &inarg;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-11-07 16:59:50 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (err == -ENOSYS) {
|
|
|
|
fc->no_access = 1;
|
|
|
|
err = 0;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:28:19 +08:00
|
|
|
static int fuse_perm_getattr(struct inode *inode, int mask)
|
2011-03-21 20:58:06 +08:00
|
|
|
{
|
2011-06-21 07:28:19 +08:00
|
|
|
if (mask & MAY_NOT_BLOCK)
|
2011-03-21 20:58:06 +08:00
|
|
|
return -ECHILD;
|
|
|
|
|
|
|
|
return fuse_do_getattr(inode, NULL, NULL);
|
|
|
|
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/*
|
|
|
|
* Check permission. The two basic access models of FUSE are:
|
|
|
|
*
|
|
|
|
* 1) Local access checking ('default_permissions' mount option) based
|
|
|
|
* on file mode. This is the plain old disk filesystem permission
|
|
|
|
* modell.
|
|
|
|
*
|
|
|
|
* 2) "Remote" access checking, where server is responsible for
|
|
|
|
* checking permission in each inode operation. An exception to this
|
|
|
|
* is if ->permission() was invoked from sys_access() in which case an
|
|
|
|
* access request is sent. Execute permission is still checked
|
|
|
|
* locally based on file mode.
|
|
|
|
*/
|
2011-06-21 07:28:19 +08:00
|
|
|
static int fuse_permission(struct inode *inode, int mask)
|
2005-09-10 04:10:28 +08:00
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
2007-10-17 14:31:02 +08:00
|
|
|
bool refreshed = false;
|
|
|
|
int err = 0;
|
2005-09-10 04:10:28 +08:00
|
|
|
|
2013-01-15 14:30:00 +08:00
|
|
|
if (!fuse_allow_current_process(fc))
|
2005-09-10 04:10:28 +08:00
|
|
|
return -EACCES;
|
2007-10-17 14:31:02 +08:00
|
|
|
|
|
|
|
/*
|
2007-10-17 14:31:06 +08:00
|
|
|
* If attributes are needed, refresh them before proceeding
|
2007-10-17 14:31:02 +08:00
|
|
|
*/
|
2007-10-17 14:31:06 +08:00
|
|
|
if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
|
|
|
|
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
|
2011-03-21 20:58:06 +08:00
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
2014-07-07 21:28:50 +08:00
|
|
|
if (time_before64(fi->i_time, get_jiffies_64())) {
|
2011-03-21 20:58:06 +08:00
|
|
|
refreshed = true;
|
|
|
|
|
2011-06-21 07:28:19 +08:00
|
|
|
err = fuse_perm_getattr(inode, mask);
|
2011-03-21 20:58:06 +08:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
2007-10-17 14:31:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
|
2011-06-21 07:16:29 +08:00
|
|
|
err = generic_permission(inode, mask);
|
2005-09-10 04:10:31 +08:00
|
|
|
|
|
|
|
/* If permission is denied, try to refresh file
|
|
|
|
attributes. This is also needed, because the root
|
|
|
|
node will at first have no permissions */
|
2007-10-17 14:31:02 +08:00
|
|
|
if (err == -EACCES && !refreshed) {
|
2011-06-21 07:28:19 +08:00
|
|
|
err = fuse_perm_getattr(inode, mask);
|
2005-09-10 04:10:31 +08:00
|
|
|
if (!err)
|
2011-06-21 07:16:29 +08:00
|
|
|
err = generic_permission(inode, mask);
|
2005-09-10 04:10:31 +08:00
|
|
|
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/* Note: the opposite of the above test does not
|
|
|
|
exist. So if permissions are revoked this won't be
|
|
|
|
noticed immediately, only after the attribute
|
|
|
|
timeout has expired */
|
2010-07-23 23:43:51 +08:00
|
|
|
} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
|
2007-10-17 14:31:06 +08:00
|
|
|
err = fuse_access(inode, mask);
|
|
|
|
} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
|
|
|
|
if (!(inode->i_mode & S_IXUGO)) {
|
|
|
|
if (refreshed)
|
|
|
|
return -EACCES;
|
|
|
|
|
2011-06-21 07:28:19 +08:00
|
|
|
err = fuse_perm_getattr(inode, mask);
|
2007-10-17 14:31:06 +08:00
|
|
|
if (!err && !(inode->i_mode & S_IXUGO))
|
|
|
|
return -EACCES;
|
|
|
|
}
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
2007-10-17 14:31:02 +08:00
|
|
|
return err;
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
|
2013-05-18 15:03:58 +08:00
|
|
|
struct dir_context *ctx)
|
2005-09-10 04:10:28 +08:00
|
|
|
{
|
|
|
|
while (nbytes >= FUSE_NAME_OFFSET) {
|
|
|
|
struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
|
|
|
|
size_t reclen = FUSE_DIRENT_SIZE(dirent);
|
|
|
|
if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
|
|
|
|
return -EIO;
|
|
|
|
if (reclen > nbytes)
|
|
|
|
break;
|
2013-09-03 20:28:38 +08:00
|
|
|
if (memchr(dirent->name, '/', dirent->namelen) != NULL)
|
|
|
|
return -EIO;
|
2005-09-10 04:10:28 +08:00
|
|
|
|
2013-05-18 15:03:58 +08:00
|
|
|
if (!dir_emit(ctx, dirent->name, dirent->namelen,
|
|
|
|
dirent->ino, dirent->type))
|
2005-09-10 04:10:28 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
buf += reclen;
|
|
|
|
nbytes -= reclen;
|
2013-05-18 15:03:58 +08:00
|
|
|
ctx->pos = dirent->off;
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-08-19 20:53:23 +08:00
|
|
|
static int fuse_direntplus_link(struct file *file,
|
|
|
|
struct fuse_direntplus *direntplus,
|
|
|
|
u64 attr_version)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct fuse_entry_out *o = &direntplus->entry_out;
|
|
|
|
struct fuse_dirent *dirent = &direntplus->dirent;
|
|
|
|
struct dentry *parent = file->f_path.dentry;
|
|
|
|
struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
|
|
|
|
struct dentry *dentry;
|
|
|
|
struct dentry *alias;
|
|
|
|
struct inode *dir = parent->d_inode;
|
|
|
|
struct fuse_conn *fc;
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
if (!o->nodeid) {
|
|
|
|
/*
|
|
|
|
* Unlike in the case of fuse_lookup, zero nodeid does not mean
|
|
|
|
* ENOENT. Instead, it only means the userspace filesystem did
|
|
|
|
* not want to return attributes/handle for this entry.
|
|
|
|
*
|
|
|
|
* So do nothing.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (name.name[0] == '.') {
|
|
|
|
/*
|
|
|
|
* We could potentially refresh the attributes of the directory
|
|
|
|
* and its parent?
|
|
|
|
*/
|
|
|
|
if (name.len == 1)
|
|
|
|
return 0;
|
|
|
|
if (name.name[1] == '.' && name.len == 2)
|
|
|
|
return 0;
|
|
|
|
}
|
2013-07-17 20:53:53 +08:00
|
|
|
|
|
|
|
if (invalid_nodeid(o->nodeid))
|
|
|
|
return -EIO;
|
|
|
|
if (!fuse_valid_type(o->attr.mode))
|
|
|
|
return -EIO;
|
|
|
|
|
2012-08-19 20:53:23 +08:00
|
|
|
fc = get_fuse_conn(dir);
|
|
|
|
|
|
|
|
name.hash = full_name_hash(name.name, name.len);
|
|
|
|
dentry = d_lookup(parent, &name);
|
2013-07-17 20:53:53 +08:00
|
|
|
if (dentry) {
|
2012-08-19 20:53:23 +08:00
|
|
|
inode = dentry->d_inode;
|
2013-07-17 20:53:53 +08:00
|
|
|
if (!inode) {
|
|
|
|
d_drop(dentry);
|
2013-07-17 20:53:53 +08:00
|
|
|
} else if (get_node_id(inode) != o->nodeid ||
|
|
|
|
((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
|
2013-07-17 20:53:53 +08:00
|
|
|
err = d_invalidate(dentry);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
2013-07-17 20:53:53 +08:00
|
|
|
} else if (is_bad_inode(inode)) {
|
|
|
|
err = -EIO;
|
|
|
|
goto out;
|
2013-07-17 20:53:53 +08:00
|
|
|
} else {
|
2012-08-19 20:53:23 +08:00
|
|
|
struct fuse_inode *fi;
|
|
|
|
fi = get_fuse_inode(inode);
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
fi->nlookup++;
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
|
2013-07-17 20:53:53 +08:00
|
|
|
fuse_change_attributes(inode, &o->attr,
|
|
|
|
entry_attr_timeout(o),
|
|
|
|
attr_version);
|
|
|
|
|
2012-08-19 20:53:23 +08:00
|
|
|
/*
|
|
|
|
* The other branch to 'found' comes via fuse_iget()
|
|
|
|
* which bumps nlookup inside
|
|
|
|
*/
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
dput(dentry);
|
|
|
|
}
|
|
|
|
|
|
|
|
dentry = d_alloc(parent, &name);
|
|
|
|
err = -ENOMEM;
|
|
|
|
if (!dentry)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
|
|
|
|
&o->attr, entry_attr_timeout(o), attr_version);
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
2013-10-01 22:44:54 +08:00
|
|
|
alias = d_materialise_unique(dentry, inode);
|
2013-09-05 17:44:42 +08:00
|
|
|
err = PTR_ERR(alias);
|
|
|
|
if (IS_ERR(alias))
|
|
|
|
goto out;
|
2013-07-17 20:53:53 +08:00
|
|
|
|
2012-08-19 20:53:23 +08:00
|
|
|
if (alias) {
|
|
|
|
dput(dentry);
|
|
|
|
dentry = alias;
|
|
|
|
}
|
|
|
|
|
|
|
|
found:
|
2013-10-01 22:41:22 +08:00
|
|
|
if (fc->readdirplus_auto)
|
|
|
|
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
|
2012-08-19 20:53:23 +08:00
|
|
|
fuse_change_entry_timeout(dentry, o);
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out:
|
2013-07-17 20:53:54 +08:00
|
|
|
dput(dentry);
|
2012-08-19 20:53:23 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
|
2013-05-18 15:03:58 +08:00
|
|
|
struct dir_context *ctx, u64 attr_version)
|
2012-08-19 20:53:23 +08:00
|
|
|
{
|
|
|
|
struct fuse_direntplus *direntplus;
|
|
|
|
struct fuse_dirent *dirent;
|
|
|
|
size_t reclen;
|
|
|
|
int over = 0;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
|
|
|
|
direntplus = (struct fuse_direntplus *) buf;
|
|
|
|
dirent = &direntplus->dirent;
|
|
|
|
reclen = FUSE_DIRENTPLUS_SIZE(direntplus);
|
|
|
|
|
|
|
|
if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
|
|
|
|
return -EIO;
|
|
|
|
if (reclen > nbytes)
|
|
|
|
break;
|
2013-09-03 20:28:38 +08:00
|
|
|
if (memchr(dirent->name, '/', dirent->namelen) != NULL)
|
|
|
|
return -EIO;
|
2012-08-19 20:53:23 +08:00
|
|
|
|
|
|
|
if (!over) {
|
|
|
|
/* We fill entries into dstbuf only as much as
|
|
|
|
it can hold. But we still continue iterating
|
|
|
|
over remaining entries to link them. If not,
|
|
|
|
we need to send a FORGET for each of those
|
|
|
|
which we did not link.
|
|
|
|
*/
|
2013-05-18 15:03:58 +08:00
|
|
|
over = !dir_emit(ctx, dirent->name, dirent->namelen,
|
|
|
|
dirent->ino, dirent->type);
|
|
|
|
ctx->pos = dirent->off;
|
2012-08-19 20:53:23 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
buf += reclen;
|
|
|
|
nbytes -= reclen;
|
|
|
|
|
|
|
|
ret = fuse_direntplus_link(file, direntplus, attr_version);
|
|
|
|
if (ret)
|
|
|
|
fuse_force_forget(file, direntplus->entry_out.nodeid);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-05-18 15:03:58 +08:00
|
|
|
static int fuse_readdir(struct file *file, struct dir_context *ctx)
|
2005-09-10 04:10:28 +08:00
|
|
|
{
|
2013-01-15 11:23:28 +08:00
|
|
|
int plus, err;
|
2005-09-10 04:10:36 +08:00
|
|
|
size_t nbytes;
|
|
|
|
struct page *page;
|
2013-01-24 06:07:38 +08:00
|
|
|
struct inode *inode = file_inode(file);
|
2005-09-10 04:10:28 +08:00
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
2006-01-06 16:19:39 +08:00
|
|
|
struct fuse_req *req;
|
2012-08-19 20:53:23 +08:00
|
|
|
u64 attr_version = 0;
|
2006-01-06 16:19:39 +08:00
|
|
|
|
|
|
|
if (is_bad_inode(inode))
|
|
|
|
return -EIO;
|
|
|
|
|
2012-10-26 23:48:30 +08:00
|
|
|
req = fuse_get_req(fc, 1);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:28 +08:00
|
|
|
|
2005-09-10 04:10:36 +08:00
|
|
|
page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!page) {
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2013-01-15 11:23:28 +08:00
|
|
|
|
2013-05-18 15:03:58 +08:00
|
|
|
plus = fuse_use_readdirplus(inode, ctx);
|
2009-04-02 20:25:34 +08:00
|
|
|
req->out.argpages = 1;
|
2005-09-10 04:10:36 +08:00
|
|
|
req->num_pages = 1;
|
|
|
|
req->pages[0] = page;
|
2012-10-26 23:49:33 +08:00
|
|
|
req->page_descs[0].length = PAGE_SIZE;
|
2013-01-15 11:23:28 +08:00
|
|
|
if (plus) {
|
2012-08-19 20:53:23 +08:00
|
|
|
attr_version = fuse_get_attr_version(fc);
|
2013-05-18 15:03:58 +08:00
|
|
|
fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
|
2012-08-19 20:53:23 +08:00
|
|
|
FUSE_READDIRPLUS);
|
|
|
|
} else {
|
2013-05-18 15:03:58 +08:00
|
|
|
fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
|
2012-08-19 20:53:23 +08:00
|
|
|
FUSE_READDIR);
|
|
|
|
}
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2006-01-17 14:14:45 +08:00
|
|
|
nbytes = req->out.args[0].size;
|
2005-09-10 04:10:28 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
2012-08-19 20:53:23 +08:00
|
|
|
if (!err) {
|
2013-01-15 11:23:28 +08:00
|
|
|
if (plus) {
|
2012-08-19 20:53:23 +08:00
|
|
|
err = parse_dirplusfile(page_address(page), nbytes,
|
2013-05-18 15:03:58 +08:00
|
|
|
file, ctx,
|
2012-08-19 20:53:23 +08:00
|
|
|
attr_version);
|
|
|
|
} else {
|
|
|
|
err = parse_dirfile(page_address(page), nbytes, file,
|
2013-05-18 15:03:58 +08:00
|
|
|
ctx);
|
2012-08-19 20:53:23 +08:00
|
|
|
}
|
|
|
|
}
|
2005-09-10 04:10:28 +08:00
|
|
|
|
2005-09-10 04:10:36 +08:00
|
|
|
__free_page(page);
|
2013-11-05 19:55:43 +08:00
|
|
|
fuse_invalidate_atime(inode);
|
2005-09-10 04:10:36 +08:00
|
|
|
return err;
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static char *read_link(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
struct inode *inode = dentry->d_inode;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
2012-10-26 23:48:30 +08:00
|
|
|
struct fuse_req *req = fuse_get_req_nopages(fc);
|
2005-09-10 04:10:28 +08:00
|
|
|
char *link;
|
|
|
|
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
2008-02-07 16:15:26 +08:00
|
|
|
return ERR_CAST(req);
|
2005-09-10 04:10:28 +08:00
|
|
|
|
|
|
|
link = (char *) __get_free_page(GFP_KERNEL);
|
|
|
|
if (!link) {
|
|
|
|
link = ERR_PTR(-ENOMEM);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
req->in.h.opcode = FUSE_READLINK;
|
|
|
|
req->in.h.nodeid = get_node_id(inode);
|
|
|
|
req->out.argvar = 1;
|
|
|
|
req->out.numargs = 1;
|
|
|
|
req->out.args[0].size = PAGE_SIZE - 1;
|
|
|
|
req->out.args[0].value = link;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:28 +08:00
|
|
|
if (req->out.h.error) {
|
|
|
|
free_page((unsigned long) link);
|
|
|
|
link = ERR_PTR(req->out.h.error);
|
|
|
|
} else
|
|
|
|
link[req->out.args[0].size] = '\0';
|
|
|
|
out:
|
|
|
|
fuse_put_request(fc, req);
|
2013-11-05 19:55:43 +08:00
|
|
|
fuse_invalidate_atime(inode);
|
2005-09-10 04:10:28 +08:00
|
|
|
return link;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_link(char *link)
|
|
|
|
{
|
|
|
|
if (!IS_ERR(link))
|
|
|
|
free_page((unsigned long) link);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
nd_set_link(nd, read_link(dentry));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
|
|
|
|
{
|
|
|
|
free_link(nd_get_link(nd));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_dir_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
2009-04-28 22:56:37 +08:00
|
|
|
return fuse_open_common(inode, file, true);
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_dir_release(struct inode *inode, struct file *file)
|
|
|
|
{
|
2009-04-28 22:56:39 +08:00
|
|
|
fuse_release_common(file, FUSE_RELEASEDIR);
|
|
|
|
|
|
|
|
return 0;
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
2011-07-17 08:44:56 +08:00
|
|
|
static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
|
|
|
|
int datasync)
|
2005-09-10 04:10:38 +08:00
|
|
|
{
|
2011-07-17 08:44:56 +08:00
|
|
|
return fuse_fsync_common(file, start, end, datasync, 1);
|
2005-09-10 04:10:38 +08:00
|
|
|
}
|
|
|
|
|
2011-12-13 18:58:49 +08:00
|
|
|
static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
|
|
|
|
|
|
|
|
/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
|
|
|
|
if (fc->minor < 18)
|
|
|
|
return -ENOTTY;
|
|
|
|
|
|
|
|
return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
|
|
|
|
}
|
|
|
|
|
|
|
|
static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
|
|
|
|
|
|
|
|
if (fc->minor < 18)
|
|
|
|
return -ENOTTY;
|
|
|
|
|
|
|
|
return fuse_ioctl_common(file, cmd, arg,
|
|
|
|
FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
|
|
|
|
}
|
|
|
|
|
2013-12-26 23:51:11 +08:00
|
|
|
static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
|
2007-10-18 18:07:01 +08:00
|
|
|
{
|
|
|
|
/* Always update if mtime is explicitly set */
|
|
|
|
if (ivalid & ATTR_MTIME_SET)
|
|
|
|
return true;
|
|
|
|
|
2013-12-26 23:51:11 +08:00
|
|
|
/* Or if kernel i_mtime is the official one */
|
|
|
|
if (trust_local_mtime)
|
|
|
|
return true;
|
|
|
|
|
2007-10-18 18:07:01 +08:00
|
|
|
/* If it's an open(O_TRUNC) or an ftruncate(), don't update */
|
|
|
|
if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* In all other cases update */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-12-26 23:51:11 +08:00
|
|
|
static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
|
2014-04-28 20:19:25 +08:00
|
|
|
bool trust_local_cmtime)
|
2005-09-10 04:10:29 +08:00
|
|
|
{
|
|
|
|
unsigned ivalid = iattr->ia_valid;
|
|
|
|
|
|
|
|
if (ivalid & ATTR_MODE)
|
2005-11-07 16:59:52 +08:00
|
|
|
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
|
2005-09-10 04:10:29 +08:00
|
|
|
if (ivalid & ATTR_UID)
|
2012-02-08 08:26:03 +08:00
|
|
|
arg->valid |= FATTR_UID, arg->uid = from_kuid(&init_user_ns, iattr->ia_uid);
|
2005-09-10 04:10:29 +08:00
|
|
|
if (ivalid & ATTR_GID)
|
2012-02-08 08:26:03 +08:00
|
|
|
arg->valid |= FATTR_GID, arg->gid = from_kgid(&init_user_ns, iattr->ia_gid);
|
2005-09-10 04:10:29 +08:00
|
|
|
if (ivalid & ATTR_SIZE)
|
2005-11-07 16:59:52 +08:00
|
|
|
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
|
2007-10-18 18:07:01 +08:00
|
|
|
if (ivalid & ATTR_ATIME) {
|
|
|
|
arg->valid |= FATTR_ATIME;
|
2005-11-07 16:59:52 +08:00
|
|
|
arg->atime = iattr->ia_atime.tv_sec;
|
2007-10-18 18:07:01 +08:00
|
|
|
arg->atimensec = iattr->ia_atime.tv_nsec;
|
|
|
|
if (!(ivalid & ATTR_ATIME_SET))
|
|
|
|
arg->valid |= FATTR_ATIME_NOW;
|
|
|
|
}
|
2014-04-28 20:19:25 +08:00
|
|
|
if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
|
2007-10-18 18:07:01 +08:00
|
|
|
arg->valid |= FATTR_MTIME;
|
2005-11-07 16:59:52 +08:00
|
|
|
arg->mtime = iattr->ia_mtime.tv_sec;
|
2007-10-18 18:07:01 +08:00
|
|
|
arg->mtimensec = iattr->ia_mtime.tv_nsec;
|
2014-04-28 20:19:25 +08:00
|
|
|
if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
|
2007-10-18 18:07:01 +08:00
|
|
|
arg->valid |= FATTR_MTIME_NOW;
|
2005-11-07 16:59:52 +08:00
|
|
|
}
|
2014-04-28 20:19:25 +08:00
|
|
|
if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
|
|
|
|
arg->valid |= FATTR_CTIME;
|
|
|
|
arg->ctime = iattr->ia_ctime.tv_sec;
|
|
|
|
arg->ctimensec = iattr->ia_ctime.tv_nsec;
|
|
|
|
}
|
2005-09-10 04:10:29 +08:00
|
|
|
}
|
|
|
|
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
/*
|
|
|
|
* Prevent concurrent writepages on inode
|
|
|
|
*
|
|
|
|
* This is done by adding a negative bias to the inode write counter
|
|
|
|
* and waiting for all pending writes to finish.
|
|
|
|
*/
|
|
|
|
void fuse_set_nowrite(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
BUG_ON(!mutex_is_locked(&inode->i_mutex));
|
|
|
|
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
BUG_ON(fi->writectr < 0);
|
|
|
|
fi->writectr += FUSE_NOWRITE;
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allow writepages on inode
|
|
|
|
*
|
|
|
|
* Remove the bias from the writecounter and send any queued
|
|
|
|
* writepages.
|
|
|
|
*/
|
|
|
|
static void __fuse_release_nowrite(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
BUG_ON(fi->writectr != FUSE_NOWRITE);
|
|
|
|
fi->writectr = 0;
|
|
|
|
fuse_flush_writepages(inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
void fuse_release_nowrite(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
|
|
|
|
spin_lock(&fc->lock);
|
|
|
|
__fuse_release_nowrite(inode);
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
}
|
|
|
|
|
2013-12-26 23:51:11 +08:00
|
|
|
static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
|
|
|
|
struct inode *inode,
|
|
|
|
struct fuse_setattr_in *inarg_p,
|
|
|
|
struct fuse_attr_out *outarg_p)
|
|
|
|
{
|
|
|
|
req->in.h.opcode = FUSE_SETATTR;
|
|
|
|
req->in.h.nodeid = get_node_id(inode);
|
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = sizeof(*inarg_p);
|
|
|
|
req->in.args[0].value = inarg_p;
|
|
|
|
req->out.numargs = 1;
|
|
|
|
if (fc->minor < 9)
|
|
|
|
req->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
|
|
|
|
else
|
|
|
|
req->out.args[0].size = sizeof(*outarg_p);
|
|
|
|
req->out.args[0].value = outarg_p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush inode->i_mtime to the server
|
|
|
|
*/
|
2014-04-28 20:19:24 +08:00
|
|
|
int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
|
2013-12-26 23:51:11 +08:00
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
2014-04-28 20:19:23 +08:00
|
|
|
struct fuse_req *req;
|
2013-12-26 23:51:11 +08:00
|
|
|
struct fuse_setattr_in inarg;
|
|
|
|
struct fuse_attr_out outarg;
|
|
|
|
int err;
|
|
|
|
|
2014-04-28 20:19:23 +08:00
|
|
|
req = fuse_get_req_nopages(fc);
|
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2013-12-26 23:51:11 +08:00
|
|
|
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
memset(&outarg, 0, sizeof(outarg));
|
|
|
|
|
2014-04-28 20:19:24 +08:00
|
|
|
inarg.valid = FATTR_MTIME;
|
2013-12-26 23:51:11 +08:00
|
|
|
inarg.mtime = inode->i_mtime.tv_sec;
|
|
|
|
inarg.mtimensec = inode->i_mtime.tv_nsec;
|
2014-04-28 20:19:24 +08:00
|
|
|
if (fc->minor >= 23) {
|
|
|
|
inarg.valid |= FATTR_CTIME;
|
|
|
|
inarg.ctime = inode->i_ctime.tv_sec;
|
|
|
|
inarg.ctimensec = inode->i_ctime.tv_nsec;
|
|
|
|
}
|
2014-04-28 20:19:23 +08:00
|
|
|
if (ff) {
|
|
|
|
inarg.valid |= FATTR_FH;
|
|
|
|
inarg.fh = ff->fh;
|
|
|
|
}
|
2013-12-26 23:51:11 +08:00
|
|
|
fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
|
|
|
|
fuse_request_send(fc, req);
|
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-01-06 16:19:39 +08:00
|
|
|
/*
|
|
|
|
* Set attributes, and at the same time refresh them.
|
|
|
|
*
|
|
|
|
* Truncation is slightly complicated, because the 'truncate' request
|
|
|
|
* may fail, in which case we don't want to touch the mapping.
|
2006-10-17 15:10:06 +08:00
|
|
|
* vmtruncate() doesn't allow for this case, so do the rlimit checking
|
|
|
|
* and the actual truncation by hand.
|
2006-01-06 16:19:39 +08:00
|
|
|
*/
|
2012-12-18 18:05:08 +08:00
|
|
|
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
|
|
|
|
struct file *file)
|
2005-09-10 04:10:29 +08:00
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
loose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the lost of data user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
2005-09-10 04:10:29 +08:00
|
|
|
struct fuse_req *req;
|
|
|
|
struct fuse_setattr_in inarg;
|
|
|
|
struct fuse_attr_out outarg;
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
bool is_truncate = false;
|
2013-10-10 21:10:46 +08:00
|
|
|
bool is_wb = fc->writeback_cache;
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
loff_t oldsize;
|
2005-09-10 04:10:29 +08:00
|
|
|
int err;
|
2014-04-28 20:19:25 +08:00
|
|
|
bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2010-06-04 17:30:03 +08:00
|
|
|
if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
|
|
|
|
attr->ia_valid |= ATTR_FORCE;
|
|
|
|
|
|
|
|
err = inode_change_ok(inode, attr);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2005-09-10 04:10:31 +08:00
|
|
|
|
2011-02-25 21:44:58 +08:00
|
|
|
if (attr->ia_valid & ATTR_OPEN) {
|
|
|
|
if (fc->atomic_o_trunc)
|
|
|
|
return 0;
|
|
|
|
file = NULL;
|
|
|
|
}
|
2007-10-18 18:07:02 +08:00
|
|
|
|
2010-06-04 17:30:04 +08:00
|
|
|
if (attr->ia_valid & ATTR_SIZE)
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
is_truncate = true;
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2012-10-26 23:48:30 +08:00
|
|
|
req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:29 +08:00
|
|
|
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
loose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the lost of data user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
if (is_truncate) {
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
fuse_set_nowrite(inode);
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
loose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the lost of data user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
|
2014-04-28 20:19:25 +08:00
|
|
|
if (trust_local_cmtime && attr->ia_size != inode->i_size)
|
|
|
|
attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
loose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the lost of data user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
}
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safetly measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
|
2005-09-10 04:10:29 +08:00
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
2007-10-18 18:07:05 +08:00
|
|
|
memset(&outarg, 0, sizeof(outarg));
|
2014-04-28 20:19:25 +08:00
|
|
|
iattr_to_fattr(attr, &inarg, trust_local_cmtime);
|
2007-10-18 18:07:00 +08:00
|
|
|
if (file) {
|
|
|
|
struct fuse_file *ff = file->private_data;
|
|
|
|
inarg.valid |= FATTR_FH;
|
|
|
|
inarg.fh = ff->fh;
|
|
|
|
}
|
2007-10-18 18:07:04 +08:00
|
|
|
if (attr->ia_valid & ATTR_SIZE) {
|
|
|
|
/* For mandatory locking in truncate */
|
|
|
|
inarg.valid |= FATTR_LOCKOWNER;
|
|
|
|
inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
|
|
|
|
}
|
2013-12-26 23:51:11 +08:00
|
|
|
fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:29 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
2007-10-17 14:31:01 +08:00
|
|
|
if (err) {
|
|
|
|
if (err == -EINTR)
|
|
|
|
fuse_invalidate_attr(inode);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
goto error;
|
2007-10-17 14:31:01 +08:00
|
|
|
}
|
2005-09-10 04:10:29 +08:00
|
|
|
|
2007-10-17 14:31:01 +08:00
|
|
|
if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
|
|
|
|
make_bad_inode(inode);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
err = -EIO;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock(&fc->lock);
|
2013-12-26 23:51:11 +08:00
|
|
|
/* the kernel maintains i_mtime locally */
|
2014-04-28 20:19:25 +08:00
|
|
|
if (trust_local_cmtime) {
|
|
|
|
if (attr->ia_valid & ATTR_MTIME)
|
|
|
|
inode->i_mtime = attr->ia_mtime;
|
|
|
|
if (attr->ia_valid & ATTR_CTIME)
|
|
|
|
inode->i_ctime = attr->ia_ctime;
|
2014-04-28 20:19:23 +08:00
|
|
|
/* FIXME: clear I_DIRTY_SYNC? */
|
2013-12-26 23:51:11 +08:00
|
|
|
}
|
|
|
|
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
fuse_change_attributes_common(inode, &outarg.attr,
|
|
|
|
attr_timeout(&outarg));
|
|
|
|
oldsize = inode->i_size;
|
2013-10-10 21:10:46 +08:00
|
|
|
/* see the comment in fuse_change_attributes() */
|
|
|
|
if (!is_wb || is_truncate || !S_ISREG(inode->i_mode))
|
|
|
|
i_size_write(inode, outarg.attr.size);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
|
|
|
|
if (is_truncate) {
|
|
|
|
/* NOTE: this may release/reacquire fc->lock */
|
|
|
|
__fuse_release_nowrite(inode);
|
|
|
|
}
|
|
|
|
spin_unlock(&fc->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only call invalidate_inode_pages2() after removing
|
|
|
|
* FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
|
|
|
|
*/
|
2013-10-10 21:10:46 +08:00
|
|
|
if ((is_truncate || !is_wb) &&
|
|
|
|
S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
|
2013-09-13 06:13:56 +08:00
|
|
|
truncate_pagecache(inode, outarg.attr.size);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
invalidate_inode_pages2(inode->i_mapping);
|
2007-10-17 14:31:01 +08:00
|
|
|
}
|
|
|
|
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we are never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
lose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the loss of data the user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
|
2007-10-17 14:31:01 +08:00
|
|
|
return 0;
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
|
|
|
|
error:
|
|
|
|
if (is_truncate)
|
|
|
|
fuse_release_nowrite(inode);
|
|
|
|
|
fuse: hotfix truncate_pagecache() issue
The way how fuse calls truncate_pagecache() from fuse_change_attributes()
is completely wrong. Because, w/o i_mutex held, we are never sure whether
'oldsize' and 'attr->size' are valid by the time of execution of
truncate_pagecache(inode, oldsize, attr->size). In fact, as soon as we
released fc->lock in the middle of fuse_change_attributes(), we completely
lose control of actions which may happen with given inode until we reach
truncate_pagecache. The list of potentially dangerous actions includes
mmap-ed reads and writes, ftruncate(2) and write(2) extending file size.
The typical outcome of doing truncate_pagecache() with outdated arguments
is data corruption from user point of view. This is (in some sense)
acceptable in cases when the issue is triggered by a change of the file on
the server (i.e. externally wrt fuse operation), but it is absolutely
intolerable in scenarios when a single fuse client modifies a file without
any external intervention. A real life case I discovered by fsx-linux
looked like this:
1. Shrinking ftruncate(2) comes to fuse_do_setattr(). The latter sends
FUSE_SETATTR to the server synchronously, but before getting fc->lock ...
2. fuse_dentry_revalidate() is asynchronously called. It sends FUSE_LOOKUP
to the server synchronously, then calls fuse_change_attributes(). The
latter updates i_size, releases fc->lock, but before comparing oldsize vs
attr->size..
3. fuse_do_setattr() from the first step proceeds by acquiring fc->lock and
updating attributes and i_size, but now oldsize is equal to
outarg.attr.size because i_size has just been updated (step 2). Hence,
fuse_do_setattr() returns w/o calling truncate_pagecache().
4. As soon as ftruncate(2) completes, the user extends file size by
write(2) making a hole in the middle of file, then reads data from the hole
either by read(2) or mmap-ed read. The user expects to get zero data from
the hole, but gets stale data because truncate_pagecache() is not executed
yet.
The scenario above illustrates one side of the problem: not truncating the
page cache even though we should. Another side corresponds to truncating
page cache too late, when the state of inode changed significantly.
Theoretically, the following is possible:
1. As in the previous scenario fuse_dentry_revalidate() discovered that
i_size changed (due to our own fuse_do_setattr()) and is going to call
truncate_pagecache() for some 'new_size' it believes valid right now. But
by the time that particular truncate_pagecache() is called ...
2. fuse_do_setattr() returns (either having called truncate_pagecache() or
not -- it doesn't matter).
3. The file is extended either by write(2) or ftruncate(2) or fallocate(2).
4. mmap-ed write makes a page in the extended region dirty.
The result will be the loss of data the user wrote on the fourth step.
The patch is a hotfix resolving the issue in a simplistic way: let's skip
dangerous i_size update and truncate_pagecache if an operation changing
file size is in progress. This simplistic approach looks correct for the
cases w/o external changes. And to handle them properly, more sophisticated
and intrusive techniques (e.g. NFS-like one) would be required. I'd like to
postpone it until the issue is well discussed on the mailing list(s).
Changed in v2:
- improved patch description to cover both sides of the issue.
Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: stable@vger.kernel.org
2013-08-30 21:06:04 +08:00
|
|
|
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
|
fuse: support writable mmap
Quoting Linus (3 years ago, FUSE inclusion discussions):
"User-space filesystems are hard to get right. I'd claim that they
are almost impossible, unless you limit them somehow (shared
writable mappings are the nastiest part - if you don't have those,
you can reasonably limit your problems by limiting the number of
dirty pages you accept through normal "write()" calls)."
Instead of attempting the impossible, I've just waited for the dirty page
accounting infrastructure to materialize (thanks to Peter Zijlstra and
others). This nicely solved the biggest problem: limiting the number of pages
used for write caching.
Some small details remained, however, which this largish patch attempts to
address. It provides a page writeback implementation for fuse, which is
completely safe against VM related deadlocks. Performance may not be very
good for certain usage patterns, but generally it should be acceptable.
It has been tested extensively with fsx-linux and bash-shared-mapping.
Fuse page writeback design
--------------------------
fuse_writepage() allocates a new temporary page with GFP_NOFS|__GFP_HIGHMEM.
It copies the contents of the original page, and queues a WRITE request to the
userspace filesystem using this temp page.
The writeback is finished instantly from the MM's point of view: the page is
removed from the radix trees, and the PageDirty and PageWriteback flags are
cleared.
For the duration of the actual write, the NR_WRITEBACK_TEMP counter is
incremented. The per-bdi writeback count is not decremented until the actual
write completes.
On dirtying the page, fuse waits for a previous write to finish before
proceeding. This makes sure, there can only be one temporary page used at a
time for one cached page.
This approach is wasteful in both memory and CPU bandwidth, so why is this
complication needed?
The basic problem is that there can be no guarantee about the time in which
the userspace filesystem will complete a write. It may be buggy or even
malicious, and fail to complete WRITE requests. We don't want unrelated parts
of the system to grind to a halt in such cases.
Also a filesystem may need additional resources (particularly memory) to
complete a WRITE request. There's a great danger of a deadlock if that
allocation may wait for the writepage to finish.
Currently there are several cases where the kernel can block on page
writeback:
- allocation order is larger than PAGE_ALLOC_COSTLY_ORDER
- page migration
- throttle_vm_writeout (through NR_WRITEBACK)
- sync(2)
Of course in some cases (fsync, msync) we explicitly want to allow blocking.
So for these cases new code has to be added to fuse, since the VM is not
tracking writeback pages for us any more.
As an extra safety measure, the maximum dirty ratio allocated to a single
fuse filesystem is set to 1% by default. This way one (or several) buggy or
malicious fuse filesystems cannot slow down the rest of the system by hogging
dirty memory.
With appropriate privileges, this limit can be raised through
'/sys/class/bdi/<bdi>/max_ratio'.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-30 15:54:41 +08:00
|
|
|
return err;
|
2005-09-10 04:10:29 +08:00
|
|
|
}
|
|
|
|
|
2007-10-18 18:07:00 +08:00
|
|
|
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
|
|
|
|
{
|
2012-12-18 18:05:08 +08:00
|
|
|
struct inode *inode = entry->d_inode;
|
|
|
|
|
|
|
|
if (!fuse_allow_current_process(get_fuse_conn(inode)))
|
|
|
|
return -EACCES;
|
|
|
|
|
2007-10-18 18:07:00 +08:00
|
|
|
if (attr->ia_valid & ATTR_FILE)
|
2012-12-18 18:05:08 +08:00
|
|
|
return fuse_do_setattr(inode, attr, attr->ia_file);
|
2007-10-18 18:07:00 +08:00
|
|
|
else
|
2012-12-18 18:05:08 +08:00
|
|
|
return fuse_do_setattr(inode, attr, NULL);
|
2007-10-18 18:07:00 +08:00
|
|
|
}
|
|
|
|
|
2005-09-10 04:10:28 +08:00
|
|
|
static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
|
|
|
|
struct kstat *stat)
|
|
|
|
{
|
|
|
|
struct inode *inode = entry->d_inode;
|
2007-10-17 14:31:02 +08:00
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
|
2013-01-15 14:30:00 +08:00
|
|
|
if (!fuse_allow_current_process(fc))
|
2007-10-17 14:31:02 +08:00
|
|
|
return -EACCES;
|
|
|
|
|
2007-11-29 08:21:59 +08:00
|
|
|
return fuse_update_attributes(inode, stat, NULL, NULL);
|
2005-09-10 04:10:28 +08:00
|
|
|
}
|
|
|
|
|
2005-09-10 04:10:31 +08:00
|
|
|
/*
 * Set an extended attribute by sending a FUSE_SETXATTR request.
 *
 * If the server replies -ENOSYS, remember that in the connection so
 * future calls short-circuit with -EOPNOTSUPP.  On success the cached
 * attributes are invalidated and ctime updated, since the server side
 * metadata changed.
 */
static int fuse_setxattr(struct dentry *entry, const char *name,
			 const void *value, size_t size, int flags)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_setxattr_in inarg;
	struct fuse_req *req;
	int ret;

	/* Fast path: the server already told us it can't do this */
	if (fc->no_setxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	inarg.flags = flags;

	/* Request carries the header, the attribute name and the value */
	req->in.h.opcode = FUSE_SETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 3;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;
	req->in.args[2].size = size;
	req->in.args[2].value = value;

	fuse_request_send(fc, req);
	ret = req->out.h.error;
	fuse_put_request(fc, req);

	if (ret == -ENOSYS) {
		fc->no_setxattr = 1;
		ret = -EOPNOTSUPP;
	}
	if (!ret) {
		fuse_invalidate_attr(inode);
		fuse_update_ctime(inode);
	}
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Read an extended attribute via FUSE_GETXATTR.
 *
 * With size == 0 this is a length query: only a fuse_getxattr_out
 * holding the attribute size comes back.  With a non-zero size the
 * value itself is copied into the caller's buffer and the number of
 * bytes received is returned.
 */
static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
			     void *value, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	struct fuse_req *req;
	ssize_t ret;

	/* Fast path: the server already told us it can't do this */
	if (fc->no_getxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_GETXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = strlen(name) + 1;
	req->in.args[1].value = name;

	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		/* Variable-sized reply: the attribute value itself */
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = value;
	} else {
		/* Size query: only the length is reported */
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}

	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (ret == -ENOSYS) {
		fc->no_getxattr = 1;
		ret = -EOPNOTSUPP;
	} else if (!ret) {
		ret = size ? req->out.args[0].size : outarg.size;
	}
	fuse_put_request(fc, req);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * List extended attribute names via FUSE_LISTXATTR.
 *
 * Same two-in-one shape as fuse_getxattr(): size == 0 queries the
 * required buffer length, a non-zero size fills the caller's buffer
 * with the NUL-separated name list.  Denied with -EACCES for processes
 * not allowed to use this connection.
 */
static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
	struct inode *inode = entry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_getxattr_in inarg;
	struct fuse_getxattr_out outarg;
	struct fuse_req *req;
	ssize_t ret;

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	/* Fast path: the server already told us it can't do this */
	if (fc->no_listxattr)
		return -EOPNOTSUPP;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.size = size;
	req->in.h.opcode = FUSE_LISTXATTR;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;

	/* This is really two different operations rolled into one */
	req->out.numargs = 1;
	if (size) {
		/* Variable-sized reply: the name list itself */
		req->out.argvar = 1;
		req->out.args[0].size = size;
		req->out.args[0].value = list;
	} else {
		/* Size query: only the length is reported */
		req->out.args[0].size = sizeof(outarg);
		req->out.args[0].value = &outarg;
	}

	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (ret == -ENOSYS) {
		fc->no_listxattr = 1;
		ret = -EOPNOTSUPP;
	} else if (!ret) {
		ret = size ? req->out.args[0].size : outarg.size;
	}
	fuse_put_request(fc, req);
	return ret;
}
|
|
|
|
|
|
|
|
static int fuse_removexattr(struct dentry *entry, const char *name)
|
|
|
|
{
|
|
|
|
struct inode *inode = entry->d_inode;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_req *req;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (fc->no_removexattr)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2012-10-26 23:48:30 +08:00
|
|
|
req = fuse_get_req_nopages(fc);
|
2006-04-11 13:54:58 +08:00
|
|
|
if (IS_ERR(req))
|
|
|
|
return PTR_ERR(req);
|
2005-09-10 04:10:31 +08:00
|
|
|
|
|
|
|
req->in.h.opcode = FUSE_REMOVEXATTR;
|
|
|
|
req->in.h.nodeid = get_node_id(inode);
|
|
|
|
req->in.numargs = 1;
|
|
|
|
req->in.args[0].size = strlen(name) + 1;
|
|
|
|
req->in.args[0].value = name;
|
2008-11-26 19:03:55 +08:00
|
|
|
fuse_request_send(fc, req);
|
2005-09-10 04:10:31 +08:00
|
|
|
err = req->out.h.error;
|
|
|
|
fuse_put_request(fc, req);
|
|
|
|
if (err == -ENOSYS) {
|
|
|
|
fc->no_removexattr = 1;
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
}
|
2014-04-28 20:19:24 +08:00
|
|
|
if (!err) {
|
2013-08-20 14:21:07 +08:00
|
|
|
fuse_invalidate_attr(inode);
|
2014-04-28 20:19:24 +08:00
|
|
|
fuse_update_ctime(inode);
|
|
|
|
}
|
2005-09-10 04:10:31 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-02-12 16:55:38 +08:00
|
|
|
static const struct inode_operations fuse_dir_inode_operations = {
|
2005-09-10 04:10:28 +08:00
|
|
|
.lookup = fuse_lookup,
|
2005-09-10 04:10:29 +08:00
|
|
|
.mkdir = fuse_mkdir,
|
|
|
|
.symlink = fuse_symlink,
|
|
|
|
.unlink = fuse_unlink,
|
|
|
|
.rmdir = fuse_rmdir,
|
|
|
|
.rename = fuse_rename,
|
2014-04-28 22:43:44 +08:00
|
|
|
.rename2 = fuse_rename2,
|
2005-09-10 04:10:29 +08:00
|
|
|
.link = fuse_link,
|
|
|
|
.setattr = fuse_setattr,
|
|
|
|
.create = fuse_create,
|
2012-06-05 21:10:22 +08:00
|
|
|
.atomic_open = fuse_atomic_open,
|
2005-09-10 04:10:29 +08:00
|
|
|
.mknod = fuse_mknod,
|
2005-09-10 04:10:28 +08:00
|
|
|
.permission = fuse_permission,
|
|
|
|
.getattr = fuse_getattr,
|
2005-09-10 04:10:31 +08:00
|
|
|
.setxattr = fuse_setxattr,
|
|
|
|
.getxattr = fuse_getxattr,
|
|
|
|
.listxattr = fuse_listxattr,
|
|
|
|
.removexattr = fuse_removexattr,
|
2005-09-10 04:10:28 +08:00
|
|
|
};
|
|
|
|
|
2006-03-28 17:56:42 +08:00
|
|
|
static const struct file_operations fuse_dir_operations = {
|
2005-09-10 04:10:30 +08:00
|
|
|
.llseek = generic_file_llseek,
|
2005-09-10 04:10:28 +08:00
|
|
|
.read = generic_read_dir,
|
2013-05-18 15:03:58 +08:00
|
|
|
.iterate = fuse_readdir,
|
2005-09-10 04:10:28 +08:00
|
|
|
.open = fuse_dir_open,
|
|
|
|
.release = fuse_dir_release,
|
2005-09-10 04:10:38 +08:00
|
|
|
.fsync = fuse_dir_fsync,
|
2011-12-13 18:58:49 +08:00
|
|
|
.unlocked_ioctl = fuse_dir_ioctl,
|
|
|
|
.compat_ioctl = fuse_dir_compat_ioctl,
|
2005-09-10 04:10:28 +08:00
|
|
|
};
|
|
|
|
|
2007-02-12 16:55:38 +08:00
|
|
|
static const struct inode_operations fuse_common_inode_operations = {
|
2005-09-10 04:10:29 +08:00
|
|
|
.setattr = fuse_setattr,
|
2005-09-10 04:10:28 +08:00
|
|
|
.permission = fuse_permission,
|
|
|
|
.getattr = fuse_getattr,
|
2005-09-10 04:10:31 +08:00
|
|
|
.setxattr = fuse_setxattr,
|
|
|
|
.getxattr = fuse_getxattr,
|
|
|
|
.listxattr = fuse_listxattr,
|
|
|
|
.removexattr = fuse_removexattr,
|
2005-09-10 04:10:28 +08:00
|
|
|
};
|
|
|
|
|
2007-02-12 16:55:38 +08:00
|
|
|
static const struct inode_operations fuse_symlink_inode_operations = {
|
2005-09-10 04:10:29 +08:00
|
|
|
.setattr = fuse_setattr,
|
2005-09-10 04:10:28 +08:00
|
|
|
.follow_link = fuse_follow_link,
|
|
|
|
.put_link = fuse_put_link,
|
|
|
|
.readlink = generic_readlink,
|
|
|
|
.getattr = fuse_getattr,
|
2005-09-10 04:10:31 +08:00
|
|
|
.setxattr = fuse_setxattr,
|
|
|
|
.getxattr = fuse_getxattr,
|
|
|
|
.listxattr = fuse_listxattr,
|
|
|
|
.removexattr = fuse_removexattr,
|
2005-09-10 04:10:28 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
void fuse_init_common(struct inode *inode)
|
|
|
|
{
|
|
|
|
inode->i_op = &fuse_common_inode_operations;
|
|
|
|
}
|
|
|
|
|
|
|
|
void fuse_init_dir(struct inode *inode)
|
|
|
|
{
|
|
|
|
inode->i_op = &fuse_dir_inode_operations;
|
|
|
|
inode->i_fop = &fuse_dir_operations;
|
|
|
|
}
|
|
|
|
|
|
|
|
void fuse_init_symlink(struct inode *inode)
|
|
|
|
{
|
|
|
|
inode->i_op = &fuse_symlink_inode_operations;
|
|
|
|
}
|