2019-05-19 20:08:55 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* linux/fs/nfs/dir.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1992 Rick Sladkey
|
|
|
|
*
|
|
|
|
* nfs directory handling functions
|
|
|
|
*
|
|
|
|
* 10 Apr 1996 Added silly rename for unlink --okir
|
|
|
|
* 28 Sep 1996 Improved directory cache --okir
|
|
|
|
* 23 Aug 1997 Claus Heine claus@momo.math.rwth-aachen.de
|
|
|
|
* Re-implemented silly rename for unlink, newly implemented
|
|
|
|
* silly rename for nfs_rename() following the suggestions
|
|
|
|
* of Olaf Kirch (okir) found in this file.
|
|
|
|
* Following Linus comments on my original hack, this version
|
|
|
|
* depends only on the dcache stuff and doesn't touch the inode
|
|
|
|
* layer (iput() and friends).
|
|
|
|
* 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
|
|
|
|
*/
|
|
|
|
|
2012-07-31 04:05:23 +08:00
|
|
|
#include <linux/module.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/time.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/stat.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/sunrpc/clnt.h>
|
|
|
|
#include <linux/nfs_fs.h>
|
|
|
|
#include <linux/nfs_mount.h>
|
|
|
|
#include <linux/pagemap.h>
|
2006-08-23 08:06:23 +08:00
|
|
|
#include <linux/pagevec.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/namei.h>
|
NFS: Share NFS superblocks per-protocol per-server per-FSID
The attached patch makes NFS share superblocks between mounts from the same
server and FSID over the same protocol.
It does this by creating each superblock with a false root and returning the
real root dentry in the vfsmount presented by get_sb(). The root dentry set
starts off as an anonymous dentry if we don't already have the dentry for its
inode, otherwise it simply returns the dentry we already have.
We may thus end up with several trees of dentries in the superblock, and if at
some later point one of anonymous tree roots is discovered by normal filesystem
activity to be located in another tree within the superblock, the anonymous
root is named and materialises attached to the second tree at the appropriate
point.
Why do it this way? Why not pass an extra argument to the mount() syscall to
indicate the subpath and then pathwalk from the server root to the desired
directory? You can't guarantee this will work for two reasons:
(1) The root and intervening nodes may not be accessible to the client.
With NFS2 and NFS3, for instance, mountd is called on the server to get
the filehandle for the tip of a path. mountd won't give us handles for
anything we don't have permission to access, and so we can't set up NFS
inodes for such nodes, and so can't easily set up dentries (we'd have to
have ghost inodes or something).
With this patch we don't actually create dentries until we get handles
from the server that we can use to set up their inodes, and we don't
actually bind them into the tree until we know for sure where they go.
(2) Inaccessible symbolic links.
If we're asked to mount two exports from the server, eg:
mount warthog:/warthog/aaa/xxx /mmm
mount warthog:/warthog/bbb/yyy /nnn
We may not be able to access anything nearer the root than xxx and yyy,
but we may find out later that /mmm/www/yyy, say, is actually the same
directory as the one mounted on /nnn. What we might then find out, for
example, is that /warthog/bbb was actually a symbolic link to
/warthog/aaa/xxx/www, but we can't actually determine that by talking to
the server until /warthog is made available by NFS.
This would lead to having constructed an errneous dentry tree which we
can't easily fix. We can end up with a dentry marked as a directory when
it should actually be a symlink, or we could end up with an apparently
hardlinked directory.
With this patch we need not make assumptions about the type of a dentry
for which we can't retrieve information, nor need we assume we know its
place in the grand scheme of things until we actually see that place.
This patch reduces the possibility of aliasing in the inode and page caches for
inodes that may be accessed by more than one NFS export. It also reduces the
number of superblocks required for NFS where there are many NFS exports being
used from a server (home directory server + autofs for example).
This in turn makes it simpler to do local caching of network filesystems, as it
can then be guaranteed that there won't be links from multiple inodes in
separate superblocks to the same cache file.
Obviously, cache aliasing between different levels of NFS protocol could still
be a problem, but at least that gives us another key to use when indexing the
cache.
This patch makes the following changes:
(1) The server record construction/destruction has been abstracted out into
its own set of functions to make things easier to get right. These have
been moved into fs/nfs/client.c.
All the code in fs/nfs/client.c has to do with the management of
connections to servers, and doesn't touch superblocks in any way; the
remaining code in fs/nfs/super.c has to do with VFS superblock management.
(2) The sequence of events undertaken by NFS mount is now reordered:
(a) A volume representation (struct nfs_server) is allocated.
(b) A server representation (struct nfs_client) is acquired. This may be
allocated or shared, and is keyed on server address, port and NFS
version.
(c) If allocated, the client representation is initialised. The state
member variable of nfs_client is used to prevent a race during
initialisation from two mounts.
(d) For NFS4 a simple pathwalk is performed, walking from FH to FH to find
the root filehandle for the mount (fs/nfs/getroot.c). For NFS2/3 we
are given the root FH in advance.
(e) The volume FSID is probed for on the root FH.
(f) The volume representation is initialised from the FSINFO record
retrieved on the root FH.
(g) sget() is called to acquire a superblock. This may be allocated or
shared, keyed on client pointer and FSID.
(h) If allocated, the superblock is initialised.
(i) If the superblock is shared, then the new nfs_server record is
discarded.
(j) The root dentry for this mount is looked up from the root FH.
(k) The root dentry for this mount is assigned to the vfsmount.
(3) nfs_readdir_lookup() creates dentries for each of the entries readdir()
returns; this function now attaches disconnected trees from alternate
roots that happen to be discovered attached to a directory being read (in
the same way nfs_lookup() is made to do for lookup ops).
The new d_materialise_unique() function is now used to do this, thus
permitting the whole thing to be done under one set of locks, and thus
avoiding any race between mount and lookup operations on the same
directory.
(4) The client management code uses a new debug facility: NFSDBG_CLIENT which
is set by echoing 1024 to /proc/net/sunrpc/nfs_debug.
(5) Clone mounts are now called xdev mounts.
(6) Use the dentry passed to the statfs() op as the handle for retrieving fs
statistics rather than the root dentry of the superblock (which is now a
dummy).
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-08-23 08:06:13 +08:00
|
|
|
#include <linux/mount.h>
|
2013-07-04 06:02:32 +08:00
|
|
|
#include <linux/swap.h>
|
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 05:22:52 +08:00
|
|
|
#include <linux/sched.h>
|
2010-11-11 20:53:47 +08:00
|
|
|
#include <linux/kmemleak.h>
|
2010-12-09 19:35:25 +08:00
|
|
|
#include <linux/xattr.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include "delegation.h"
|
2006-03-21 02:44:14 +08:00
|
|
|
#include "iostat.h"
|
2007-11-22 07:04:31 +08:00
|
|
|
#include "internal.h"
|
2010-09-17 22:56:50 +08:00
|
|
|
#include "fscache.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-20 06:59:33 +08:00
|
|
|
#include "nfstrace.h"
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* #define NFS_DEBUG_VERBOSE 1 */
|
|
|
|
|
|
|
|
static int nfs_opendir(struct inode *, struct file *);
|
2011-03-24 02:48:29 +08:00
|
|
|
static int nfs_closedir(struct inode *, struct file *);
|
2013-05-18 04:34:50 +08:00
|
|
|
static int nfs_readdir(struct file *, struct dir_context *);
|
2011-07-17 08:44:56 +08:00
|
|
|
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
|
2005-06-23 01:16:29 +08:00
|
|
|
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
|
2010-12-02 03:17:06 +08:00
|
|
|
static void nfs_readdir_clear_array(struct page*);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-28 17:56:42 +08:00
|
|
|
const struct file_operations nfs_dir_operations = {
|
2005-06-23 01:16:29 +08:00
|
|
|
.llseek = nfs_llseek_dir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.read = generic_read_dir,
|
2020-02-03 06:53:56 +08:00
|
|
|
.iterate_shared = nfs_readdir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.open = nfs_opendir,
|
2011-03-24 02:48:29 +08:00
|
|
|
.release = nfs_closedir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.fsync = nfs_fsync_dir,
|
|
|
|
};
|
|
|
|
|
2010-12-02 03:17:06 +08:00
|
|
|
const struct address_space_operations nfs_dir_aops = {
|
|
|
|
.freepage = nfs_readdir_clear_array,
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, const struct cred *cred)
|
2011-03-24 02:48:29 +08:00
|
|
|
{
|
2014-02-08 06:02:08 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
2011-03-24 02:48:29 +08:00
|
|
|
struct nfs_open_dir_context *ctx;
|
|
|
|
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
|
|
|
|
if (ctx != NULL) {
|
2011-03-24 03:04:31 +08:00
|
|
|
ctx->duped = 0;
|
2014-02-08 06:02:08 +08:00
|
|
|
ctx->attr_gencount = nfsi->attr_gencount;
|
2011-03-24 02:48:29 +08:00
|
|
|
ctx->dir_cookie = 0;
|
2011-03-24 03:04:31 +08:00
|
|
|
ctx->dup_cookie = 0;
|
2018-12-03 08:30:30 +08:00
|
|
|
ctx->cred = get_cred(cred);
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_lock(&dir->i_lock);
|
2019-05-22 20:38:57 +08:00
|
|
|
if (list_empty(&nfsi->open_files) &&
|
|
|
|
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
|
|
|
|
nfsi->cache_validity |= NFS_INO_INVALID_DATA |
|
|
|
|
NFS_INO_REVAL_FORCED;
|
2014-02-08 06:02:08 +08:00
|
|
|
list_add(&ctx->list, &nfsi->open_files);
|
|
|
|
spin_unlock(&dir->i_lock);
|
2011-07-31 00:45:35 +08:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2011-03-24 02:48:29 +08:00
|
|
|
}
|
|
|
|
|
2014-02-08 06:02:08 +08:00
|
|
|
static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
|
2011-03-24 02:48:29 +08:00
|
|
|
{
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_lock(&dir->i_lock);
|
|
|
|
list_del(&ctx->list);
|
|
|
|
spin_unlock(&dir->i_lock);
|
2018-12-03 08:30:30 +08:00
|
|
|
put_cred(ctx->cred);
|
2011-03-24 02:48:29 +08:00
|
|
|
kfree(ctx);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Open file
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfs_opendir(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2011-03-24 02:48:29 +08:00
|
|
|
int res = 0;
|
|
|
|
struct nfs_open_dir_context *ctx;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
|
2008-06-12 05:55:42 +08:00
|
|
|
|
|
|
|
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
|
2006-03-21 02:44:24 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
ctx = alloc_nfs_open_dir_context(inode, current_cred());
|
2011-03-24 02:48:29 +08:00
|
|
|
if (IS_ERR(ctx)) {
|
|
|
|
res = PTR_ERR(ctx);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
filp->private_data = ctx;
|
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2011-03-24 02:48:29 +08:00
|
|
|
static int
|
|
|
|
nfs_closedir(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2014-10-22 08:11:25 +08:00
|
|
|
put_nfs_open_dir_context(file_inode(filp), filp->private_data);
|
2011-03-24 02:48:29 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
struct nfs_cache_array_entry {
|
|
|
|
u64 cookie;
|
|
|
|
u64 ino;
|
|
|
|
struct qstr string;
|
2010-11-21 03:26:44 +08:00
|
|
|
unsigned char d_type;
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct nfs_cache_array {
|
2012-03-02 06:00:23 +08:00
|
|
|
int size;
|
2010-09-25 02:48:42 +08:00
|
|
|
int eof_index;
|
|
|
|
u64 last_cookie;
|
2020-03-10 02:24:42 +08:00
|
|
|
struct nfs_cache_array_entry array[];
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
typedef struct {
|
|
|
|
struct file *file;
|
|
|
|
struct page *page;
|
2013-05-18 04:34:50 +08:00
|
|
|
struct dir_context *ctx;
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned long page_index;
|
2005-06-23 01:16:29 +08:00
|
|
|
u64 *dir_cookie;
|
2010-12-01 10:56:32 +08:00
|
|
|
u64 last_cookie;
|
2005-06-23 01:16:29 +08:00
|
|
|
loff_t current_index;
|
2020-02-04 03:49:33 +08:00
|
|
|
loff_t prev_index;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-02-05 22:01:52 +08:00
|
|
|
unsigned long dir_verifier;
|
2007-04-16 07:35:27 +08:00
|
|
|
unsigned long timestamp;
|
2008-10-15 07:16:07 +08:00
|
|
|
unsigned long gencount;
|
2010-09-25 02:48:42 +08:00
|
|
|
unsigned int cache_entry_index;
|
2017-06-20 20:33:44 +08:00
|
|
|
bool plus;
|
|
|
|
bool eof;
|
2005-04-17 06:20:36 +08:00
|
|
|
} nfs_readdir_descriptor_t;
|
|
|
|
|
2020-02-03 06:53:53 +08:00
|
|
|
static
|
|
|
|
void nfs_readdir_init_array(struct page *page)
|
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
|
|
|
|
|
|
|
array = kmap_atomic(page);
|
|
|
|
memset(array, 0, sizeof(struct nfs_cache_array));
|
|
|
|
array->eof_index = -1;
|
|
|
|
kunmap_atomic(array);
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
/*
|
|
|
|
* we are freeing strings created by nfs_add_to_readdir_array()
|
|
|
|
*/
|
|
|
|
static
|
2010-12-02 03:17:06 +08:00
|
|
|
void nfs_readdir_clear_array(struct page *page)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2010-12-02 03:17:06 +08:00
|
|
|
struct nfs_cache_array *array;
|
2010-09-25 02:48:42 +08:00
|
|
|
int i;
|
2010-11-16 09:26:22 +08:00
|
|
|
|
2011-11-25 23:14:33 +08:00
|
|
|
array = kmap_atomic(page);
|
2017-03-11 06:07:46 +08:00
|
|
|
for (i = 0; i < array->size; i++)
|
|
|
|
kfree(array->array[i].string.name);
|
2020-02-03 06:53:53 +08:00
|
|
|
array->size = 0;
|
2011-11-25 23:14:33 +08:00
|
|
|
kunmap_atomic(array);
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* the caller is responsible for freeing qstr.name
|
|
|
|
* when called by nfs_readdir_add_to_array, the strings will be freed in
|
|
|
|
* nfs_clear_readdir_array()
|
|
|
|
*/
|
|
|
|
static
|
2010-10-24 02:53:23 +08:00
|
|
|
int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int len)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
|
|
|
string->len = len;
|
2020-02-03 06:53:55 +08:00
|
|
|
string->name = kmemdup_nul(name, len, GFP_KERNEL);
|
2010-10-24 02:53:23 +08:00
|
|
|
if (string->name == NULL)
|
|
|
|
return -ENOMEM;
|
2010-11-11 20:53:47 +08:00
|
|
|
/*
|
|
|
|
* Avoid a kmemleak false positive. The pointer to the name is stored
|
|
|
|
* in a page cache page which kmemleak does not scan.
|
|
|
|
*/
|
|
|
|
kmemleak_not_leak(string->name);
|
2016-06-10 22:51:30 +08:00
|
|
|
string->hash = full_name_hash(NULL, name, len);
|
2010-10-24 02:53:23 +08:00
|
|
|
return 0;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
|
|
|
|
{
|
2017-05-04 02:52:21 +08:00
|
|
|
struct nfs_cache_array *array = kmap(page);
|
2010-10-24 02:53:23 +08:00
|
|
|
struct nfs_cache_array_entry *cache_entry;
|
|
|
|
int ret;
|
|
|
|
|
2010-11-21 04:18:22 +08:00
|
|
|
cache_entry = &array->array[array->size];
|
|
|
|
|
|
|
|
/* Check that this entry lies within the page bounds */
|
2010-11-16 09:26:22 +08:00
|
|
|
ret = -ENOSPC;
|
2010-11-21 04:18:22 +08:00
|
|
|
if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
|
2010-10-24 02:53:23 +08:00
|
|
|
goto out;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2010-10-24 02:53:23 +08:00
|
|
|
cache_entry->cookie = entry->prev_cookie;
|
|
|
|
cache_entry->ino = entry->ino;
|
2010-11-21 03:26:44 +08:00
|
|
|
cache_entry->d_type = entry->d_type;
|
2010-10-24 02:53:23 +08:00
|
|
|
ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
2010-09-25 02:48:42 +08:00
|
|
|
array->last_cookie = entry->cookie;
|
2010-11-16 09:26:22 +08:00
|
|
|
array->size++;
|
2010-12-08 01:44:56 +08:00
|
|
|
if (entry->eof != 0)
|
2010-09-25 02:48:42 +08:00
|
|
|
array->eof_index = array->size;
|
2010-10-24 02:53:23 +08:00
|
|
|
out:
|
2017-05-04 02:52:21 +08:00
|
|
|
kunmap(page);
|
2010-10-24 02:53:23 +08:00
|
|
|
return ret;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2020-02-04 03:49:33 +08:00
|
|
|
static inline
|
|
|
|
int is_32bit_api(void)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
return in_compat_syscall();
|
|
|
|
#else
|
|
|
|
return (BITS_PER_LONG == 32);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
bool nfs_readdir_use_cookie(const struct file *filp)
|
|
|
|
{
|
|
|
|
if ((filp->f_mode & FMODE_32BITHASH) ||
|
|
|
|
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
|
|
|
int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
2013-05-18 04:34:50 +08:00
|
|
|
loff_t diff = desc->ctx->pos - desc->current_index;
|
2010-09-25 02:48:42 +08:00
|
|
|
unsigned int index;
|
|
|
|
|
|
|
|
if (diff < 0)
|
|
|
|
goto out_eof;
|
|
|
|
if (diff >= array->size) {
|
2010-11-16 09:26:22 +08:00
|
|
|
if (array->eof_index >= 0)
|
2010-09-25 02:48:42 +08:00
|
|
|
goto out_eof;
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
index = (unsigned int)diff;
|
|
|
|
*desc->dir_cookie = array->array[index].cookie;
|
|
|
|
desc->cache_entry_index = index;
|
|
|
|
return 0;
|
|
|
|
out_eof:
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2010-09-25 02:48:42 +08:00
|
|
|
return -EBADCOOKIE;
|
|
|
|
}
|
|
|
|
|
2014-01-29 02:47:46 +08:00
|
|
|
static bool
|
|
|
|
nfs_readdir_inode_mapping_valid(struct nfs_inode *nfsi)
|
|
|
|
{
|
|
|
|
if (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
|
|
|
|
return false;
|
|
|
|
smp_rmb();
|
|
|
|
return !test_bit(NFS_INO_INVALIDATING, &nfsi->flags);
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
|
|
|
int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
|
|
|
int i;
|
2011-03-24 03:04:31 +08:00
|
|
|
loff_t new_pos;
|
2010-09-25 02:48:42 +08:00
|
|
|
int status = -EAGAIN;
|
|
|
|
|
|
|
|
for (i = 0; i < array->size; i++) {
|
|
|
|
if (array->array[i].cookie == *desc->dir_cookie) {
|
2013-01-24 06:07:38 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
|
2011-07-31 00:45:35 +08:00
|
|
|
struct nfs_open_dir_context *ctx = desc->file->private_data;
|
|
|
|
|
2011-03-24 03:04:31 +08:00
|
|
|
new_pos = desc->current_index + i;
|
2014-01-29 02:47:46 +08:00
|
|
|
if (ctx->attr_gencount != nfsi->attr_gencount ||
|
|
|
|
!nfs_readdir_inode_mapping_valid(nfsi)) {
|
2011-07-31 00:45:35 +08:00
|
|
|
ctx->duped = 0;
|
|
|
|
ctx->attr_gencount = nfsi->attr_gencount;
|
2020-02-04 03:49:33 +08:00
|
|
|
} else if (new_pos < desc->prev_index) {
|
2011-07-31 00:45:35 +08:00
|
|
|
if (ctx->duped > 0
|
|
|
|
&& ctx->dup_cookie == *desc->dir_cookie) {
|
|
|
|
if (printk_ratelimit()) {
|
2013-09-16 22:53:17 +08:00
|
|
|
pr_notice("NFS: directory %pD2 contains a readdir loop."
|
2011-07-31 00:45:35 +08:00
|
|
|
"Please contact your server vendor. "
|
2014-04-05 20:45:57 +08:00
|
|
|
"The file: %.*s has duplicate cookie %llu\n",
|
|
|
|
desc->file, array->array[i].string.len,
|
|
|
|
array->array[i].string.name, *desc->dir_cookie);
|
2011-07-31 00:45:35 +08:00
|
|
|
}
|
|
|
|
status = -ELOOP;
|
|
|
|
goto out;
|
|
|
|
}
|
2011-03-24 03:04:31 +08:00
|
|
|
ctx->dup_cookie = *desc->dir_cookie;
|
2011-07-31 00:45:35 +08:00
|
|
|
ctx->duped = -1;
|
2011-03-24 03:04:31 +08:00
|
|
|
}
|
2020-02-04 03:49:33 +08:00
|
|
|
if (nfs_readdir_use_cookie(desc->file))
|
|
|
|
desc->ctx->pos = *desc->dir_cookie;
|
|
|
|
else
|
|
|
|
desc->ctx->pos = new_pos;
|
|
|
|
desc->prev_index = new_pos;
|
2010-09-25 02:48:42 +08:00
|
|
|
desc->cache_entry_index = i;
|
2010-12-08 01:44:56 +08:00
|
|
|
return 0;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
}
|
2010-12-08 01:44:56 +08:00
|
|
|
if (array->eof_index >= 0) {
|
2010-11-16 09:26:22 +08:00
|
|
|
status = -EBADCOOKIE;
|
2010-12-08 01:41:58 +08:00
|
|
|
if (*desc->dir_cookie == array->last_cookie)
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2010-11-16 09:26:22 +08:00
|
|
|
}
|
2011-07-31 00:45:35 +08:00
|
|
|
out:
|
2010-09-25 02:48:42 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
2010-12-08 01:44:56 +08:00
|
|
|
int status;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2017-05-04 02:52:21 +08:00
|
|
|
array = kmap(desc->page);
|
2010-09-25 02:48:42 +08:00
|
|
|
|
|
|
|
if (*desc->dir_cookie == 0)
|
|
|
|
status = nfs_readdir_search_for_pos(array, desc);
|
|
|
|
else
|
|
|
|
status = nfs_readdir_search_for_cookie(array, desc);
|
|
|
|
|
2010-12-08 01:44:56 +08:00
|
|
|
if (status == -EAGAIN) {
|
2010-12-01 10:56:32 +08:00
|
|
|
desc->last_cookie = array->last_cookie;
|
2011-03-23 20:43:09 +08:00
|
|
|
desc->current_index += array->size;
|
2010-12-08 01:44:56 +08:00
|
|
|
desc->page_index++;
|
|
|
|
}
|
2017-05-04 02:52:21 +08:00
|
|
|
kunmap(desc->page);
|
2010-09-25 02:48:42 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill a page with xdr information before transferring to the cache page */
|
|
|
|
static
|
2010-10-21 03:44:37 +08:00
|
|
|
int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
|
2010-09-25 02:48:42 +08:00
|
|
|
struct nfs_entry *entry, struct file *file, struct inode *inode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2011-03-24 02:48:29 +08:00
|
|
|
struct nfs_open_dir_context *ctx = file->private_data;
|
2018-12-03 08:30:30 +08:00
|
|
|
const struct cred *cred = ctx->cred;
|
2008-10-15 07:16:07 +08:00
|
|
|
unsigned long timestamp, gencount;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
|
|
|
again:
|
|
|
|
timestamp = jiffies;
|
2008-10-15 07:16:07 +08:00
|
|
|
gencount = nfs_inc_attr_generation_counter();
|
2020-02-05 22:01:52 +08:00
|
|
|
desc->dir_verifier = nfs_save_change_attribute(inode);
|
2016-03-27 04:14:39 +08:00
|
|
|
error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
|
2005-04-17 06:20:36 +08:00
|
|
|
NFS_SERVER(inode)->dtsize, desc->plus);
|
|
|
|
if (error < 0) {
|
|
|
|
/* We requested READDIRPLUS, but the server doesn't grok it */
|
|
|
|
if (error == -ENOTSUPP && desc->plus) {
|
|
|
|
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
|
2008-01-23 14:58:59 +08:00
|
|
|
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
|
2017-06-20 20:33:44 +08:00
|
|
|
desc->plus = false;
|
2005-04-17 06:20:36 +08:00
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
goto error;
|
|
|
|
}
|
2007-04-16 07:35:27 +08:00
|
|
|
desc->timestamp = timestamp;
|
2008-10-15 07:16:07 +08:00
|
|
|
desc->gencount = gencount;
|
2010-09-25 02:48:42 +08:00
|
|
|
error:
|
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2010-12-14 22:58:11 +08:00
|
|
|
static int xdr_decode(nfs_readdir_descriptor_t *desc,
|
|
|
|
struct nfs_entry *entry, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2020-02-04 03:49:33 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
2010-12-14 22:58:11 +08:00
|
|
|
int error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-02-04 03:49:33 +08:00
|
|
|
error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus);
|
2010-12-14 22:58:11 +08:00
|
|
|
if (error)
|
|
|
|
return error;
|
2010-09-25 02:48:42 +08:00
|
|
|
entry->fattr->time_start = desc->timestamp;
|
|
|
|
entry->fattr->gencount = desc->gencount;
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2015-02-24 07:51:32 +08:00
|
|
|
/* Match file and dirent using either filehandle or fileid
|
|
|
|
* Note: caller is responsible for checking the fsid
|
|
|
|
*/
|
2010-09-25 06:50:01 +08:00
|
|
|
static
|
|
|
|
int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
|
|
|
|
{
|
2016-06-18 04:48:27 +08:00
|
|
|
struct inode *inode;
|
2015-02-24 07:51:32 +08:00
|
|
|
struct nfs_inode *nfsi;
|
|
|
|
|
2015-03-18 06:25:59 +08:00
|
|
|
if (d_really_is_negative(dentry))
|
|
|
|
return 0;
|
2015-02-24 07:51:32 +08:00
|
|
|
|
2016-06-18 04:48:27 +08:00
|
|
|
inode = d_inode(dentry);
|
|
|
|
if (is_bad_inode(inode) || NFS_STALE(inode))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nfsi = NFS_I(inode);
|
2016-09-23 01:38:52 +08:00
|
|
|
if (entry->fattr->fileid != nfsi->fileid)
|
|
|
|
return 0;
|
|
|
|
if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0)
|
|
|
|
return 0;
|
|
|
|
return 1;
|
2010-09-25 06:50:01 +08:00
|
|
|
}
|
|
|
|
|
2012-05-02 05:37:59 +08:00
|
|
|
static
|
2013-05-18 04:34:50 +08:00
|
|
|
bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
|
2012-05-02 05:37:59 +08:00
|
|
|
{
|
|
|
|
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
|
|
|
|
return false;
|
|
|
|
if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
|
|
|
|
return true;
|
2013-05-18 04:34:50 +08:00
|
|
|
if (ctx->pos == 0)
|
2012-05-02 05:37:59 +08:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2016-11-20 00:21:54 +08:00
|
|
|
* This function is called by the lookup and getattr code to request the
|
|
|
|
* use of readdirplus to accelerate any future lookups in the same
|
2012-05-02 05:37:59 +08:00
|
|
|
* directory.
|
|
|
|
*/
|
|
|
|
void nfs_advise_use_readdirplus(struct inode *dir)
|
|
|
|
{
|
2016-11-20 00:21:54 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
|
|
|
|
|
|
|
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
|
|
|
|
!list_empty(&nfsi->open_files))
|
|
|
|
set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
|
2012-05-02 05:37:59 +08:00
|
|
|
}
|
|
|
|
|
2014-02-08 06:02:08 +08:00
|
|
|
/*
|
|
|
|
* This function is mainly for use by nfs_getattr().
|
|
|
|
*
|
|
|
|
* If this is an 'ls -l', we want to force use of readdirplus.
|
|
|
|
* Do this by checking if there is an active file descriptor
|
|
|
|
* and calling nfs_advise_use_readdirplus, then forcing a
|
|
|
|
* cache flush.
|
|
|
|
*/
|
|
|
|
void nfs_force_use_readdirplus(struct inode *dir)
|
|
|
|
{
|
2016-11-20 00:21:54 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
|
|
|
|
|
|
|
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
|
|
|
|
!list_empty(&nfsi->open_files)) {
|
|
|
|
set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
|
2020-01-23 09:45:39 +08:00
|
|
|
invalidate_mapping_pages(dir->i_mapping,
|
|
|
|
nfsi->page_index + 1, -1);
|
2014-02-08 06:02:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-25 06:50:01 +08:00
|
|
|
static
|
2020-02-05 22:01:52 +08:00
|
|
|
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
|
|
|
|
unsigned long dir_verifier)
|
2010-09-25 06:50:01 +08:00
|
|
|
{
|
2012-05-11 04:14:12 +08:00
|
|
|
struct qstr filename = QSTR_INIT(entry->name, entry->len);
|
2016-04-29 07:52:56 +08:00
|
|
|
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
|
2010-10-24 02:53:23 +08:00
|
|
|
struct dentry *dentry;
|
|
|
|
struct dentry *alias;
|
2010-09-25 06:50:01 +08:00
|
|
|
struct inode *inode;
|
2013-05-23 00:50:44 +08:00
|
|
|
int status;
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2015-02-24 07:51:32 +08:00
|
|
|
if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
|
|
|
|
return;
|
2015-02-23 05:35:36 +08:00
|
|
|
if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
|
|
|
|
return;
|
2016-09-21 02:34:24 +08:00
|
|
|
if (filename.len == 0)
|
|
|
|
return;
|
|
|
|
/* Validate that the name doesn't contain any illegal '\0' */
|
|
|
|
if (strnlen(filename.name, filename.len) != filename.len)
|
|
|
|
return;
|
|
|
|
/* ...or '/' */
|
|
|
|
if (strnchr(filename.name, filename.len, '/'))
|
|
|
|
return;
|
2010-10-24 02:53:23 +08:00
|
|
|
if (filename.name[0] == '.') {
|
|
|
|
if (filename.len == 1)
|
|
|
|
return;
|
|
|
|
if (filename.len == 2 && filename.name[1] == '.')
|
|
|
|
return;
|
|
|
|
}
|
2016-06-10 22:51:30 +08:00
|
|
|
filename.hash = full_name_hash(parent, filename.name, filename.len);
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2010-10-24 02:53:23 +08:00
|
|
|
dentry = d_lookup(parent, &filename);
|
2016-04-29 07:52:56 +08:00
|
|
|
again:
|
|
|
|
if (!dentry) {
|
|
|
|
dentry = d_alloc_parallel(parent, &filename, &wq);
|
|
|
|
if (IS_ERR(dentry))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!d_in_lookup(dentry)) {
|
2015-02-23 05:35:36 +08:00
|
|
|
/* Is there a mountpoint here? If so, just exit */
|
|
|
|
if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
|
|
|
|
&entry->fattr->fsid))
|
|
|
|
goto out;
|
2010-09-25 06:50:01 +08:00
|
|
|
if (nfs_same_file(dentry, entry)) {
|
2016-09-23 01:38:52 +08:00
|
|
|
if (!entry->fh->size)
|
|
|
|
goto out;
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_set_verifier(dentry, dir_verifier);
|
2015-03-18 06:25:59 +08:00
|
|
|
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
|
2013-05-23 00:50:44 +08:00
|
|
|
if (!status)
|
2015-03-18 06:25:59 +08:00
|
|
|
nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label);
|
2010-09-25 06:50:01 +08:00
|
|
|
goto out;
|
|
|
|
} else {
|
2014-02-14 01:46:25 +08:00
|
|
|
d_invalidate(dentry);
|
2010-09-25 06:50:01 +08:00
|
|
|
dput(dentry);
|
2016-04-29 07:52:56 +08:00
|
|
|
dentry = NULL;
|
|
|
|
goto again;
|
2010-09-25 06:50:01 +08:00
|
|
|
}
|
|
|
|
}
|
2016-09-23 01:38:52 +08:00
|
|
|
if (!entry->fh->size) {
|
|
|
|
d_lookup_done(dentry);
|
|
|
|
goto out;
|
|
|
|
}
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2013-05-23 00:50:42 +08:00
|
|
|
inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
|
2014-10-13 10:24:21 +08:00
|
|
|
alias = d_splice_alias(inode, dentry);
|
2016-04-29 07:52:56 +08:00
|
|
|
d_lookup_done(dentry);
|
|
|
|
if (alias) {
|
|
|
|
if (IS_ERR(alias))
|
|
|
|
goto out;
|
|
|
|
dput(dentry);
|
|
|
|
dentry = alias;
|
|
|
|
}
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_set_verifier(dentry, dir_verifier);
|
2010-09-25 06:50:01 +08:00
|
|
|
out:
|
|
|
|
dput(dentry);
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
/* Perform conversion from xdr to cache array */
|
|
|
|
static
|
2010-11-16 09:26:22 +08:00
|
|
|
int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
|
2011-01-09 06:45:38 +08:00
|
|
|
struct page **xdr_pages, struct page *page, unsigned int buflen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-10-21 03:44:29 +08:00
|
|
|
struct xdr_stream stream;
|
2011-05-20 02:16:47 +08:00
|
|
|
struct xdr_buf buf;
|
2011-01-09 06:45:38 +08:00
|
|
|
struct page *scratch;
|
2010-10-22 04:33:16 +08:00
|
|
|
struct nfs_cache_array *array;
|
2010-11-21 01:43:45 +08:00
|
|
|
unsigned int count = 0;
|
|
|
|
int status;
|
2010-10-21 03:44:29 +08:00
|
|
|
|
2011-01-09 06:45:38 +08:00
|
|
|
scratch = alloc_page(GFP_KERNEL);
|
|
|
|
if (scratch == NULL)
|
|
|
|
return -ENOMEM;
|
2010-10-21 03:44:29 +08:00
|
|
|
|
2015-04-22 02:17:35 +08:00
|
|
|
if (buflen == 0)
|
|
|
|
goto out_nopages;
|
|
|
|
|
2011-05-20 02:16:47 +08:00
|
|
|
xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
|
2011-01-09 06:45:38 +08:00
|
|
|
xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
|
2010-10-22 04:33:16 +08:00
|
|
|
|
|
|
|
do {
|
|
|
|
status = xdr_decode(desc, entry, &stream);
|
2010-11-16 09:26:22 +08:00
|
|
|
if (status != 0) {
|
|
|
|
if (status == -EAGAIN)
|
|
|
|
status = 0;
|
2010-10-22 04:33:16 +08:00
|
|
|
break;
|
2010-11-16 09:26:22 +08:00
|
|
|
}
|
2010-10-22 04:33:16 +08:00
|
|
|
|
2010-11-21 01:43:45 +08:00
|
|
|
count++;
|
|
|
|
|
2017-06-20 20:33:44 +08:00
|
|
|
if (desc->plus)
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_prime_dcache(file_dentry(desc->file), entry,
|
|
|
|
desc->dir_verifier);
|
2010-11-16 09:26:22 +08:00
|
|
|
|
Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
This reverts commit be4c2d4723a4a637f0d1b4f7c66447141a4b3564.
That commit caused a severe memory leak in nfs_readdir_make_qstr().
When listing a directory with more than 100 files (this is how many
struct nfs_cache_array_entry elements fit in one 4kB page), all
allocated file name strings past those 100 leak.
The root of the leakage is that those string pointers are managed in
pages which are never linked into the page cache.
fs/nfs/dir.c puts pages into the page cache by calling
read_cache_page(); the callback function nfs_readdir_filler() will
then fill the given page struct which was passed to it, which is
already linked in the page cache (by do_read_cache_page() calling
add_to_page_cache_lru()).
Commit be4c2d4723a4 added another (local) array of allocated pages, to
be filled with more data, instead of discarding excess items received
from the NFS server. Those additional pages can be used by the next
nfs_readdir_filler() call (from within the same nfs_readdir() call).
The leak happens when some of those additional pages are never used
(copied to the page cache using copy_highpage()). The pages will be
freed by nfs_readdir_free_pages(), but their contents will not. The
commit did not invoke nfs_readdir_clear_array() (and doing so would
have been dangerous, because it did not track which of those pages
were already copied to the page cache, risking double free bugs).
How to reproduce the leak:
- Use a kernel with CONFIG_SLUB_DEBUG_ON.
- Create a directory on a NFS mount with more than 100 files with
names long enough to use the "kmalloc-32" slab (so we can easily
look up the allocation counts):
for i in `seq 110`; do touch ${i}_0123456789abcdef; done
- Drop all caches:
echo 3 >/proc/sys/vm/drop_caches
- Check the allocation counter:
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564391 nfs_readdir_add_to_array+0x73/0xd0 age=534558/4791307/6540952 pid=370-1048386 cpus=0-47 nodes=0-1
- Request a directory listing and check the allocation counters again:
ls
[...]
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564511 nfs_readdir_add_to_array+0x73/0xd0 age=207/4792999/6542663 pid=370-1048386 cpus=0-47 nodes=0-1
There are now 120 new allocations.
- Drop all caches and check the counters again:
echo 3 >/proc/sys/vm/drop_caches
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564401 nfs_readdir_add_to_array+0x73/0xd0 age=735/4793524/6543176 pid=370-1048386 cpus=0-47 nodes=0-1
110 allocations are gone, but 10 have leaked and will never be freed.
Unhelpfully, those allocations are explicitly excluded from KMEMLEAK,
that's why my initial attempts with KMEMLEAK were not successful:
/*
* Avoid a kmemleak false positive. The pointer to the name is stored
* in a page cache page which kmemleak does not scan.
*/
kmemleak_not_leak(string->name);
It would be possible to solve this bug without reverting the whole
commit:
- keep track of which pages were not used, and call
nfs_readdir_clear_array() on them, or
- manually link those pages into the page cache
But for now I have decided to just revert the commit, because the real
fix would require complex considerations, risking more dangerous
(crash) bugs, which may seem unsuitable for the stable branches.
Signed-off-by: Max Kellermann <mk@cm4all.com>
Cc: stable@vger.kernel.org # v5.1+
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2019-07-12 22:18:06 +08:00
|
|
|
status = nfs_readdir_add_to_array(entry, page);
|
2010-11-16 09:26:22 +08:00
|
|
|
if (status != 0)
|
|
|
|
break;
|
2010-10-22 04:33:16 +08:00
|
|
|
} while (!entry->eof);
|
|
|
|
|
2015-04-22 02:17:35 +08:00
|
|
|
out_nopages:
|
2010-12-08 01:44:56 +08:00
|
|
|
if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
|
Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
This reverts commit be4c2d4723a4a637f0d1b4f7c66447141a4b3564.
That commit caused a severe memory leak in nfs_readdir_make_qstr().
When listing a directory with more than 100 files (this is how many
struct nfs_cache_array_entry elements fit in one 4kB page), all
allocated file name strings past those 100 leak.
The root of the leakage is that those string pointers are managed in
pages which are never linked into the page cache.
fs/nfs/dir.c puts pages into the page cache by calling
read_cache_page(); the callback function nfs_readdir_filler() will
then fill the given page struct which was passed to it, which is
already linked in the page cache (by do_read_cache_page() calling
add_to_page_cache_lru()).
Commit be4c2d4723a4 added another (local) array of allocated pages, to
be filled with more data, instead of discarding excess items received
from the NFS server. Those additional pages can be used by the next
nfs_readdir_filler() call (from within the same nfs_readdir() call).
The leak happens when some of those additional pages are never used
(copied to the page cache using copy_highpage()). The pages will be
freed by nfs_readdir_free_pages(), but their contents will not. The
commit did not invoke nfs_readdir_clear_array() (and doing so would
have been dangerous, because it did not track which of those pages
were already copied to the page cache, risking double free bugs).
How to reproduce the leak:
- Use a kernel with CONFIG_SLUB_DEBUG_ON.
- Create a directory on a NFS mount with more than 100 files with
names long enough to use the "kmalloc-32" slab (so we can easily
look up the allocation counts):
for i in `seq 110`; do touch ${i}_0123456789abcdef; done
- Drop all caches:
echo 3 >/proc/sys/vm/drop_caches
- Check the allocation counter:
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564391 nfs_readdir_add_to_array+0x73/0xd0 age=534558/4791307/6540952 pid=370-1048386 cpus=0-47 nodes=0-1
- Request a directory listing and check the allocation counters again:
ls
[...]
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564511 nfs_readdir_add_to_array+0x73/0xd0 age=207/4792999/6542663 pid=370-1048386 cpus=0-47 nodes=0-1
There are now 120 new allocations.
- Drop all caches and check the counters again:
echo 3 >/proc/sys/vm/drop_caches
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564401 nfs_readdir_add_to_array+0x73/0xd0 age=735/4793524/6543176 pid=370-1048386 cpus=0-47 nodes=0-1
110 allocations are gone, but 10 have leaked and will never be freed.
Unhelpfully, those allocations are explicitly excluded from KMEMLEAK,
that's why my initial attempts with KMEMLEAK were not successful:
/*
* Avoid a kmemleak false positive. The pointer to the name is stored
* in a page cache page which kmemleak does not scan.
*/
kmemleak_not_leak(string->name);
It would be possible to solve this bug without reverting the whole
commit:
- keep track of which pages were not used, and call
nfs_readdir_clear_array() on them, or
- manually link those pages into the page cache
But for now I have decided to just revert the commit, because the real
fix would require complex considerations, risking more dangerous
(crash) bugs, which may seem unsuitable for the stable branches.
Signed-off-by: Max Kellermann <mk@cm4all.com>
Cc: stable@vger.kernel.org # v5.1+
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2019-07-12 22:18:06 +08:00
|
|
|
array = kmap(page);
|
2017-05-04 02:52:21 +08:00
|
|
|
array->eof_index = array->size;
|
|
|
|
status = 0;
|
Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
This reverts commit be4c2d4723a4a637f0d1b4f7c66447141a4b3564.
That commit caused a severe memory leak in nfs_readdir_make_qstr().
When listing a directory with more than 100 files (this is how many
struct nfs_cache_array_entry elements fit in one 4kB page), all
allocated file name strings past those 100 leak.
The root of the leakage is that those string pointers are managed in
pages which are never linked into the page cache.
fs/nfs/dir.c puts pages into the page cache by calling
read_cache_page(); the callback function nfs_readdir_filler() will
then fill the given page struct which was passed to it, which is
already linked in the page cache (by do_read_cache_page() calling
add_to_page_cache_lru()).
Commit be4c2d4723a4 added another (local) array of allocated pages, to
be filled with more data, instead of discarding excess items received
from the NFS server. Those additional pages can be used by the next
nfs_readdir_filler() call (from within the same nfs_readdir() call).
The leak happens when some of those additional pages are never used
(copied to the page cache using copy_highpage()). The pages will be
freed by nfs_readdir_free_pages(), but their contents will not. The
commit did not invoke nfs_readdir_clear_array() (and doing so would
have been dangerous, because it did not track which of those pages
were already copied to the page cache, risking double free bugs).
How to reproduce the leak:
- Use a kernel with CONFIG_SLUB_DEBUG_ON.
- Create a directory on a NFS mount with more than 100 files with
names long enough to use the "kmalloc-32" slab (so we can easily
look up the allocation counts):
for i in `seq 110`; do touch ${i}_0123456789abcdef; done
- Drop all caches:
echo 3 >/proc/sys/vm/drop_caches
- Check the allocation counter:
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564391 nfs_readdir_add_to_array+0x73/0xd0 age=534558/4791307/6540952 pid=370-1048386 cpus=0-47 nodes=0-1
- Request a directory listing and check the allocation counters again:
ls
[...]
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564511 nfs_readdir_add_to_array+0x73/0xd0 age=207/4792999/6542663 pid=370-1048386 cpus=0-47 nodes=0-1
There are now 120 new allocations.
- Drop all caches and check the counters again:
echo 3 >/proc/sys/vm/drop_caches
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564401 nfs_readdir_add_to_array+0x73/0xd0 age=735/4793524/6543176 pid=370-1048386 cpus=0-47 nodes=0-1
110 allocations are gone, but 10 have leaked and will never be freed.
Unhelpfully, those allocations are explicitly excluded from KMEMLEAK,
that's why my initial attempts with KMEMLEAK were not successful:
/*
* Avoid a kmemleak false positive. The pointer to the name is stored
* in a page cache page which kmemleak does not scan.
*/
kmemleak_not_leak(string->name);
It would be possible to solve this bug without reverting the whole
commit:
- keep track of which pages were not used, and call
nfs_readdir_clear_array() on them, or
- manually link those pages into the page cache
But for now I have decided to just revert the commit, because the real
fix would require complex considerations, risking more dangerous
(crash) bugs, which may seem unsuitable for the stable branches.
Signed-off-by: Max Kellermann <mk@cm4all.com>
Cc: stable@vger.kernel.org # v5.1+
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2019-07-12 22:18:06 +08:00
|
|
|
kunmap(page);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2011-01-09 06:45:38 +08:00
|
|
|
|
|
|
|
put_page(scratch);
|
2010-11-16 09:26:22 +08:00
|
|
|
return status;
|
2010-10-21 03:44:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the references on the first @npages entries of @pages. */
static
void nfs_readdir_free_pages(struct page **pages, unsigned int npages)
{
	unsigned int idx;

	for (idx = 0; idx < npages; idx++)
		put_page(pages[idx]);
}
|
|
|
|
|
|
|
|
/*
|
2019-02-16 08:38:40 +08:00
|
|
|
* nfs_readdir_alloc_pages() will allocate pages that must be freed with a call
|
|
|
|
* to nfs_readdir_free_pages()
|
2010-10-21 03:44:37 +08:00
|
|
|
*/
|
|
|
|
static
|
2015-07-14 02:01:25 +08:00
|
|
|
int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
|
2010-10-21 03:44:37 +08:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
struct page *page = alloc_page(GFP_KERNEL);
|
|
|
|
if (page == NULL)
|
|
|
|
goto out_freepages;
|
|
|
|
pages[i] = page;
|
|
|
|
}
|
2011-01-09 06:45:38 +08:00
|
|
|
return 0;
|
2010-10-21 03:44:37 +08:00
|
|
|
|
|
|
|
out_freepages:
|
2015-07-14 02:01:25 +08:00
|
|
|
nfs_readdir_free_pages(pages, i);
|
2011-01-09 06:45:38 +08:00
|
|
|
return -ENOMEM;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
|
|
|
int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
|
2005-06-23 01:16:29 +08:00
|
|
|
{
|
2010-10-21 03:44:37 +08:00
|
|
|
struct page *pages[NFS_MAX_READDIR_PAGES];
|
2010-09-25 02:48:42 +08:00
|
|
|
struct nfs_entry entry;
|
|
|
|
struct file *file = desc->file;
|
|
|
|
struct nfs_cache_array *array;
|
2010-11-16 09:26:22 +08:00
|
|
|
int status = -ENOMEM;
|
2010-10-21 03:44:37 +08:00
|
|
|
unsigned int array_size = ARRAY_SIZE(pages);
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-02-03 06:53:53 +08:00
|
|
|
nfs_readdir_init_array(page);
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
entry.prev_cookie = 0;
|
2010-12-01 10:56:32 +08:00
|
|
|
entry.cookie = desc->last_cookie;
|
2010-09-25 02:48:42 +08:00
|
|
|
entry.eof = 0;
|
|
|
|
entry.fh = nfs_alloc_fhandle();
|
|
|
|
entry.fattr = nfs_alloc_fattr();
|
2010-12-14 22:58:11 +08:00
|
|
|
entry.server = NFS_SERVER(inode);
|
2010-09-25 02:48:42 +08:00
|
|
|
if (entry.fh == NULL || entry.fattr == NULL)
|
|
|
|
goto out;
|
2005-06-23 01:16:29 +08:00
|
|
|
|
2013-05-23 00:50:43 +08:00
|
|
|
entry.label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
|
|
|
|
if (IS_ERR(entry.label)) {
|
|
|
|
status = PTR_ERR(entry.label);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-05-04 02:52:21 +08:00
|
|
|
array = kmap(page);
|
2005-06-23 01:16:29 +08:00
|
|
|
|
2015-07-14 02:01:25 +08:00
|
|
|
status = nfs_readdir_alloc_pages(pages, array_size);
|
2011-01-09 06:45:38 +08:00
|
|
|
if (status < 0)
|
2010-09-25 02:48:42 +08:00
|
|
|
goto out_release_array;
|
|
|
|
do {
|
2010-11-16 09:26:22 +08:00
|
|
|
unsigned int pglen;
|
2010-10-21 03:44:37 +08:00
|
|
|
status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode);
|
2010-10-21 03:44:29 +08:00
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
if (status < 0)
|
2005-06-23 01:16:29 +08:00
|
|
|
break;
|
2010-11-16 09:26:22 +08:00
|
|
|
pglen = status;
|
2011-01-09 06:45:38 +08:00
|
|
|
status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
|
2010-11-16 09:26:22 +08:00
|
|
|
if (status < 0) {
|
|
|
|
if (status == -ENOSPC)
|
|
|
|
status = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} while (array->eof_index < 0);
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2015-07-14 02:01:25 +08:00
|
|
|
nfs_readdir_free_pages(pages, array_size);
|
2010-09-25 02:48:42 +08:00
|
|
|
out_release_array:
|
2017-05-04 02:52:21 +08:00
|
|
|
kunmap(page);
|
2013-05-23 00:50:43 +08:00
|
|
|
nfs4_label_free(entry.label);
|
2010-09-25 02:48:42 +08:00
|
|
|
out:
|
|
|
|
nfs_free_fattr(entry.fattr);
|
|
|
|
nfs_free_fhandle(entry.fh);
|
2005-06-23 01:16:29 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-09-25 02:48:42 +08:00
|
|
|
* Now we cache directories properly, by converting xdr information
|
|
|
|
* to an array that can be used for lookups later. This results in
|
|
|
|
* fewer cache pages, since we can store more information on each page.
|
|
|
|
* We only need to convert from xdr once so future lookups are much simpler
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
2019-05-02 00:06:35 +08:00
|
|
|
int nfs_readdir_filler(void *data, struct page* page)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2019-05-02 00:06:35 +08:00
|
|
|
nfs_readdir_descriptor_t *desc = data;
|
2013-01-24 06:07:38 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
2010-11-16 09:26:22 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
This reverts commit be4c2d4723a4a637f0d1b4f7c66447141a4b3564.
That commit caused a severe memory leak in nfs_readdir_make_qstr().
When listing a directory with more than 100 files (this is how many
struct nfs_cache_array_entry elements fit in one 4kB page), all
allocated file name strings past those 100 leak.
The root of the leakage is that those string pointers are managed in
pages which are never linked into the page cache.
fs/nfs/dir.c puts pages into the page cache by calling
read_cache_page(); the callback function nfs_readdir_filler() will
then fill the given page struct which was passed to it, which is
already linked in the page cache (by do_read_cache_page() calling
add_to_page_cache_lru()).
Commit be4c2d4723a4 added another (local) array of allocated pages, to
be filled with more data, instead of discarding excess items received
from the NFS server. Those additional pages can be used by the next
nfs_readdir_filler() call (from within the same nfs_readdir() call).
The leak happens when some of those additional pages are never used
(copied to the page cache using copy_highpage()). The pages will be
freed by nfs_readdir_free_pages(), but their contents will not. The
commit did not invoke nfs_readdir_clear_array() (and doing so would
have been dangerous, because it did not track which of those pages
were already copied to the page cache, risking double free bugs).
How to reproduce the leak:
- Use a kernel with CONFIG_SLUB_DEBUG_ON.
- Create a directory on a NFS mount with more than 100 files with
names long enough to use the "kmalloc-32" slab (so we can easily
look up the allocation counts):
for i in `seq 110`; do touch ${i}_0123456789abcdef; done
- Drop all caches:
echo 3 >/proc/sys/vm/drop_caches
- Check the allocation counter:
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564391 nfs_readdir_add_to_array+0x73/0xd0 age=534558/4791307/6540952 pid=370-1048386 cpus=0-47 nodes=0-1
- Request a directory listing and check the allocation counters again:
ls
[...]
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564511 nfs_readdir_add_to_array+0x73/0xd0 age=207/4792999/6542663 pid=370-1048386 cpus=0-47 nodes=0-1
There are now 120 new allocations.
- Drop all caches and check the counters again:
echo 3 >/proc/sys/vm/drop_caches
grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
30564401 nfs_readdir_add_to_array+0x73/0xd0 age=735/4793524/6543176 pid=370-1048386 cpus=0-47 nodes=0-1
110 allocations are gone, but 10 have leaked and will never be freed.
Unhelpfully, those allocations are explicitly excluded from KMEMLEAK,
that's why my initial attempts with KMEMLEAK were not successful:
/*
* Avoid a kmemleak false positive. The pointer to the name is stored
* in a page cache page which kmemleak does not scan.
*/
kmemleak_not_leak(string->name);
It would be possible to solve this bug without reverting the whole
commit:
- keep track of which pages were not used, and call
nfs_readdir_clear_array() on them, or
- manually link those pages into the page cache
But for now I have decided to just revert the commit, because the real
fix would require complex considerations, risking more dangerous
(crash) bugs, which may seem unsuitable for the stable branches.
Signed-off-by: Max Kellermann <mk@cm4all.com>
Cc: stable@vger.kernel.org # v5.1+
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2019-07-12 22:18:06 +08:00
|
|
|
ret = nfs_readdir_xdr_to_array(desc, page, inode);
|
|
|
|
if (ret < 0)
|
|
|
|
goto error;
|
2010-09-25 02:48:42 +08:00
|
|
|
SetPageUptodate(page);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
|
|
|
|
/* Should never happen */
|
|
|
|
nfs_zap_mapping(inode, inode->i_mapping);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-09-25 02:48:42 +08:00
|
|
|
unlock_page(page);
|
|
|
|
return 0;
|
|
|
|
error:
|
2020-02-03 06:53:53 +08:00
|
|
|
nfs_readdir_clear_array(page);
|
2010-09-25 02:48:42 +08:00
|
|
|
unlock_page(page);
|
2010-11-16 09:26:22 +08:00
|
|
|
return ret;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
|
|
|
void cache_page_release(nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 20:29:47 +08:00
|
|
|
put_page(desc->page);
|
2010-09-25 02:48:42 +08:00
|
|
|
desc->page = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
2019-05-02 00:06:35 +08:00
|
|
|
return read_cache_page(desc->file->f_mapping, desc->page_index,
|
|
|
|
nfs_readdir_filler, desc);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-09-25 02:48:42 +08:00
|
|
|
* Returns 0 if desc->dir_cookie was found on page desc->page_index
|
2020-02-03 06:53:54 +08:00
|
|
|
* and locks the page to prevent removal from the page cache.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2010-09-25 02:48:42 +08:00
|
|
|
static
|
2020-02-03 06:53:54 +08:00
|
|
|
int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2020-01-23 09:45:39 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
2010-09-25 02:48:42 +08:00
|
|
|
int res;
|
|
|
|
|
|
|
|
desc->page = get_cache_page(desc);
|
|
|
|
if (IS_ERR(desc->page))
|
|
|
|
return PTR_ERR(desc->page);
|
2020-02-03 06:53:54 +08:00
|
|
|
res = lock_page_killable(desc->page);
|
2010-12-08 01:44:56 +08:00
|
|
|
if (res != 0)
|
2020-02-03 06:53:54 +08:00
|
|
|
goto error;
|
|
|
|
res = -EAGAIN;
|
|
|
|
if (desc->page->mapping != NULL) {
|
|
|
|
res = nfs_readdir_search_array(desc);
|
2020-01-23 09:45:39 +08:00
|
|
|
if (res == 0) {
|
|
|
|
nfsi->page_index = desc->page_index;
|
2020-02-03 06:53:54 +08:00
|
|
|
return 0;
|
2020-01-23 09:45:39 +08:00
|
|
|
}
|
2020-02-03 06:53:54 +08:00
|
|
|
}
|
|
|
|
unlock_page(desc->page);
|
|
|
|
error:
|
|
|
|
cache_page_release(desc);
|
2010-09-25 02:48:42 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Search for desc->dir_cookie from the beginning of the page cache */
|
2005-04-17 06:20:36 +08:00
|
|
|
static inline
|
|
|
|
int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
|
|
|
|
{
|
2010-11-16 09:26:22 +08:00
|
|
|
int res;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2010-12-01 10:56:32 +08:00
|
|
|
if (desc->page_index == 0) {
|
2010-11-16 09:26:22 +08:00
|
|
|
desc->current_index = 0;
|
2020-02-04 03:49:33 +08:00
|
|
|
desc->prev_index = 0;
|
2010-12-01 10:56:32 +08:00
|
|
|
desc->last_cookie = 0;
|
|
|
|
}
|
2010-12-08 01:44:56 +08:00
|
|
|
do {
|
2020-02-03 06:53:54 +08:00
|
|
|
res = find_and_lock_cache_page(desc);
|
2010-12-08 01:44:56 +08:00
|
|
|
} while (res == -EAGAIN);
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Once we've found the start of the dirent within a page: fill 'er up...
|
|
|
|
*/
|
|
|
|
static
|
2013-05-18 04:34:50 +08:00
|
|
|
int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct file *file = desc->file;
|
2010-09-25 02:48:42 +08:00
|
|
|
int i = 0;
|
|
|
|
int res = 0;
|
|
|
|
struct nfs_cache_array *array = NULL;
|
2011-03-24 03:04:31 +08:00
|
|
|
struct nfs_open_dir_context *ctx = file->private_data;
|
|
|
|
|
2017-05-04 02:52:21 +08:00
|
|
|
array = kmap(desc->page);
|
2010-09-25 02:48:42 +08:00
|
|
|
for (i = desc->cache_entry_index; i < array->size; i++) {
|
2010-11-21 02:55:33 +08:00
|
|
|
struct nfs_cache_array_entry *ent;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-11-21 02:55:33 +08:00
|
|
|
ent = &array->array[i];
|
2013-05-18 04:34:50 +08:00
|
|
|
if (!dir_emit(desc->ctx, ent->string.name, ent->string.len,
|
|
|
|
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
2010-11-21 02:55:33 +08:00
|
|
|
}
|
2010-09-25 02:48:42 +08:00
|
|
|
if (i < (array->size-1))
|
|
|
|
*desc->dir_cookie = array->array[i+1].cookie;
|
|
|
|
else
|
|
|
|
*desc->dir_cookie = array->last_cookie;
|
2020-02-04 03:49:33 +08:00
|
|
|
if (nfs_readdir_use_cookie(file))
|
|
|
|
desc->ctx->pos = *desc->dir_cookie;
|
|
|
|
else
|
|
|
|
desc->ctx->pos++;
|
2011-07-31 00:45:35 +08:00
|
|
|
if (ctx->duped != 0)
|
|
|
|
ctx->duped = 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2010-12-08 01:44:56 +08:00
|
|
|
if (array->eof_index >= 0)
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2017-05-04 02:52:21 +08:00
|
|
|
kunmap(desc->page);
|
2006-03-21 02:44:24 +08:00
|
|
|
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
|
|
|
|
(unsigned long long)*desc->dir_cookie, res);
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we cannot find a cookie in our cache, we suspect that this is
|
|
|
|
* because it points to a deleted file, so we ask the server to return
|
|
|
|
* whatever it thinks is the next entry. We then feed this to filldir.
|
|
|
|
* If all goes well, we should then be able to find our way round the
|
|
|
|
* cache on the next call to readdir_search_pagecache();
|
|
|
|
*
|
|
|
|
* NOTE: we cannot add the anonymous page to the pagecache because
|
|
|
|
* the data it contains might not be page aligned. Besides,
|
|
|
|
* we should already have a complete representation of the
|
|
|
|
* directory in the page cache by the time we get here.
|
|
|
|
*/
|
|
|
|
static inline
|
2013-05-18 04:34:50 +08:00
|
|
|
int uncached_readdir(nfs_readdir_descriptor_t *desc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct page *page = NULL;
|
|
|
|
int status;
|
2013-01-24 06:07:38 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
2011-07-31 00:45:35 +08:00
|
|
|
struct nfs_open_dir_context *ctx = desc->file->private_data;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-21 02:44:24 +08:00
|
|
|
dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
|
|
|
|
(unsigned long long)*desc->dir_cookie);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
page = alloc_page(GFP_HIGHUSER);
|
|
|
|
if (!page) {
|
|
|
|
status = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2010-11-21 02:24:46 +08:00
|
|
|
desc->page_index = 0;
|
2010-12-01 10:56:32 +08:00
|
|
|
desc->last_cookie = *desc->dir_cookie;
|
2010-11-21 02:24:46 +08:00
|
|
|
desc->page = page;
|
2011-07-31 00:45:35 +08:00
|
|
|
ctx->duped = 0;
|
2010-11-21 02:24:46 +08:00
|
|
|
|
2010-11-21 02:24:49 +08:00
|
|
|
status = nfs_readdir_xdr_to_array(desc, page, inode);
|
|
|
|
if (status < 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out_release;
|
|
|
|
|
2013-05-18 04:34:50 +08:00
|
|
|
status = nfs_do_filldir(desc);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-02-03 06:53:54 +08:00
|
|
|
out_release:
|
|
|
|
nfs_readdir_clear_array(desc->page);
|
|
|
|
cache_page_release(desc);
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2006-03-21 02:44:24 +08:00
|
|
|
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
|
2008-05-03 04:42:44 +08:00
|
|
|
__func__, status);
|
2005-04-17 06:20:36 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
/* The file offset position represents the dirent entry number.  A
   last cookie cache takes care of the common case of reading the
   whole directory.
 */
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry	*dentry = file_dentry(file);
	struct inode	*inode = d_inode(dentry);
	struct nfs_open_dir_context *dir_ctx = file->private_data;
	/* Descriptor lives on the stack; dir_cookie aliases the per-open state */
	nfs_readdir_descriptor_t my_desc = {
		.file = file,
		.ctx = ctx,
		.dir_cookie = &dir_ctx->dir_cookie,
		.plus = nfs_use_readdirplus(inode, ctx),
	},
			*desc = &my_desc;
	int res = 0;

	dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
			file, (long long)ctx->pos);
	nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);

	/*
	 * ctx->pos points to the dirent entry number.
	 * *desc->dir_cookie has the cookie for the next entry. We have
	 * to either find the entry with the appropriate number or
	 * revalidate the cookie.
	 */
	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
		res = nfs_revalidate_mapping(inode, file->f_mapping);
	if (res < 0)
		goto out;

	do {
		res = readdir_search_pagecache(desc);

		if (res == -EBADCOOKIE) {
			res = 0;
			/* This means either end of directory */
			if (*desc->dir_cookie && !desc->eof) {
				/* Or that the server has 'lost' a cookie */
				res = uncached_readdir(desc);
				if (res == 0)
					continue;
			}
			break;
		}
		if (res == -ETOOSMALL && desc->plus) {
			/* Server rejected READDIRPLUS: drop the hint and retry plain */
			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
			nfs_zap_caches(inode);
			desc->page_index = 0;
			desc->plus = false;
			desc->eof = false;
			continue;
		}
		if (res < 0)
			break;

		res = nfs_do_filldir(desc);
		unlock_page(desc->page);
		cache_page_release(desc);
		if (res < 0)
			break;
	} while (!desc->eof);
out:
	/* Positive values are internal progress codes, not errors: report 0 */
	if (res > 0)
		res = 0;
	dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
	return res;
}
|
|
|
|
|
2012-12-18 07:59:39 +08:00
|
|
|
/*
 * Seek within an NFS directory stream. SEEK_SET/SEEK_CUR take the inode
 * lock before updating f_pos; a position change resets the per-open
 * readdir cookie state so the next nfs_readdir() re-searches the cache.
 */
static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filp);
	struct nfs_open_dir_context *dir_ctx = filp->private_data;

	dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
			filp, offset, whence);

	switch (whence) {
	default:
		return -EINVAL;
	case SEEK_SET:
		if (offset < 0)
			return -EINVAL;
		inode_lock(inode);
		break;
	case SEEK_CUR:
		/* SEEK_CUR with 0 is a pure position query: no lock needed */
		if (offset == 0)
			return filp->f_pos;
		inode_lock(inode);
		offset += filp->f_pos;
		if (offset < 0) {
			inode_unlock(inode);
			return -EINVAL;
		}
	}
	if (offset != filp->f_pos) {
		filp->f_pos = offset;
		/* Cookie-based directories treat the offset as the server cookie */
		if (nfs_readdir_use_cookie(filp))
			dir_ctx->dir_cookie = offset;
		else
			dir_ctx->dir_cookie = 0;
		dir_ctx->duped = 0;
	}
	inode_unlock(inode);
	return offset;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * All directory operations under NFS are synchronous, so fsync()
 * is a dummy operation.
 */
static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file_inode(filp);

	dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);

	/* Only bump the fsync statistics counter under the inode lock */
	inode_lock(inode);
	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	inode_unlock(inode);
	return 0;
}
|
|
|
|
|
2007-10-16 06:18:29 +08:00
|
|
|
/**
 * nfs_force_lookup_revalidate - Mark the directory as having changed
 * @dir: pointer to directory inode
 *
 * This forces the revalidation code in nfs_lookup_revalidate() to do a
 * full lookup on all child dentries of 'dir' whenever a change occurs
 * on the server that might have invalidated our dcache.
 *
 * Note that we reserve bit '0' as a tag to let us know when a dentry
 * was revalidated while holding a delegation on its inode.
 *
 * The caller should be holding dir->i_lock
 */
void nfs_force_lookup_revalidate(struct inode *dir)
{
	/* Step by 2 so the low "delegated" tag bit is never disturbed */
	NFS_I(dir)->cache_change_attribute += 2;
}
EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
|
2007-10-16 06:18:29 +08:00
|
|
|
|
2020-02-05 22:01:54 +08:00
|
|
|
/**
 * nfs_verify_change_attribute - Detects NFS remote directory changes
 * @dir: pointer to parent directory inode
 * @verf: previously saved change attribute
 *
 * Return "false" if the verifiers doesn't match the change attribute.
 * This would usually indicate that the directory contents have changed on
 * the server, and that any dentries need revalidating.
 */
static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf)
{
	/* Mask off bit 0 (the delegation tag) before comparing */
	return (verf & ~1UL) == nfs_save_change_attribute(dir);
}
|
|
|
|
|
|
|
|
/* Tag a verifier value as "revalidated while holding a delegation" (bit 0). */
static void nfs_set_verifier_delegated(unsigned long *verf)
{
	*verf = *verf | 1UL;
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
|
|
|
|
/* Strip the "delegated" tag (bit 0) from a verifier value. */
static void nfs_unset_verifier_delegated(unsigned long *verf)
{
	*verf = *verf & ~1UL;
}
|
|
|
|
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
|
|
|
|
|
|
|
|
/* Report whether a verifier value carries the "delegated" tag (bit 0). */
static bool nfs_test_verifier_delegated(unsigned long verf)
{
	return (verf & 1UL) != 0;
}
|
|
|
|
|
|
|
|
/* Test whether @dentry's cached verifier (d_time) carries the delegation tag */
static bool nfs_verifier_is_delegated(struct dentry *dentry)
{
	return nfs_test_verifier_delegated(dentry->d_time);
}
|
|
|
|
|
|
|
|
/*
 * Store @verf in dentry->d_time, tagging it as "delegated" when the inode
 * holds a read delegation. Caller must hold dentry->d_lock.
 */
static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
{
	struct inode *inode = d_inode(dentry);

	/* Keep the old verifier if neither the delegation tag nor the
	 * parent's current change attribute justify updating it. */
	if (!nfs_verifier_is_delegated(dentry) &&
	    !nfs_verify_change_attribute(d_inode(dentry->d_parent), verf))
		goto out;
	if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		nfs_set_verifier_delegated(&verf);
out:
	dentry->d_time = verf;
}
|
|
|
|
|
|
|
|
/**
 * nfs_set_verifier - save a parent directory verifier in the dentry
 * @dentry: pointer to dentry
 * @verf: verifier to save
 *
 * Saves the parent directory verifier in @dentry. If the inode has
 * a delegation, we also tag the dentry as having been revalidated
 * while holding a delegation so that we know we don't have to
 * look it up again after a directory change.
 */
void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
{
	/* d_lock serializes updates of dentry->d_time */
	spin_lock(&dentry->d_lock);
	nfs_set_verifier_locked(dentry, verf);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(nfs_set_verifier);
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
|
|
|
|
/**
 * nfs_clear_verifier_delegated - clear the dir verifier delegation tag
 * @inode: pointer to inode
 *
 * Iterates through the dentries in the inode alias list and clears
 * the tag used to indicate that the dentry has been revalidated
 * while holding a delegation.
 * This function is intended for use when the delegation is being
 * returned or revoked.
 */
void nfs_clear_verifier_delegated(struct inode *inode)
{
	struct dentry *alias;

	if (!inode)
		return;
	/* i_lock protects the alias list; d_lock protects each d_time */
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		nfs_unset_verifier_delegated(&alias->d_time);
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
|
|
|
|
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * A check for whether or not the parent directory has changed.
 * In the case it has, we assume that the dentries are untrustworthy
 * and may need to be looked up again.
 * If rcu_walk prevents us from performing a full check, return 0.
 */
static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
			      int rcu_walk)
{
	if (IS_ROOT(dentry))
		return 1;
	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
		return 0;
	if (!nfs_verify_change_attribute(dir, dentry->d_time))
		return 0;
	/* Revalidate nfsi->cache_change_attribute before we declare a match */
	if (nfs_mapping_need_revalidate_inode(dir)) {
		/* Revalidation may block or issue RPC: not allowed in RCU walk */
		if (rcu_walk)
			return 0;
		if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
			return 0;
	}
	/* Re-check: the revalidation above may have updated the attribute */
	if (!nfs_verify_change_attribute(dir, dentry->d_time))
		return 0;
	return 1;
}
|
|
|
|
|
2007-10-03 07:13:04 +08:00
|
|
|
/*
 * Use intent information to check whether or not we're going to do
 * an O_EXCL create using this path component.
 */
static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
{
	/* Never treated as exclusive create on NFSv2 */
	if (NFS_PROTO(dir)->version == 2)
		return 0;
	return flags & LOOKUP_EXCL;
}
|
|
|
|
|
2005-06-08 06:37:01 +08:00
|
|
|
/*
 * Inode and filehandle revalidation for lookups.
 *
 * We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
 * or if the intent information indicates that we're about to open this
 * particular file and the "nocto" mount flag is not set.
 *
 */
static
int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (IS_AUTOMOUNT(inode))
		return 0;

	if (flags & LOOKUP_OPEN) {
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
			/* A NFSv4 OPEN will revalidate later */
			if (server->caps & NFS_CAP_ATOMIC_OPEN)
				goto out;
			/* Fallthrough */
		case S_IFDIR:
			if (server->flags & NFS_MOUNT_NOCTO)
				break;
			/* NFS close-to-open cache consistency validation */
			goto out_force;
		}
	}

	/* VFS wants an on-the-wire revalidation */
	if (flags & LOOKUP_REVAL)
		goto out_force;
out:
	/* A zero link count means the file is gone on the server */
	return (inode->i_nlink == 0) ? -ESTALE : 0;
out_force:
	/* On-the-wire revalidation cannot be done under RCU walk */
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	ret = __nfs_revalidate_inode(server, inode);
	if (ret != 0)
		return ret;
	goto out;
}
|
|
|
|
|
|
|
|
/*
 * We judge how long we want to trust negative
 * dentries by looking at the parent inode mtime.
 *
 * If parent mtime has changed, we revalidate, else we wait for a
 * period corresponding to the parent's attribute cache timeout value.
 *
 * If LOOKUP_RCU prevents us from performing a full check, return 1
 * suggesting a reval is needed.
 *
 * Note that when creating a new file, or looking up a rename target,
 * then it shouldn't be necessary to revalidate a negative dentry.
 */
static inline
int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
		       unsigned int flags)
{
	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
		return 0;
	/* Mount option says never to cache negative lookups */
	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
		return 1;
	return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
|
|
|
|
|
2018-09-28 21:04:05 +08:00
|
|
|
/*
 * Common exit path for dentry revalidation: logs the verdict and, for an
 * invalid result, marks the parent for revalidation and purges directory
 * caches. Returns the d_revalidate-style code it was given (1 valid,
 * 0 invalid, negative errno).
 */
static int
nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
			   struct inode *inode, int error)
{
	switch (error) {
	case 1:
		dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
			__func__, dentry);
		return 1;
	case 0:
		nfs_mark_for_revalidate(dir);
		if (inode && S_ISDIR(inode->i_mode)) {
			/* Purge readdir caches. */
			nfs_zap_caches(inode);
			/*
			 * We can't d_drop the root of a disconnected tree:
			 * its d_hash is on the s_anon list and d_drop() would hide
			 * it from shrink_dcache_for_unmount(), leading to busy
			 * inodes on unmount and further oopses.
			 */
			if (IS_ROOT(dentry))
				return 1;
		}
		dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
				__func__, dentry);
		return 0;
	}
	dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
			__func__, dentry, error);
	return error;
}
|
|
|
|
|
|
|
|
/*
 * Revalidate a negative dentry. Returns 1 if it may still be trusted,
 * 0 if it must be looked up again, -ECHILD if a blocking check is
 * required during RCU walk.
 */
static int
nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
			       unsigned int flags)
{
	int ret = 1;
	if (nfs_neg_need_reval(dir, dentry, flags)) {
		if (flags & LOOKUP_RCU)
			return -ECHILD;
		ret = 0;
	}
	return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
}
|
|
|
|
|
|
|
|
/*
 * Revalidate a dentry whose verifier carries the delegation tag: refresh
 * the saved parent verifier and report the dentry valid without any RPC.
 */
static int
nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
				struct inode *inode)
{
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
}
|
|
|
|
|
|
|
|
/*
 * Revalidate a positive dentry by performing an on-the-wire LOOKUP and
 * comparing the returned filehandle/attributes against the cached inode.
 * Returns 1 (valid), 0 (invalid) or a negative errno via
 * nfs_lookup_revalidate_done().
 */
static int
nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
			     struct inode *inode)
{
	struct nfs_fh *fhandle;
	struct nfs_fattr *fattr;
	struct nfs4_label *label;
	unsigned long dir_verifier;
	int ret;

	ret = -ENOMEM;
	fhandle = nfs_alloc_fhandle();
	fattr = nfs_alloc_fattr();
	label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
	if (fhandle == NULL || fattr == NULL || IS_ERR(label))
		goto out;

	/* Snapshot the parent verifier before the RPC, so a concurrent
	 * directory change is not masked by our own update below. */
	dir_verifier = nfs_save_change_attribute(dir);
	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
	if (ret < 0) {
		switch (ret) {
		case -ESTALE:
		case -ENOENT:
			/* Entry is gone: report "invalid", not an error */
			ret = 0;
			break;
		case -ETIMEDOUT:
			/* softreval: trust the cache when the server is slow */
			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
				ret = 1;
		}
		goto out;
	}
	ret = 0;
	if (nfs_compare_fh(NFS_FH(inode), fhandle))
		goto out;
	if (nfs_refresh_inode(inode, fattr) < 0)
		goto out;

	nfs_setsecurity(inode, fattr, label);
	nfs_set_verifier(dentry, dir_verifier);

	/* set a readdirplus hint that we had a cache miss */
	nfs_force_use_readdirplus(dir);
	ret = 1;
out:
	nfs_free_fattr(fattr);
	nfs_free_fhandle(fhandle);
	nfs4_label_free(label);
	return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * This is called every time the dcache has a lookup hit,
 * and we should check whether we can really trust that
 * lookup.
 *
 * NOTE! The hit can be a negative hit too, don't assume
 * we have an inode!
 *
 * If the parent directory is seen to have changed, we throw out the
 * cached dentry and do a new lookup.
 */
static int
nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
			 unsigned int flags)
{
	struct inode *inode;
	int error;

	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
	inode = d_inode(dentry);

	if (!inode)
		return nfs_lookup_revalidate_negative(dir, dentry, flags);

	if (is_bad_inode(inode)) {
		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
				__func__, dentry);
		goto out_bad;
	}

	/* A delegation-tagged verifier lets us skip all further checks */
	if (nfs_verifier_is_delegated(dentry))
		return nfs_lookup_revalidate_delegated(dir, dentry, inode);

	/* Force a full look up iff the parent directory has changed */
	if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
	    nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
		error = nfs_lookup_verify_inode(inode, flags);
		if (error) {
			if (error == -ESTALE)
				nfs_zap_caches(dir);
			goto out_bad;
		}
		nfs_advise_use_readdirplus(dir);
		goto out_valid;
	}

	/* The on-the-wire path below may block: bail out of RCU walk */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	if (NFS_STALE(inode))
		goto out_bad;

	trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
	error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
	trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
	return error;
out_valid:
	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
out_bad:
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
}
|
2013-05-23 00:50:43 +08:00
|
|
|
|
2018-09-28 21:04:05 +08:00
|
|
|
/*
 * Resolve the parent directory for @dentry and invoke @reval on it,
 * handling both RCU and ref-walk modes. In RCU mode the parent pointer
 * is re-checked after the call to detect a concurrent rename.
 */
static int
__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
			int (*reval)(struct inode *, struct dentry *, unsigned int))
{
	struct dentry *parent;
	struct inode *dir;
	int ret;

	if (flags & LOOKUP_RCU) {
		parent = READ_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
		ret = reval(dir, dentry, flags);
		/* Parent changed under us: result is meaningless, retry in ref-walk */
		if (parent != READ_ONCE(dentry->d_parent))
			return -ECHILD;
	} else {
		parent = dget_parent(dentry);
		ret = reval(d_inode(parent), dentry, flags);
		dput(parent);
	}
	return ret;
}
|
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
/* d_revalidate entry point: standard lookup revalidation on the parent dir */
static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
	return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
|
|
|
|
|
2013-02-21 00:19:05 +08:00
|
|
|
/*
 * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
 * when we don't really care about the dentry name. This is called when a
 * pathwalk ends on a dentry that was not found via a normal lookup in the
 * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
 *
 * In this situation, we just want to verify that the inode itself is OK
 * since the dentry might have changed on the server.
 */
static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode = d_inode(dentry);
	int error = 0;

	/*
	 * I believe we can only get a negative dentry here in the case of a
	 * procfs-style symlink. Just assume it's correct for now, but we may
	 * eventually need to do something more here.
	 */
	if (!inode) {
		dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
				__func__, dentry);
		return 1;
	}

	if (is_bad_inode(inode)) {
		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
				__func__, dentry);
		return 0;
	}

	error = nfs_lookup_verify_inode(inode, flags);
	dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
			__func__, inode->i_ino, error ? "invalid" : "valid");
	return !error;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * This is called from dput() when d_count is going to 0.
 * Returns 1 to ask the dcache to unhash the dentry, 0 to keep it cached.
 */
static int nfs_dentry_delete(const struct dentry *dentry)
{
	dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
		dentry, dentry->d_flags);

	/* Unhash any dentry with a stale inode */
	if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry)))
		return 1;

	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		/* Unhash it, so that ->d_iput() would be called */
		return 1;
	}
	if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
		/* Unhash it, so that ancestors of killed async unlink
		 * files will be cleaned up during umount */
		return 1;
	}
	return 0;

}
|
|
|
|
|
2012-12-15 05:38:46 +08:00
|
|
|
/* Ensure that we revalidate inode->i_nlink */
static void nfs_drop_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	/* drop the inode if we're reasonably sure this is the last link */
	if (inode->i_nlink > 0)
		drop_nlink(inode);
	NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
	/* Force a refetch of change/ctime/other attrs on next access */
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
		| NFS_INO_INVALID_CTIME
		| NFS_INO_INVALID_OTHER
		| NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Called when the dentry loses inode.
 * We use it to clean up silly-renamed files.
 */
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		/* drop any readdir cache as it could easily be old */
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;

	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		/* Finish the pending sillyrename unlink for this dentry */
		nfs_complete_unlink(dentry, inode);
		nfs_drop_nlink(inode);
	}
	iput(inode);
}
|
|
|
|
|
2011-03-16 17:44:14 +08:00
|
|
|
/* d_release callback: free the cached devname stashed in d_fsdata */
static void nfs_d_release(struct dentry *dentry)
{
	/* free cached devname value, if it survived that far */
	if (unlikely(dentry->d_fsdata)) {
		/* A silly-renamed dentry should never still own d_fsdata here */
		if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
			WARN_ON(1);
		else
			kfree(dentry->d_fsdata);
	}
}
|
|
|
|
|
2009-02-20 13:51:22 +08:00
|
|
|
/* dcache callbacks for NFSv2/v3 dentries */
const struct dentry_operations nfs_dentry_operations = {
	.d_revalidate	= nfs_lookup_revalidate,
	.d_weak_revalidate	= nfs_weak_revalidate,
	.d_delete	= nfs_dentry_delete,
	.d_iput		= nfs_dentry_iput,
	.d_automount	= nfs_d_automount,
	.d_release	= nfs_d_release,
};
EXPORT_SYMBOL_GPL(nfs_dentry_operations);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct dentry *res;
|
|
|
|
struct inode *inode = NULL;
|
2010-04-17 04:22:47 +08:00
|
|
|
struct nfs_fh *fhandle = NULL;
|
|
|
|
struct nfs_fattr *fattr = NULL;
|
2013-05-23 00:50:42 +08:00
|
|
|
struct nfs4_label *label = NULL;
|
2020-02-05 22:01:52 +08:00
|
|
|
unsigned long dir_verifier;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
|
2006-03-21 02:44:14 +08:00
|
|
|
nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-03-08 11:40:43 +08:00
|
|
|
if (unlikely(dentry->d_name.len > NFS_SERVER(dir)->namelen))
|
|
|
|
return ERR_PTR(-ENAMETOOLONG);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-09-06 00:27:44 +08:00
|
|
|
/*
|
|
|
|
* If we're doing an exclusive create, optimize away the lookup
|
|
|
|
* but don't hash the dentry.
|
|
|
|
*/
|
2018-05-10 22:34:21 +08:00
|
|
|
if (nfs_is_exclusive_create(dir, flags) || flags & LOOKUP_RENAME_TARGET)
|
2016-03-08 11:40:43 +08:00
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-04-17 04:22:47 +08:00
|
|
|
res = ERR_PTR(-ENOMEM);
|
|
|
|
fhandle = nfs_alloc_fhandle();
|
|
|
|
fattr = nfs_alloc_fattr();
|
|
|
|
if (fhandle == NULL || fattr == NULL)
|
|
|
|
goto out;
|
|
|
|
|
2013-05-23 00:50:43 +08:00
|
|
|
label = nfs4_label_alloc(NFS_SERVER(dir), GFP_NOWAIT);
|
|
|
|
if (IS_ERR(label))
|
|
|
|
goto out;
|
|
|
|
|
2020-02-05 22:01:52 +08:00
|
|
|
dir_verifier = nfs_save_change_attribute(dir);
|
2013-08-20 23:26:17 +08:00
|
|
|
trace_nfs_lookup_enter(dir, dentry, flags);
|
2020-01-15 01:06:34 +08:00
|
|
|
error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error == -ENOENT)
|
|
|
|
goto no_entry;
|
|
|
|
if (error < 0) {
|
|
|
|
res = ERR_PTR(error);
|
2016-05-30 03:14:14 +08:00
|
|
|
goto out_label;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2013-05-23 00:50:42 +08:00
|
|
|
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
|
2010-12-29 01:02:46 +08:00
|
|
|
res = ERR_CAST(inode);
|
2006-03-21 02:44:48 +08:00
|
|
|
if (IS_ERR(res))
|
2016-05-30 03:14:14 +08:00
|
|
|
goto out_label;
|
NFS: Share NFS superblocks per-protocol per-server per-FSID
The attached patch makes NFS share superblocks between mounts from the same
server and FSID over the same protocol.
It does this by creating each superblock with a false root and returning the
real root dentry in the vfsmount presented by get_sb(). The root dentry set
starts off as an anonymous dentry if we don't already have the dentry for its
inode, otherwise it simply returns the dentry we already have.
We may thus end up with several trees of dentries in the superblock, and if at
some later point one of anonymous tree roots is discovered by normal filesystem
activity to be located in another tree within the superblock, the anonymous
root is named and materialises attached to the second tree at the appropriate
point.
Why do it this way? Why not pass an extra argument to the mount() syscall to
indicate the subpath and then pathwalk from the server root to the desired
directory? You can't guarantee this will work for two reasons:
(1) The root and intervening nodes may not be accessible to the client.
With NFS2 and NFS3, for instance, mountd is called on the server to get
the filehandle for the tip of a path. mountd won't give us handles for
anything we don't have permission to access, and so we can't set up NFS
inodes for such nodes, and so can't easily set up dentries (we'd have to
have ghost inodes or something).
With this patch we don't actually create dentries until we get handles
from the server that we can use to set up their inodes, and we don't
actually bind them into the tree until we know for sure where they go.
(2) Inaccessible symbolic links.
If we're asked to mount two exports from the server, eg:
mount warthog:/warthog/aaa/xxx /mmm
mount warthog:/warthog/bbb/yyy /nnn
We may not be able to access anything nearer the root than xxx and yyy,
but we may find out later that /mmm/www/yyy, say, is actually the same
directory as the one mounted on /nnn. What we might then find out, for
example, is that /warthog/bbb was actually a symbolic link to
/warthog/aaa/xxx/www, but we can't actually determine that by talking to
the server until /warthog is made available by NFS.
This would lead to having constructed an erroneous dentry tree which we
can't easily fix. We can end up with a dentry marked as a directory when
it should actually be a symlink, or we could end up with an apparently
hardlinked directory.
With this patch we need not make assumptions about the type of a dentry
for which we can't retrieve information, nor need we assume we know its
place in the grand scheme of things until we actually see that place.
This patch reduces the possibility of aliasing in the inode and page caches for
inodes that may be accessed by more than one NFS export. It also reduces the
number of superblocks required for NFS where there are many NFS exports being
used from a server (home directory server + autofs for example).
This in turn makes it simpler to do local caching of network filesystems, as it
can then be guaranteed that there won't be links from multiple inodes in
separate superblocks to the same cache file.
Obviously, cache aliasing between different levels of NFS protocol could still
be a problem, but at least that gives us another key to use when indexing the
cache.
This patch makes the following changes:
(1) The server record construction/destruction has been abstracted out into
its own set of functions to make things easier to get right. These have
been moved into fs/nfs/client.c.
All the code in fs/nfs/client.c has to do with the management of
connections to servers, and doesn't touch superblocks in any way; the
remaining code in fs/nfs/super.c has to do with VFS superblock management.
(2) The sequence of events undertaken by NFS mount is now reordered:
(a) A volume representation (struct nfs_server) is allocated.
(b) A server representation (struct nfs_client) is acquired. This may be
allocated or shared, and is keyed on server address, port and NFS
version.
(c) If allocated, the client representation is initialised. The state
member variable of nfs_client is used to prevent a race during
initialisation from two mounts.
(d) For NFS4 a simple pathwalk is performed, walking from FH to FH to find
the root filehandle for the mount (fs/nfs/getroot.c). For NFS2/3 we
are given the root FH in advance.
(e) The volume FSID is probed for on the root FH.
(f) The volume representation is initialised from the FSINFO record
retrieved on the root FH.
(g) sget() is called to acquire a superblock. This may be allocated or
shared, keyed on client pointer and FSID.
(h) If allocated, the superblock is initialised.
(i) If the superblock is shared, then the new nfs_server record is
discarded.
(j) The root dentry for this mount is looked up from the root FH.
(k) The root dentry for this mount is assigned to the vfsmount.
(3) nfs_readdir_lookup() creates dentries for each of the entries readdir()
returns; this function now attaches disconnected trees from alternate
roots that happen to be discovered attached to a directory being read (in
the same way nfs_lookup() is made to do for lookup ops).
The new d_materialise_unique() function is now used to do this, thus
permitting the whole thing to be done under one set of locks, and thus
avoiding any race between mount and lookup operations on the same
directory.
(4) The client management code uses a new debug facility: NFSDBG_CLIENT which
is set by echoing 1024 to /proc/net/sunrpc/nfs_debug.
(5) Clone mounts are now called xdev mounts.
(6) Use the dentry passed to the statfs() op as the handle for retrieving fs
statistics rather than the root dentry of the superblock (which is now a
dummy).
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-08-23 08:06:13 +08:00
|
|
|
|
2016-11-20 00:21:54 +08:00
|
|
|
/* Notify readdir to use READDIRPLUS */
|
|
|
|
nfs_force_use_readdirplus(dir);
|
2012-05-02 05:37:59 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
no_entry:
|
2014-10-13 10:24:21 +08:00
|
|
|
res = d_splice_alias(inode, dentry);
|
2006-10-22 01:24:20 +08:00
|
|
|
if (res != NULL) {
|
|
|
|
if (IS_ERR(res))
|
2016-05-30 03:14:14 +08:00
|
|
|
goto out_label;
|
2005-04-17 06:20:36 +08:00
|
|
|
dentry = res;
|
2006-10-22 01:24:20 +08:00
|
|
|
}
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_set_verifier(dentry, dir_verifier);
|
2016-05-30 03:14:14 +08:00
|
|
|
out_label:
|
2013-08-20 23:26:17 +08:00
|
|
|
trace_nfs_lookup_exit(dir, dentry, flags, error);
|
2013-05-23 00:50:43 +08:00
|
|
|
nfs4_label_free(label);
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2010-04-17 04:22:47 +08:00
|
|
|
nfs_free_fattr(fattr);
|
|
|
|
nfs_free_fhandle(fhandle);
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_lookup);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-31 04:05:25 +08:00
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
/* Defined below; the ops table needs the symbol before its definition. */
static int nfs4_lookup_revalidate(struct dentry *, unsigned int);

/*
 * Dentry operations used for NFSv4 mounts.  Identical to the v2/v3 set
 * except that revalidation goes through nfs4_lookup_revalidate(), which
 * can take advantage of the atomic OPEN path.
 */
const struct dentry_operations nfs4_dentry_operations = {
	.d_revalidate	= nfs4_lookup_revalidate,
	.d_weak_revalidate	= nfs_weak_revalidate,
	.d_delete	= nfs_dentry_delete,
	.d_iput		= nfs_dentry_iput,
	.d_automount	= nfs_d_automount,
	.d_release	= nfs_d_release,
};
EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-06-26 07:15:54 +08:00
|
|
|
static fmode_t flags_to_mode(int flags)
|
|
|
|
{
|
|
|
|
fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
|
|
|
|
if ((flags & O_ACCMODE) != O_WRONLY)
|
|
|
|
res |= FMODE_READ;
|
|
|
|
if ((flags & O_ACCMODE) != O_RDONLY)
|
|
|
|
res |= FMODE_WRITE;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2016-10-13 12:26:47 +08:00
|
|
|
static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
|
2010-09-17 22:56:50 +08:00
|
|
|
{
|
2016-10-13 12:26:47 +08:00
|
|
|
return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
|
2010-09-17 22:56:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * finish_open() callback: notify fscache that the file is being opened.
 * Always reports success.
 */
static int do_open(struct inode *inode, struct file *filp)
{
	nfs_fscache_open_file(inode, filp);
	return 0;
}
|
|
|
|
|
2012-06-22 16:39:14 +08:00
|
|
|
static int nfs_finish_open(struct nfs_open_context *ctx,
|
|
|
|
struct dentry *dentry,
|
2018-06-09 01:06:28 +08:00
|
|
|
struct file *file, unsigned open_flags)
|
2010-09-17 22:56:50 +08:00
|
|
|
{
|
2012-06-05 21:10:18 +08:00
|
|
|
int err;
|
|
|
|
|
2018-06-08 23:44:56 +08:00
|
|
|
err = finish_open(file, dentry, do_open);
|
2012-06-22 16:40:19 +08:00
|
|
|
if (err)
|
2012-06-22 16:39:14 +08:00
|
|
|
goto out;
|
2017-07-03 13:27:26 +08:00
|
|
|
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
|
|
|
|
nfs_file_set_open_context(file, ctx);
|
|
|
|
else
|
2019-08-10 00:15:07 +08:00
|
|
|
err = -EOPENSTALE;
|
2010-09-17 22:56:50 +08:00
|
|
|
out:
|
2012-06-22 16:39:14 +08:00
|
|
|
return err;
|
2010-09-17 22:56:50 +08:00
|
|
|
}
|
|
|
|
|
2012-07-17 04:39:12 +08:00
|
|
|
/*
 * ->atomic_open() for NFS: look up (and, with O_CREAT, possibly create)
 * @dentry in @dir and open it in one server round trip via the
 * protocol's open_context operation.  Cases the OPEN operation cannot
 * handle (O_DIRECTORY, symlinks, non-regular files) fall back to a
 * plain nfs_lookup() + finish_no_open() via the "no_open" path.
 */
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
		    struct file *file, unsigned open_flags,
		    umode_t mode)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct nfs_open_context *ctx;
	struct dentry *res;
	struct iattr attr = { .ia_valid = ATTR_OPEN };
	struct inode *inode;
	unsigned int lookup_flags = 0;
	bool switched = false;	/* true when we swapped in a d_alloc_parallel() dentry */
	int created = 0;
	int err;

	/* Expect a negative dentry */
	BUG_ON(d_inode(dentry));

	dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n",
			dir->i_sb->s_id, dir->i_ino, dentry);

	err = nfs_check_flags(open_flags);
	if (err)
		return err;

	/* NFS only supports OPEN on regular files */
	if ((open_flags & O_DIRECTORY)) {
		if (!d_in_lookup(dentry)) {
			/*
			 * Hashed negative dentry with O_DIRECTORY: dentry was
			 * revalidated and is fine, no need to perform lookup
			 * again
			 */
			return -ENOENT;
		}
		lookup_flags = LOOKUP_OPEN|LOOKUP_DIRECTORY;
		goto no_open;
	}

	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
		return -ENAMETOOLONG;

	if (open_flags & O_CREAT) {
		struct nfs_server *server = NFS_SERVER(dir);

		/* Apply the umask locally unless the server handles it. */
		if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
			mode &= ~current_umask();

		attr.ia_valid |= ATTR_MODE;
		attr.ia_mode = mode;
	}
	if (open_flags & O_TRUNC) {
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
	}

	/*
	 * For a plain open of a hashed dentry, switch to an in-lookup
	 * dentry (d_alloc_parallel) so concurrent lookups serialize on it.
	 */
	if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
		d_drop(dentry);
		switched = true;
		dentry = d_alloc_parallel(dentry->d_parent,
					  &dentry->d_name, &wq);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		if (unlikely(!d_in_lookup(dentry)))
			return finish_no_open(file, dentry);
	}

	ctx = create_nfs_open_context(dentry, open_flags, file);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;

	trace_nfs_atomic_open_enter(dir, ctx, open_flags);
	inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
	if (created)
		file->f_mode |= FMODE_CREATED;
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
		put_nfs_open_context(ctx);
		d_drop(dentry);
		switch (err) {
		case -ENOENT:
			/* Cache the negative result. */
			d_splice_alias(NULL, dentry);
			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
			break;
		case -EISDIR:
		case -ENOTDIR:
			goto no_open;
		case -ELOOP:
			if (!(open_flags & O_NOFOLLOW))
				goto no_open;
			break;
			/* case -EINVAL: */
		default:
			break;
		}
		goto out;
	}

	err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
	trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
	put_nfs_open_context(ctx);
out:
	if (unlikely(switched)) {
		d_lookup_done(dentry);
		dput(dentry);
	}
	return err;

no_open:
	/* OPEN is not usable here: fall back to an ordinary lookup. */
	res = nfs_lookup(dir, dentry, lookup_flags);
	if (switched) {
		d_lookup_done(dentry);
		if (!res)
			res = dentry;
		else
			dput(dentry);
	}
	if (IS_ERR(res))
		return PTR_ERR(res);
	return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
static int
|
|
|
|
nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
|
|
|
|
unsigned int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2011-01-14 10:48:39 +08:00
|
|
|
struct inode *inode;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-06-11 03:36:40 +08:00
|
|
|
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
|
2018-09-29 00:42:51 +08:00
|
|
|
goto full_reval;
|
2012-06-05 21:10:21 +08:00
|
|
|
if (d_mountpoint(dentry))
|
2018-09-29 00:42:51 +08:00
|
|
|
goto full_reval;
|
2010-09-17 22:56:51 +08:00
|
|
|
|
2015-03-18 06:25:59 +08:00
|
|
|
inode = d_inode(dentry);
|
2010-09-17 22:56:51 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* We can't create new files in nfs_open_revalidate(), so we
|
|
|
|
* optimize away revalidation of negative dentries.
|
|
|
|
*/
|
2018-09-29 00:42:51 +08:00
|
|
|
if (inode == NULL)
|
|
|
|
goto full_reval;
|
|
|
|
|
2020-02-05 22:01:54 +08:00
|
|
|
if (nfs_verifier_is_delegated(dentry))
|
2018-09-29 00:42:51 +08:00
|
|
|
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
|
2007-10-02 08:10:12 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* NFS only supports OPEN on regular files */
|
|
|
|
if (!S_ISREG(inode->i_mode))
|
2018-09-29 00:42:51 +08:00
|
|
|
goto full_reval;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* We cannot do exclusive creation on a positive dentry */
|
2018-09-29 00:42:51 +08:00
|
|
|
if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
|
|
|
|
goto reval_dentry;
|
|
|
|
|
|
|
|
/* Check if the directory changed */
|
|
|
|
if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
|
|
|
|
goto reval_dentry;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-05-21 23:30:20 +08:00
|
|
|
/* Let f_op->open() actually open (and revalidate) the file */
|
2018-09-29 00:42:51 +08:00
|
|
|
return 1;
|
|
|
|
reval_dentry:
|
|
|
|
if (flags & LOOKUP_RCU)
|
|
|
|
return -ECHILD;
|
2019-02-12 09:38:33 +08:00
|
|
|
return nfs_lookup_revalidate_dentry(dir, dentry, inode);
|
2012-01-18 11:04:26 +08:00
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
full_reval:
|
|
|
|
return nfs_do_lookup_revalidate(dir, dentry, flags);
|
|
|
|
}
|
2010-09-17 22:56:51 +08:00
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
/*
 * ->d_revalidate for NFSv4: run nfs4_do_lookup_revalidate() through the
 * common __nfs_lookup_revalidate() wrapper.
 */
static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
	return __nfs_lookup_revalidate(dentry, flags,
			nfs4_do_lookup_revalidate);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* CONFIG_NFSV4 */
|
|
|
|
|
2019-09-13 20:29:02 +08:00
|
|
|
/*
 * Given a server-provided file handle and attributes for a freshly
 * created or looked-up object, obtain its inode and splice it under
 * @dentry.  A missing file handle or missing attributes are filled in
 * with extra LOOKUP/GETATTR round trips.  Returns the (possibly
 * aliased) dentry holding the inode, or an ERR_PTR; on error the
 * parent directory is marked for revalidation.
 */
struct dentry *
nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
				struct nfs_fattr *fattr,
				struct nfs4_label *label)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct dentry *d;
	int error;

	/* Unhash first: d_splice_alias() below may attach elsewhere. */
	d_drop(dentry);

	if (fhandle->size == 0) {
		/* No handle supplied: ask the server for one. */
		error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, NULL);
		if (error)
			goto out_error;
	}
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (!(fattr->valid & NFS_ATTR_FATTR)) {
		/* No usable attributes supplied: fetch them explicitly. */
		struct nfs_server *server = NFS_SB(dentry->d_sb);
		error = server->nfs_client->rpc_ops->getattr(server, fhandle,
				fattr, NULL, NULL);
		if (error < 0)
			goto out_error;
	}
	inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
	d = d_splice_alias(inode, dentry);
out:
	dput(parent);
	return d;
out_error:
	nfs_mark_for_revalidate(dir);
	d = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
|
|
|
|
|
|
|
|
/*
 * Code common to create, mkdir, and mknod.
 */
int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr,
		struct nfs4_label *label)
{
	struct dentry *res = nfs_add_or_obtain(dentry, fhandle, fattr, label);

	if (IS_ERR(res))
		return PTR_ERR(res);

	/* Callers only want a status, not the dentry reference. */
	dput(res);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_instantiate);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Following a failed create operation, we drop the dentry rather
|
|
|
|
* than retain a negative dentry. This avoids a problem in the event
|
|
|
|
* that the operation succeeded on the server, but an error in the
|
|
|
|
* reply path made it appear to have failed.
|
|
|
|
*/
|
2012-07-17 04:39:10 +08:00
|
|
|
int nfs_create(struct inode *dir, struct dentry *dentry,
|
2012-06-11 06:05:36 +08:00
|
|
|
umode_t mode, bool excl)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
2012-06-11 06:05:36 +08:00
|
|
|
int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: create(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_mode = mode;
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
|
2013-08-21 22:53:09 +08:00
|
|
|
trace_nfs_create_enter(dir, dentry, open_flags);
|
2012-06-05 21:10:19 +08:00
|
|
|
error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
|
2013-08-21 22:53:09 +08:00
|
|
|
trace_nfs_create_exit(dir, dentry, open_flags, error);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_create);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* See comments for nfs_proc_create regarding failed operations.
|
|
|
|
*/
|
2012-07-17 04:39:10 +08:00
|
|
|
int
|
2011-07-26 13:52:52 +08:00
|
|
|
nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
|
|
|
int status;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: mknod(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_mode = mode;
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mknod_enter(dir, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mknod_exit(dir, dentry, status);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return status;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_mknod);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* See comments for nfs_proc_create regarding failed operations.
|
|
|
|
*/
|
2012-07-17 04:39:10 +08:00
|
|
|
int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
|
|
|
int error;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
attr.ia_mode = mode | S_IFDIR;
|
|
|
|
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mkdir_enter(dir, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mkdir_exit(dir, dentry, error);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_mkdir);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-29 08:43:18 +08:00
|
|
|
/*
 * The server reported -ENOENT for this name: if the dentry is still
 * hashed and positive, turn it negative / unhash it via d_delete() so
 * the stale entry is not reused from the dcache.
 */
static void nfs_dentry_handle_enoent(struct dentry *dentry)
{
	if (simple_positive(dentry))
		d_delete(dentry);
}
|
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
/*
 * Remove the directory @dentry from @dir on the server.
 *
 * For a positive dentry the per-inode rmdir_sem is held across the RPC
 * (NOTE(review): presumably to serialize against in-flight sillyrename
 * completion on children — confirm against nfs_async_unlink paths).
 */
int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	dfprintk(VFS, "NFS: rmdir(%s/%lu), %pd\n",
			dir->i_sb->s_id, dir->i_ino, dentry);

	trace_nfs_rmdir_enter(dir, dentry);
	if (d_really_is_positive(dentry)) {
		down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
		/* Ensure the VFS deletes this inode */
		switch (error) {
		case 0:
			/* Success: zero the link count so the VFS evicts it. */
			clear_nlink(d_inode(dentry));
			break;
		case -ENOENT:
			/* Already gone on the server: drop the stale dentry. */
			nfs_dentry_handle_enoent(dentry);
		}
		up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
	} else
		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
	trace_nfs_rmdir_exit(dir, dentry, error);

	return error;
}
EXPORT_SYMBOL_GPL(nfs_rmdir);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Remove a file after making sure there are no pending writes,
 * and after checking that the file has only one user.
 *
 * We invalidate the attribute cache and free the inode prior to the operation
 * to avoid possible races if the server reuses the inode.
 */
static int nfs_safe_remove(struct dentry *dentry)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct inode *inode = d_inode(dentry);
	int error = -EBUSY;

	dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);

	/* If the dentry was sillyrenamed, we simply call d_delete() */
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		error = 0;
		goto out;
	}

	trace_nfs_remove_enter(dir, dentry);
	if (inode != NULL) {
		error = NFS_PROTO(dir)->remove(dir, dentry);
		if (error == 0)
			/* Reflect the server-side unlink in the cached inode. */
			nfs_drop_nlink(inode);
	} else
		/* Negative dentry: still issue the REMOVE against the name. */
		error = NFS_PROTO(dir)->remove(dir, dentry);
	if (error == -ENOENT)
		nfs_dentry_handle_enoent(dentry);
	trace_nfs_remove_exit(dir, dentry, error);
out:
	return error;
}
|
|
|
|
|
|
|
|
/* We do silly rename. In case sillyrename() returns -EBUSY, the inode
 * belongs to an active ".nfs..." file and we return -EBUSY.
 *
 * If sillyrename() returns 0, we do nothing, otherwise we unlink.
 */
int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error;
	int need_rehash = 0;	/* set when we unhashed a still-hashed dentry below */

	dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id,
		dir->i_ino, dentry);

	trace_nfs_unlink_enter(dir, dentry);
	spin_lock(&dentry->d_lock);
	if (d_count(dentry) > 1) {
		/* Dentry is still in use elsewhere: sillyrename instead. */
		spin_unlock(&dentry->d_lock);
		/* Start asynchronous writeout of the inode */
		write_inode_now(d_inode(dentry), 0);
		error = nfs_sillyrename(dir, dentry);
		goto out;
	}
	/* Hide the name while the REMOVE is in flight. */
	if (!d_unhashed(dentry)) {
		__d_drop(dentry);
		need_rehash = 1;
	}
	spin_unlock(&dentry->d_lock);
	error = nfs_safe_remove(dentry);
	if (!error || error == -ENOENT) {
		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	} else if (need_rehash)
		/* Removal failed: make the name visible again. */
		d_rehash(dentry);
out:
	trace_nfs_unlink_exit(dir, dentry, error);
	return error;
}
EXPORT_SYMBOL_GPL(nfs_unlink);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-08-23 08:06:23 +08:00
|
|
|
/*
 * To create a symbolic link, most file systems instantiate a new inode,
 * add a page to it containing the path, then write it out to the disk
 * using prepare_write/commit_write.
 *
 * Unfortunately the NFS client can't create the in-core inode first
 * because it needs a file handle to create an in-core inode (see
 * fs/nfs/inode.c:nfs_fhget). We only have a file handle *after* the
 * symlink request has completed on the server.
 *
 * So instead we allocate a raw page, copy the symname into it, then do
 * the SYMLINK request with the page as the buffer. If it succeeds, we
 * now have a new file handle and can instantiate an in-core NFS inode
 * and move the raw page into its mapping.
 */
int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct page *page;
	char *kaddr;
	struct iattr attr;
	unsigned int pathlen = strlen(symname);
	int error;

	dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s)\n", dir->i_sb->s_id,
		dir->i_ino, dentry, symname);

	/* The target must fit in the single page used as the RPC buffer. */
	if (pathlen > PAGE_SIZE)
		return -ENAMETOOLONG;

	attr.ia_mode = S_IFLNK | S_IRWXUGO;
	attr.ia_valid = ATTR_MODE;

	page = alloc_page(GFP_USER);
	if (!page)
		return -ENOMEM;

	kaddr = page_address(page);
	memcpy(kaddr, symname, pathlen);
	/* Zero the tail so the page is fully initialized for the cache. */
	if (pathlen < PAGE_SIZE)
		memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);

	trace_nfs_symlink_enter(dir, dentry);
	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
	trace_nfs_symlink_exit(dir, dentry, error);
	if (error != 0) {
		dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
			dir->i_sb->s_id, dir->i_ino,
			dentry, symname, error);
		d_drop(dentry);
		__free_page(page);
		return error;
	}

	/*
	 * No big deal if we can't add this page to the page cache here.
	 * READLINK will get the missing page from the server if needed.
	 */
	if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
							GFP_KERNEL)) {
		SetPageUptodate(page);
		unlock_page(page);
		/*
		 * add_to_page_cache_lru() grabs an extra page refcount.
		 * Drop it here to avoid leaking this page later.
		 */
		put_page(page);
	} else
		__free_page(page);

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_symlink);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
int
|
2005-04-17 06:20:36 +08:00
|
|
|
nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
|
|
|
|
{
|
2015-03-18 06:25:59 +08:00
|
|
|
struct inode *inode = d_inode(old_dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
|
|
|
|
old_dentry, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-22 01:54:44 +08:00
|
|
|
trace_nfs_link_enter(inode, dir, dentry);
|
2007-10-03 09:58:05 +08:00
|
|
|
d_drop(dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
|
2005-10-28 10:12:42 +08:00
|
|
|
if (error == 0) {
|
2010-10-23 23:11:40 +08:00
|
|
|
ihold(inode);
|
2007-10-03 09:58:05 +08:00
|
|
|
d_add(dentry, inode);
|
2005-10-28 10:12:42 +08:00
|
|
|
}
|
2013-08-22 01:54:44 +08:00
|
|
|
trace_nfs_link_exit(inode, dir, dentry, error);
|
2005-04-17 06:20:36 +08:00
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_link);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* RENAME
|
|
|
|
* FIXME: Some nfsds, like the Linux user space nfsd, may generate a
|
|
|
|
* different file handle for the same inode after a rename (e.g. when
|
|
|
|
* moving to a different directory). A fail-safe method to do so would
|
|
|
|
* be to look up old_dir/old_name, create a link to new_dir/new_name and
|
|
|
|
* rename the old file using the sillyrename stuff. This way, the original
|
|
|
|
* file in old_dir will go away when the last process iput()s the inode.
|
|
|
|
*
|
|
|
|
* FIXED.
|
|
|
|
*
|
|
|
|
* It actually works quite well. One needs to have the possibility for
|
|
|
|
* at least one ".nfs..." file in each directory the file ever gets
|
|
|
|
* moved or linked to which happens automagically with the new
|
|
|
|
* implementation that only depends on the dcache stuff instead of
|
|
|
|
* using the inode layer
|
|
|
|
*
|
|
|
|
* Unfortunately, things are a little more complicated than indicated
|
|
|
|
* above. For a cross-directory move, we want to make sure we can get
|
|
|
|
* rid of the old inode after the operation. This means there must be
|
|
|
|
* no pending writes (if it's a file), and the use count must be 1.
|
|
|
|
* If these conditions are met, we can drop the dentries before doing
|
|
|
|
* the rename.
|
|
|
|
*/
|
2012-07-17 04:39:10 +08:00
|
|
|
int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
fs: make remaining filesystems use .rename2
This is trivial to do:
- add flags argument to foo_rename()
- check if flags is zero
- assign foo_rename() to .rename2 instead of .rename
This doesn't mean it's impossible to support RENAME_NOREPLACE for these
filesystems, but it is not trivial, like for local filesystems.
RENAME_NOREPLACE must guarantee atomicity (i.e. it shouldn't be possible
for a file to be created on one host while it is overwritten by rename on
another host).
Filesystems converted:
9p, afs, ceph, coda, ecryptfs, kernfs, lustre, ncpfs, nfs, ocfs2, orangefs.
After this, we can get rid of the duplicate interfaces for rename.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Acked-by: David Howells <dhowells@redhat.com> [AFS]
Acked-by: Mike Marshall <hubcap@omnibond.com>
Cc: Eric Van Hensbergen <ericvh@gmail.com>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Jan Harkes <jaharkes@cs.cmu.edu>
Cc: Tyler Hicks <tyhicks@canonical.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Trond Myklebust <trond.myklebust@primarydata.com>
Cc: Mark Fasheh <mfasheh@suse.com>
2016-09-27 17:03:58 +08:00
|
|
|
struct inode *new_dir, struct dentry *new_dentry,
|
|
|
|
unsigned int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2015-03-18 06:25:59 +08:00
|
|
|
struct inode *old_inode = d_inode(old_dentry);
|
|
|
|
struct inode *new_inode = d_inode(new_dentry);
|
2017-06-16 23:12:59 +08:00
|
|
|
struct dentry *dentry = NULL, *rehash = NULL;
|
2014-03-17 19:06:56 +08:00
|
|
|
struct rpc_task *task;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error = -EBUSY;
|
|
|
|
|
fs: make remaining filesystems use .rename2
This is trivial to do:
- add flags argument to foo_rename()
- check if flags is zero
- assign foo_rename() to .rename2 instead of .rename
This doesn't mean it's impossible to support RENAME_NOREPLACE for these
filesystems, but it is not trivial, like for local filesystems.
RENAME_NOREPLACE must guarantee atomicity (i.e. it shouldn't be possible
for a file to be created on one host while it is overwritten by rename on
another host).
Filesystems converted:
9p, afs, ceph, coda, ecryptfs, kernfs, lustre, ncpfs, nfs, ocfs2, orangefs.
After this, we can get rid of the duplicate interfaces for rename.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Acked-by: David Howells <dhowells@redhat.com> [AFS]
Acked-by: Mike Marshall <hubcap@omnibond.com>
Cc: Eric Van Hensbergen <ericvh@gmail.com>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Jan Harkes <jaharkes@cs.cmu.edu>
Cc: Tyler Hicks <tyhicks@canonical.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Trond Myklebust <trond.myklebust@primarydata.com>
Cc: Mark Fasheh <mfasheh@suse.com>
2016-09-27 17:03:58 +08:00
|
|
|
if (flags)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
|
|
|
|
old_dentry, new_dentry,
|
2013-07-05 22:59:33 +08:00
|
|
|
d_count(new_dentry));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-22 00:08:45 +08:00
|
|
|
trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2009-12-04 04:58:56 +08:00
|
|
|
* For non-directories, check whether the target is busy and if so,
|
|
|
|
* make a copy of the dentry and then do a silly-rename. If the
|
|
|
|
* silly-rename succeeds, the copied dentry is hashed and becomes
|
|
|
|
* the new target.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2009-12-04 04:58:56 +08:00
|
|
|
if (new_inode && !S_ISDIR(new_inode->i_mode)) {
|
|
|
|
/*
|
|
|
|
* To prevent any new references to the target during the
|
|
|
|
* rename, we unhash the dentry in advance.
|
|
|
|
*/
|
2017-06-16 23:12:59 +08:00
|
|
|
if (!d_unhashed(new_dentry)) {
|
2009-12-04 04:58:56 +08:00
|
|
|
d_drop(new_dentry);
|
2017-06-16 23:12:59 +08:00
|
|
|
rehash = new_dentry;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-07-05 22:59:33 +08:00
|
|
|
if (d_count(new_dentry) > 2) {
|
2009-12-04 04:58:56 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
/* copy the target dentry's name */
|
|
|
|
dentry = d_alloc(new_dentry->d_parent,
|
|
|
|
&new_dentry->d_name);
|
|
|
|
if (!dentry)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* silly-rename the existing target ... */
|
|
|
|
err = nfs_sillyrename(new_dir, new_dentry);
|
2009-12-04 04:58:56 +08:00
|
|
|
if (err)
|
2009-12-04 04:58:56 +08:00
|
|
|
goto out;
|
2009-12-04 04:58:56 +08:00
|
|
|
|
|
|
|
new_dentry = dentry;
|
2017-06-16 23:12:59 +08:00
|
|
|
rehash = NULL;
|
2009-12-04 04:58:56 +08:00
|
|
|
new_inode = NULL;
|
2009-12-04 04:58:56 +08:00
|
|
|
}
|
2009-03-20 03:35:49 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-16 23:12:59 +08:00
|
|
|
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
|
2014-03-17 19:06:56 +08:00
|
|
|
if (IS_ERR(task)) {
|
|
|
|
error = PTR_ERR(task);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = rpc_wait_for_completion_task(task);
|
2017-06-16 23:13:00 +08:00
|
|
|
if (error != 0) {
|
|
|
|
((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1;
|
|
|
|
/* Paired with the atomic_dec_and_test() barrier in rpc_do_put_task() */
|
|
|
|
smp_wmb();
|
|
|
|
} else
|
2014-03-17 19:06:56 +08:00
|
|
|
error = task->tk_status;
|
|
|
|
rpc_put_task(task);
|
2018-04-09 06:11:18 +08:00
|
|
|
/* Ensure the inode attributes are revalidated */
|
|
|
|
if (error == 0) {
|
|
|
|
spin_lock(&old_inode->i_lock);
|
|
|
|
NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter();
|
|
|
|
NFS_I(old_inode)->cache_validity |= NFS_INO_INVALID_CHANGE
|
|
|
|
| NFS_INO_INVALID_CTIME
|
|
|
|
| NFS_INO_REVAL_FORCED;
|
|
|
|
spin_unlock(&old_inode->i_lock);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2017-06-16 23:12:59 +08:00
|
|
|
if (rehash)
|
|
|
|
d_rehash(rehash);
|
2013-08-22 00:08:45 +08:00
|
|
|
trace_nfs_rename_exit(old_dir, old_dentry,
|
|
|
|
new_dir, new_dentry, error);
|
2017-06-16 23:12:59 +08:00
|
|
|
if (!error) {
|
|
|
|
if (new_inode != NULL)
|
|
|
|
nfs_drop_nlink(new_inode);
|
|
|
|
/*
|
|
|
|
* The d_move() should be here instead of in an async RPC completion
|
|
|
|
* handler because we need the proper locks to move the dentry. If
|
|
|
|
* we're interrupted by a signal, the async RPC completion handler
|
|
|
|
* should mark the directories for revalidation.
|
|
|
|
*/
|
|
|
|
d_move(old_dentry, new_dentry);
|
2017-11-07 04:28:04 +08:00
|
|
|
nfs_set_verifier(old_dentry,
|
2017-06-16 23:12:59 +08:00
|
|
|
nfs_save_change_attribute(new_dir));
|
|
|
|
} else if (error == -ENOENT)
|
|
|
|
nfs_dentry_handle_enoent(old_dentry);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* new dentry created? */
|
|
|
|
if (dentry)
|
|
|
|
dput(dentry);
|
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_rename);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
static DEFINE_SPINLOCK(nfs_access_lru_lock);
|
|
|
|
static LIST_HEAD(nfs_access_lru_list);
|
|
|
|
static atomic_long_t nfs_access_nr_entries;
|
|
|
|
|
2020-02-08 22:14:11 +08:00
|
|
|
static unsigned long nfs_access_max_cachesize = 4*1024*1024;
|
2014-07-22 01:53:48 +08:00
|
|
|
module_param(nfs_access_max_cachesize, ulong, 0644);
|
|
|
|
MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
static void nfs_access_free_entry(struct nfs_access_entry *entry)
|
|
|
|
{
|
2018-12-03 08:30:30 +08:00
|
|
|
put_cred(entry->cred);
|
2014-07-14 09:28:20 +08:00
|
|
|
kfree_rcu(entry, rcu_head);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__before_atomic();
|
2006-07-25 23:28:18 +08:00
|
|
|
atomic_long_dec(&nfs_access_nr_entries);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__after_atomic();
|
2006-07-25 23:28:18 +08:00
|
|
|
}
|
|
|
|
|
2010-05-14 00:51:06 +08:00
|
|
|
static void nfs_access_free_list(struct list_head *head)
|
|
|
|
{
|
|
|
|
struct nfs_access_entry *cache;
|
|
|
|
|
|
|
|
while (!list_empty(head)) {
|
|
|
|
cache = list_entry(head->next, struct nfs_access_entry, lru);
|
|
|
|
list_del(&cache->lru);
|
|
|
|
nfs_access_free_entry(cache);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
static unsigned long
|
|
|
|
nfs_do_access_cache_scan(unsigned int nr_to_scan)
|
2006-07-25 23:28:19 +08:00
|
|
|
{
|
|
|
|
LIST_HEAD(head);
|
2010-09-30 03:11:56 +08:00
|
|
|
struct nfs_inode *nfsi, *next;
|
2006-07-25 23:28:19 +08:00
|
|
|
struct nfs_access_entry *cache;
|
2013-08-28 08:18:09 +08:00
|
|
|
long freed = 0;
|
2006-07-25 23:28:19 +08:00
|
|
|
|
2007-06-06 07:23:43 +08:00
|
|
|
spin_lock(&nfs_access_lru_lock);
|
2010-09-30 03:11:56 +08:00
|
|
|
list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
|
2006-07-25 23:28:19 +08:00
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
if (nr_to_scan-- == 0)
|
|
|
|
break;
|
2010-05-14 00:51:06 +08:00
|
|
|
inode = &nfsi->vfs_inode;
|
2006-07-25 23:28:19 +08:00
|
|
|
spin_lock(&inode->i_lock);
|
|
|
|
if (list_empty(&nfsi->access_cache_entry_lru))
|
|
|
|
goto remove_lru_entry;
|
|
|
|
cache = list_entry(nfsi->access_cache_entry_lru.next,
|
|
|
|
struct nfs_access_entry, lru);
|
|
|
|
list_move(&cache->lru, &head);
|
|
|
|
rb_erase(&cache->rb_node, &nfsi->access_cache);
|
2013-08-28 08:18:09 +08:00
|
|
|
freed++;
|
2006-07-25 23:28:19 +08:00
|
|
|
if (!list_empty(&nfsi->access_cache_entry_lru))
|
|
|
|
list_move_tail(&nfsi->access_cache_inode_lru,
|
|
|
|
&nfs_access_lru_list);
|
|
|
|
else {
|
|
|
|
remove_lru_entry:
|
|
|
|
list_del_init(&nfsi->access_cache_inode_lru);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__before_atomic();
|
2006-07-25 23:28:19 +08:00
|
|
|
clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__after_atomic();
|
2006-07-25 23:28:19 +08:00
|
|
|
}
|
2010-05-26 20:42:24 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
2006-07-25 23:28:19 +08:00
|
|
|
}
|
|
|
|
spin_unlock(&nfs_access_lru_lock);
|
2010-05-14 00:51:06 +08:00
|
|
|
nfs_access_free_list(&head);
|
2013-08-28 08:18:09 +08:00
|
|
|
return freed;
|
|
|
|
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
unsigned long
|
|
|
|
nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
|
|
{
|
|
|
|
int nr_to_scan = sc->nr_to_scan;
|
|
|
|
gfp_t gfp_mask = sc->gfp_mask;
|
|
|
|
|
|
|
|
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
|
|
|
|
return SHRINK_STOP;
|
|
|
|
return nfs_do_access_cache_scan(nr_to_scan);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-08-28 08:18:09 +08:00
|
|
|
unsigned long
|
|
|
|
nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
|
|
|
|
{
|
2013-08-28 08:17:53 +08:00
|
|
|
return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
|
2006-07-25 23:28:19 +08:00
|
|
|
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
static void
|
|
|
|
nfs_access_cache_enforce_limit(void)
|
|
|
|
{
|
|
|
|
long nr_entries = atomic_long_read(&nfs_access_nr_entries);
|
|
|
|
unsigned long diff;
|
|
|
|
unsigned int nr_to_scan;
|
|
|
|
|
|
|
|
if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize)
|
|
|
|
return;
|
|
|
|
nr_to_scan = 100;
|
|
|
|
diff = nr_entries - nfs_access_max_cachesize;
|
|
|
|
if (diff < nr_to_scan)
|
|
|
|
nr_to_scan = diff;
|
|
|
|
nfs_do_access_cache_scan(nr_to_scan);
|
|
|
|
}
|
|
|
|
|
2010-05-14 00:51:06 +08:00
|
|
|
static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-07-25 23:28:18 +08:00
|
|
|
struct rb_root *root_node = &nfsi->access_cache;
|
2010-05-14 00:51:06 +08:00
|
|
|
struct rb_node *n;
|
2006-07-25 23:28:18 +08:00
|
|
|
struct nfs_access_entry *entry;
|
|
|
|
|
|
|
|
/* Unhook entries from the cache */
|
|
|
|
while ((n = rb_first(root_node)) != NULL) {
|
|
|
|
entry = rb_entry(n, struct nfs_access_entry, rb_node);
|
|
|
|
rb_erase(n, root_node);
|
2010-05-14 00:51:06 +08:00
|
|
|
list_move(&entry->lru, head);
|
2006-07-25 23:28:18 +08:00
|
|
|
}
|
|
|
|
nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
void nfs_access_zap_cache(struct inode *inode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-05-14 00:51:06 +08:00
|
|
|
LIST_HEAD(head);
|
|
|
|
|
|
|
|
if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
|
|
|
|
return;
|
2006-07-25 23:28:18 +08:00
|
|
|
/* Remove from global LRU init */
|
2010-05-14 00:51:06 +08:00
|
|
|
spin_lock(&nfs_access_lru_lock);
|
|
|
|
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
|
2006-07-25 23:28:18 +08:00
|
|
|
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_lock(&inode->i_lock);
|
2010-05-14 00:51:06 +08:00
|
|
|
__nfs_access_zap_cache(NFS_I(inode), &head);
|
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
spin_unlock(&nfs_access_lru_lock);
|
|
|
|
nfs_access_free_list(&head);
|
2006-07-25 23:28:18 +08:00
|
|
|
}
|
2012-07-31 04:05:24 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
|
2006-07-25 23:28:18 +08:00
|
|
|
{
|
|
|
|
struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
|
|
|
|
|
|
|
|
while (n != NULL) {
|
2018-12-03 08:30:30 +08:00
|
|
|
struct nfs_access_entry *entry =
|
|
|
|
rb_entry(n, struct nfs_access_entry, rb_node);
|
|
|
|
int cmp = cred_fscmp(cred, entry->cred);
|
2006-07-25 23:28:18 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
if (cmp < 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
n = n->rb_left;
|
2018-12-03 08:30:30 +08:00
|
|
|
else if (cmp > 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
n = n->rb_right;
|
|
|
|
else
|
|
|
|
return entry;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2006-07-25 23:28:18 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-06-24 06:38:57 +08:00
|
|
|
/*
 * Slow-path ACCESS cache lookup under i_lock. If the attribute cache is
 * stale and @may_block allows it, revalidate the inode once (dropping the
 * lock for the RPC) and retry. Returns 0 with *res filled on a hit,
 * -ENOENT on a miss (zapping the cache if it was marked invalid), or
 * -ECHILD when blocking is required but forbidden.
 */
static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_access_entry *cache;
	bool retry = true;
	int err;

	spin_lock(&inode->i_lock);
	for(;;) {
		if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
			goto out_zap;
		cache = nfs_access_search_rbtree(inode, cred);
		err = -ENOENT;
		if (cache == NULL)
			goto out;
		/* Found an entry, is our attribute cache valid? */
		if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
			break;
		if (!retry)
			break;
		err = -ECHILD;
		if (!may_block)
			goto out;
		/* Revalidate once without holding i_lock, then re-check */
		spin_unlock(&inode->i_lock);
		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
		if (err)
			return err;
		spin_lock(&inode->i_lock);
		retry = false;
	}
	res->cred = cache->cred;
	res->mask = cache->mask;
	/* Keep the hit warm: move it to the MRU end of the inode's LRU */
	list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
	err = 0;
out:
	spin_unlock(&inode->i_lock);
	return err;
out_zap:
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	return -ENOENT;
}
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res)
|
2014-07-14 09:28:20 +08:00
|
|
|
{
|
|
|
|
/* Only check the most recently returned cache entry,
|
|
|
|
* but do it without locking.
|
|
|
|
*/
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct nfs_access_entry *cache;
|
|
|
|
int err = -ECHILD;
|
|
|
|
struct list_head *lh;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
|
|
|
|
goto out;
|
2019-12-10 13:46:39 +08:00
|
|
|
lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru));
|
2014-07-14 09:28:20 +08:00
|
|
|
cache = list_entry(lh, struct nfs_access_entry, lru);
|
|
|
|
if (lh == &nfsi->access_cache_entry_lru ||
|
2020-01-27 06:31:14 +08:00
|
|
|
cred_fscmp(cred, cache->cred) != 0)
|
2014-07-14 09:28:20 +08:00
|
|
|
cache = NULL;
|
|
|
|
if (cache == NULL)
|
|
|
|
goto out;
|
2016-12-17 07:40:03 +08:00
|
|
|
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
|
2014-07-14 09:28:20 +08:00
|
|
|
goto out;
|
|
|
|
res->cred = cache->cred;
|
|
|
|
res->mask = cache->mask;
|
2016-12-17 07:40:03 +08:00
|
|
|
err = 0;
|
2014-07-14 09:28:20 +08:00
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-06-24 06:38:57 +08:00
|
|
|
int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct
|
|
|
|
nfs_access_entry *res, bool may_block)
|
|
|
|
{
|
|
|
|
int status;
|
|
|
|
|
|
|
|
status = nfs_access_get_cached_rcu(inode, cred, res);
|
|
|
|
if (status != 0)
|
|
|
|
status = nfs_access_get_cached_locked(inode, cred, res,
|
|
|
|
may_block);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs_access_get_cached);
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
|
|
|
|
{
|
2006-07-25 23:28:18 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct rb_root *root_node = &nfsi->access_cache;
|
2006-07-25 23:28:18 +08:00
|
|
|
struct rb_node **p = &root_node->rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct nfs_access_entry *entry;
|
2018-12-03 08:30:30 +08:00
|
|
|
int cmp;
|
2006-07-25 23:28:18 +08:00
|
|
|
|
|
|
|
spin_lock(&inode->i_lock);
|
|
|
|
while (*p != NULL) {
|
|
|
|
parent = *p;
|
|
|
|
entry = rb_entry(parent, struct nfs_access_entry, rb_node);
|
2018-12-03 08:30:30 +08:00
|
|
|
cmp = cred_fscmp(set->cred, entry->cred);
|
2006-07-25 23:28:18 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
if (cmp < 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
p = &parent->rb_left;
|
2018-12-03 08:30:30 +08:00
|
|
|
else if (cmp > 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
p = &parent->rb_right;
|
|
|
|
else
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
rb_link_node(&set->rb_node, parent, p);
|
|
|
|
rb_insert_color(&set->rb_node, root_node);
|
2006-07-25 23:28:18 +08:00
|
|
|
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
|
2005-08-19 02:24:12 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
2006-07-25 23:28:18 +08:00
|
|
|
return;
|
|
|
|
found:
|
|
|
|
rb_replace_node(parent, &set->rb_node, root_node);
|
2006-07-25 23:28:18 +08:00
|
|
|
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
|
|
|
|
list_del(&entry->lru);
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
nfs_access_free_entry(entry);
|
|
|
|
}
|
|
|
|
|
2012-09-11 02:00:46 +08:00
|
|
|
void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
|
2006-07-25 23:28:18 +08:00
|
|
|
{
|
|
|
|
struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
|
|
|
|
if (cache == NULL)
|
|
|
|
return;
|
|
|
|
RB_CLEAR_NODE(&cache->rb_node);
|
2018-12-03 08:30:30 +08:00
|
|
|
cache->cred = get_cred(set->cred);
|
2005-04-17 06:20:36 +08:00
|
|
|
cache->mask = set->mask;
|
2006-07-25 23:28:18 +08:00
|
|
|
|
2014-07-14 09:28:20 +08:00
|
|
|
/* The above field assignments must be visible
|
|
|
|
* before this item appears on the lru. We cannot easily
|
|
|
|
* use rcu_assign_pointer, so just force the memory barrier.
|
|
|
|
*/
|
|
|
|
smp_wmb();
|
2006-07-25 23:28:18 +08:00
|
|
|
nfs_access_add_rbtree(inode, cache);
|
2006-07-25 23:28:18 +08:00
|
|
|
|
|
|
|
/* Update accounting */
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__before_atomic();
|
2006-07-25 23:28:18 +08:00
|
|
|
atomic_long_inc(&nfs_access_nr_entries);
|
2014-03-18 01:06:10 +08:00
|
|
|
smp_mb__after_atomic();
|
2006-07-25 23:28:18 +08:00
|
|
|
|
|
|
|
/* Add inode to global LRU list */
|
2010-05-14 00:51:06 +08:00
|
|
|
if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_lock(&nfs_access_lru_lock);
|
2010-05-14 00:51:06 +08:00
|
|
|
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
|
|
|
|
list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
|
|
|
|
&nfs_access_lru_list);
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_unlock(&nfs_access_lru_lock);
|
|
|
|
}
|
2014-07-22 01:53:48 +08:00
|
|
|
nfs_access_cache_enforce_limit();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2012-09-11 02:00:46 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_access_add_cache);
|
|
|
|
|
2017-07-26 22:14:55 +08:00
|
|
|
/* Map VFS MAY_* semantics onto NFS ACCESS protocol bits. Directories need
 * DELETE for "write" and LOOKUP for "exec"; regular files need only
 * MODIFY|EXTEND for "write" and EXECUTE for "exec". */
#define NFS_MAY_READ (NFS_ACCESS_READ)
#define NFS_MAY_WRITE (NFS_ACCESS_MODIFY | \
		NFS_ACCESS_EXTEND | \
		NFS_ACCESS_DELETE)
#define NFS_FILE_MAY_WRITE (NFS_ACCESS_MODIFY | \
		NFS_ACCESS_EXTEND)
#define NFS_DIR_MAY_WRITE NFS_MAY_WRITE
#define NFS_MAY_LOOKUP (NFS_ACCESS_LOOKUP)
#define NFS_MAY_EXECUTE (NFS_ACCESS_EXECUTE)
|
2017-07-12 05:54:32 +08:00
|
|
|
static int
|
2017-07-12 05:54:35 +08:00
|
|
|
nfs_access_calc_mask(u32 access_result, umode_t umode)
|
2017-07-12 05:54:32 +08:00
|
|
|
{
|
|
|
|
int mask = 0;
|
|
|
|
|
|
|
|
if (access_result & NFS_MAY_READ)
|
|
|
|
mask |= MAY_READ;
|
2017-07-12 05:54:35 +08:00
|
|
|
if (S_ISDIR(umode)) {
|
|
|
|
if ((access_result & NFS_DIR_MAY_WRITE) == NFS_DIR_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
if ((access_result & NFS_MAY_LOOKUP) == NFS_MAY_LOOKUP)
|
|
|
|
mask |= MAY_EXEC;
|
|
|
|
} else if (S_ISREG(umode)) {
|
|
|
|
if ((access_result & NFS_FILE_MAY_WRITE) == NFS_FILE_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
if ((access_result & NFS_MAY_EXECUTE) == NFS_MAY_EXECUTE)
|
|
|
|
mask |= MAY_EXEC;
|
|
|
|
} else if (access_result & NFS_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
2017-07-12 05:54:32 +08:00
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2012-09-11 02:00:46 +08:00
|
|
|
/* Record the raw ACCESS result on a cache entry (no translation here). */
void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result)
{
	entry->mask = access_result;
}
EXPORT_SYMBOL_GPL(nfs_access_set_mask);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs_access_entry cache;
|
2016-06-04 05:07:19 +08:00
|
|
|
bool may_block = (mask & MAY_NOT_BLOCK) == 0;
|
2020-01-07 04:25:12 +08:00
|
|
|
int cache_mask = -1;
|
2005-04-17 06:20:36 +08:00
|
|
|
int status;
|
|
|
|
|
2013-08-20 06:59:33 +08:00
|
|
|
trace_nfs_access_enter(inode);
|
|
|
|
|
2020-06-24 06:38:57 +08:00
|
|
|
status = nfs_access_get_cached(inode, cred, &cache, may_block);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status == 0)
|
2013-08-20 06:59:33 +08:00
|
|
|
goto out_cached;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-07-14 09:28:20 +08:00
|
|
|
status = -ECHILD;
|
2016-06-04 05:07:19 +08:00
|
|
|
if (!may_block)
|
2014-07-14 09:28:20 +08:00
|
|
|
goto out;
|
|
|
|
|
2017-07-27 00:00:21 +08:00
|
|
|
/*
|
|
|
|
* Determine which access bits we want to ask for...
|
|
|
|
*/
|
|
|
|
cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
|
|
|
|
if (S_ISDIR(inode->i_mode))
|
|
|
|
cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
|
|
|
|
else
|
|
|
|
cache.mask |= NFS_ACCESS_EXECUTE;
|
2005-04-17 06:20:36 +08:00
|
|
|
cache.cred = cred;
|
|
|
|
status = NFS_PROTO(inode)->access(inode, &cache);
|
2009-03-11 08:33:21 +08:00
|
|
|
if (status != 0) {
|
|
|
|
if (status == -ESTALE) {
|
|
|
|
if (!S_ISDIR(inode->i_mode))
|
2020-04-07 01:39:29 +08:00
|
|
|
nfs_set_inode_stale(inode);
|
|
|
|
else
|
|
|
|
nfs_zap_caches(inode);
|
2009-03-11 08:33:21 +08:00
|
|
|
}
|
2013-08-20 06:59:33 +08:00
|
|
|
goto out;
|
2009-03-11 08:33:21 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
nfs_access_add_cache(inode, &cache);
|
2013-08-20 06:59:33 +08:00
|
|
|
out_cached:
|
2017-07-12 05:54:35 +08:00
|
|
|
cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
|
2017-07-12 05:54:34 +08:00
|
|
|
if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
|
2013-08-20 06:59:33 +08:00
|
|
|
status = -EACCES;
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2020-01-07 04:25:12 +08:00
|
|
|
trace_nfs_access_exit(inode, mask, cache_mask, status);
|
2013-08-20 06:59:33 +08:00
|
|
|
return status;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-08-11 05:45:10 +08:00
|
|
|
static int nfs_open_permission_mask(int openflags)
|
|
|
|
{
|
|
|
|
int mask = 0;
|
|
|
|
|
2013-01-04 05:42:29 +08:00
|
|
|
if (openflags & __FMODE_EXEC) {
|
|
|
|
/* ONLY check exec rights */
|
|
|
|
mask = MAY_EXEC;
|
|
|
|
} else {
|
|
|
|
if ((openflags & O_ACCMODE) != O_WRONLY)
|
|
|
|
mask |= MAY_READ;
|
|
|
|
if ((openflags & O_ACCMODE) != O_RDONLY)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
}
|
|
|
|
|
2007-08-11 05:45:10 +08:00
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
/* Check whether @cred may open @inode with the given open(2) flags. */
int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags)
{
	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
}
EXPORT_SYMBOL_GPL(nfs_may_open);
|
2007-08-11 05:45:10 +08:00
|
|
|
|
2015-12-29 08:30:05 +08:00
|
|
|
static int nfs_execute_ok(struct inode *inode, int mask)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2016-12-17 07:40:03 +08:00
|
|
|
int ret = 0;
|
2015-12-29 08:30:05 +08:00
|
|
|
|
2018-07-25 02:27:11 +08:00
|
|
|
if (S_ISDIR(inode->i_mode))
|
|
|
|
return 0;
|
2018-07-21 04:19:35 +08:00
|
|
|
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_OTHER)) {
|
2016-12-17 07:40:03 +08:00
|
|
|
if (mask & MAY_NOT_BLOCK)
|
|
|
|
return -ECHILD;
|
|
|
|
ret = __nfs_revalidate_inode(server, inode);
|
|
|
|
}
|
2015-12-29 08:30:05 +08:00
|
|
|
if (ret == 0 && !execute_ok(inode))
|
|
|
|
ret = -EACCES;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:28:19 +08:00
|
|
|
int nfs_permission(struct inode *inode, int mask)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-12-03 08:30:30 +08:00
|
|
|
const struct cred *cred = current_cred();
|
2005-04-17 06:20:36 +08:00
|
|
|
int res = 0;
|
|
|
|
|
2006-03-21 02:44:14 +08:00
|
|
|
nfs_inc_stats(inode, NFSIOS_VFSACCESS);
|
|
|
|
|
2008-07-16 09:03:57 +08:00
|
|
|
if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out;
|
|
|
|
/* Is this sys_access() ? */
|
2010-07-23 23:43:51 +08:00
|
|
|
if (mask & (MAY_ACCESS | MAY_CHDIR))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto force_lookup;
|
|
|
|
|
|
|
|
switch (inode->i_mode & S_IFMT) {
|
|
|
|
case S_IFLNK:
|
|
|
|
goto out;
|
|
|
|
case S_IFREG:
|
2015-12-27 10:54:58 +08:00
|
|
|
if ((mask & MAY_OPEN) &&
|
|
|
|
nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
case S_IFDIR:
|
|
|
|
/*
|
|
|
|
* Optimize away all write operations, since the server
|
|
|
|
* will check permissions when we perform the op.
|
|
|
|
*/
|
|
|
|
if ((mask & MAY_WRITE) && !(mask & MAY_READ))
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
force_lookup:
|
|
|
|
if (!NFS_PROTO(inode)->access)
|
|
|
|
goto out_notsup;
|
|
|
|
|
2020-03-06 11:45:26 +08:00
|
|
|
res = nfs_do_access(inode, cred, mask);
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2015-12-29 08:30:05 +08:00
|
|
|
if (!res && (mask & MAY_EXEC))
|
|
|
|
res = nfs_execute_ok(inode, mask);
|
2008-07-31 19:41:58 +08:00
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
|
2006-03-21 02:44:24 +08:00
|
|
|
inode->i_sb->s_id, inode->i_ino, mask, res);
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
out_notsup:
|
2014-07-14 09:28:20 +08:00
|
|
|
if (mask & MAY_NOT_BLOCK)
|
|
|
|
return -ECHILD;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
|
|
|
|
if (res == 0)
|
2011-06-21 07:16:29 +08:00
|
|
|
res = generic_permission(inode, mask);
|
2006-03-21 02:44:24 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_permission);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Local variables:
|
|
|
|
* version-control: t
|
|
|
|
* kept-new-versions: 5
|
|
|
|
* End:
|
|
|
|
*/
|