vfs-6.11-rc4.fixes
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZrym4AAKCRCRxhvAZXjc
oqT3AP9ydoUNavaZcRayH8r3ybvz9+aJGJ6Q7NznFVCk71vn0gD/buLzmq96Muns
M5DWHbft2AFwK0Rz2nx8j5OXUeHwrQg=
=HZBL
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.11-rc4.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "VFS:

   - Fix the name of the file lease slab cache. When file leases were
     split out of file locks, the name of the file lock slab cache was
     reused for the file lease slab cache as well.

   - Fix a typo in the take_fd() helper's comment.

   - Fix infinite directory iteration for stable offsets in tmpfs.

   - When the icache is pruned, all reclaimable inodes are marked with
     I_FREEING and other processes that try to look up such inodes will
     block. But some filesystems, like ext4, can trigger lookups in
     their inode evict callback, causing deadlocks. Ext4 does such
     lookups when the ea_inode feature is used, whereby a separate
     inode may be used to store xattrs. Introduce I_LRU_ISOLATING,
     which pins the inode while its pages are reclaimed. This avoids
     inode deletion during inode_lru_isolate(), preventing the
     deadlock, and evict() is made to wait until I_LRU_ISOLATING is
     cleared.

  netfs:

   - Fault in smaller chunks for non-large folio mappings for
     filesystems that haven't been converted to large folios yet.

   - Fix the CONFIG_NETFS_DEBUG config option. The config option was
     renamed a short while ago and that introduced two minor issues.
     First, it depended on CONFIG_NETFS whereas it wants to depend on
     CONFIG_NETFS_SUPPORT. The former doesn't exist, while the latter
     does. Second, the documentation for the config option wasn't
     fixed up.

   - Revert the removal of the PG_private_2 writeback flag as ceph is
     using it, and fix how that flag is handled in netfs.

   - Fix DIO reads on 9p. A program watching a file on a 9p mount
     wouldn't see any changes in the size of the file being exported by
     the server if the file was changed directly in the source
     filesystem. Fix this by attempting to read the full size specified
     when a DIO read is requested.

   - Fix a NULL pointer dereference bug due to a data race where a
     cachefiles cookie was retired even though it was still in use.
     Check the cookie's n_accesses counter before discarding it.

  nsfs:

   - Fix the ioctl declaration for NS_GET_MNTNS_ID from _IO() to _IOR()
     as the kernel is writing to userspace.

  pidfs:

   - Prevent the creation of pidfds for kthreads until we have a
     use-case for it and we know the semantics we want. It also
     confuses userspace as to why it can get pidfds for kthreads.

  squashfs:

   - Fix an uninitialized value bug reported by KMSAN, caused by a
     corrupted symbolic link size read from disk. Check that the
     symbolic link size is not larger than expected"

* tag 'vfs-6.11-rc4.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  Squashfs: sanity check symbolic link size
  9p: Fix DIO read through netfs
  vfs: Don't evict inode under the inode lru traversing context
  netfs: Fix handling of USE_PGPRIV2 and WRITE_TO_CACHE flags
  netfs, ceph: Revert "netfs: Remove deprecated use of PG_private_2 as a second writeback flag"
  file: fix typo in take_fd() comment
  pidfd: prevent creation of pidfds for kthreads
  netfs: clean up after renaming FSCACHE_DEBUG config
  libfs: fix infinite directory reads for offset dir
  nsfs: fix ioctl declaration
  fs/netfs/fscache_cookie: add missing "n_accesses" check
  filelock: fix name of file_lease slab cache
  netfs: Fault in smaller chunks for non-large folio mappings
commit 4ac0f08f44
Documentation/filesystems/caching/fscache.rst

@@ -318,10 +318,10 @@ where the columns are:
 Debugging
 =========
 
-If CONFIG_FSCACHE_DEBUG is enabled, the FS-Cache facility can have runtime
-debugging enabled by adjusting the value in::
+If CONFIG_NETFS_DEBUG is enabled, the FS-Cache facility and NETFS support can
+have runtime debugging enabled by adjusting the value in::
 
-	/sys/module/fscache/parameters/debug
+	/sys/module/netfs/parameters/debug
 
 This is a bitmask of debugging streams to enable:
 
@@ -343,6 +343,6 @@ This is a bitmask of debugging streams to enable:
 The appropriate set of values should be OR'd together and the result written to
 the control file. For example::
 
-	echo $((1|8|512)) >/sys/module/fscache/parameters/debug
+	echo $((1|8|512)) >/sys/module/netfs/parameters/debug
 
 will turn on all function entry debugging.
fs/9p/vfs_addr.c

@@ -75,7 +75,8 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 
 	/* if we just extended the file size, any portion not in
 	 * cache won't be on server and is zeroes */
-	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+	if (subreq->rreq->origin != NETFS_DIO_READ)
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 
 	netfs_subreq_terminated(subreq, err ?: total, false);
 }
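The commit message's 9p item is about exactly this path: a DIO read should ask for the full requested length rather than stopping at the client's stale idea of the file size. Below is a rough userspace sketch of the "watcher" scenario, not part of the series; the mount point and buffer size are made up for illustration.

```c
/* Sketch only: poll a file on a 9p mount with O_DIRECT reads.
 * "/mnt/9p/shared.log" is a hypothetical path. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	ssize_t n;
	int fd;

	/* O_DIRECT requires suitably aligned buffers and lengths. */
	if (posix_memalign(&buf, 4096, 1 << 20))
		return 1;

	fd = open("/mnt/9p/shared.log", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the fix, the full requested length is attempted, so growth
	 * made directly on the server side becomes visible here. */
	n = pread(fd, buf, 1 << 20, 0);
	printf("read %zd bytes\n", n);

	close(fd);
	free(buf);
	return 0;
}
```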
fs/afs/file.c

@@ -242,7 +242,8 @@ static void afs_fetch_data_notify(struct afs_operation *op)
 
 	req->error = error;
 	if (subreq) {
-		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+		if (subreq->rreq->origin != NETFS_DIO_READ)
+			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 		netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
 		req->subreq = NULL;
 	} else if (req->done) {
fs/ceph/addr.c

@@ -246,7 +246,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 	if (err >= 0) {
 		if (sparse && err > 0)
 			err = ceph_sparse_ext_map_end(op);
-		if (err < subreq->len)
+		if (err < subreq->len &&
+		    subreq->rreq->origin != NETFS_DIO_READ)
 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 		if (IS_ENCRYPTED(inode) && err > 0) {
 			err = ceph_fscrypt_decrypt_extents(inode,
@@ -282,7 +283,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 	size_t len;
 	int mode;
 
-	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+	if (rreq->origin != NETFS_DIO_READ)
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 
 	if (subreq->start >= inode->i_size)
@@ -424,6 +426,9 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
 	struct ceph_netfs_request_data *priv;
 	int ret = 0;
 
+	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+
 	if (rreq->origin != NETFS_READAHEAD)
 		return 0;
 
@@ -498,6 +503,11 @@ const struct netfs_request_ops ceph_netfs_ops = {
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
+{
+	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
+}
+
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
 	struct inode *inode = priv;
@@ -515,6 +525,10 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b
 				 ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
+
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
@@ -706,6 +720,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 		len = wlen;
 
 	set_page_writeback(page);
+	if (caching)
+		ceph_set_page_fscache(page);
 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	if (IS_ENCRYPTED(inode)) {
@@ -789,6 +805,8 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 		return AOP_WRITEPAGE_ACTIVATE;
 	}
 
+	folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
+
 	err = writepage_nounlock(page, wbc);
 	if (err == -ERESTARTSYS) {
 		/* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1062,7 +1080,8 @@ get_more_pages:
 				unlock_page(page);
 				break;
 			}
-			if (PageWriteback(page)) {
+			if (PageWriteback(page) ||
+			    PagePrivate2(page) /* [DEPRECATED] */) {
 				if (wbc->sync_mode == WB_SYNC_NONE) {
 					doutc(cl, "%p under writeback\n", page);
 					unlock_page(page);
@@ -1070,6 +1089,7 @@ get_more_pages:
 				}
 				doutc(cl, "waiting on writeback %p\n", page);
 				wait_on_page_writeback(page);
+				folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
 			}
 
 			if (!clear_page_dirty_for_io(page)) {
@@ -1254,6 +1274,8 @@ new_request:
 			}
 
 			set_page_writeback(page);
+			if (caching)
+				ceph_set_page_fscache(page);
 			len += thp_size(page);
 		}
 		ceph_fscache_write_to_cache(inode, offset, len, caching);
fs/ceph/inode.c

@@ -577,8 +577,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
 	/* Set parameters for the netfs library */
 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
-	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
-	__set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags);
 
 	spin_lock_init(&ci->i_ceph_lock);
 
fs/inode.c | 39
@@ -488,6 +488,39 @@ static void inode_lru_list_del(struct inode *inode)
 		this_cpu_dec(nr_unused);
 }
 
+static void inode_pin_lru_isolating(struct inode *inode)
+{
+	lockdep_assert_held(&inode->i_lock);
+	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
+	inode->i_state |= I_LRU_ISOLATING;
+}
+
+static void inode_unpin_lru_isolating(struct inode *inode)
+{
+	spin_lock(&inode->i_lock);
+	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
+	inode->i_state &= ~I_LRU_ISOLATING;
+	smp_mb();
+	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
+	spin_unlock(&inode->i_lock);
+}
+
+static void inode_wait_for_lru_isolating(struct inode *inode)
+{
+	spin_lock(&inode->i_lock);
+	if (inode->i_state & I_LRU_ISOLATING) {
+		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
+		wait_queue_head_t *wqh;
+
+		wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
+		spin_unlock(&inode->i_lock);
+		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
+		spin_lock(&inode->i_lock);
+		WARN_ON(inode->i_state & I_LRU_ISOLATING);
+	}
+	spin_unlock(&inode->i_lock);
+}
+
 /**
  * inode_sb_list_add - add inode to the superblock list of inodes
  * @inode: inode to add
@@ -657,6 +690,8 @@ static void evict(struct inode *inode)
 
 	inode_sb_list_del(inode);
 
+	inode_wait_for_lru_isolating(inode);
+
 	/*
 	 * Wait for flusher thread to be done with the inode so that filesystem
 	 * does not start destroying it while writeback is still running. Since
@@ -855,7 +890,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 	 * be under pressure before the cache inside the highmem zone.
 	 */
 	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
-		__iget(inode);
+		inode_pin_lru_isolating(inode);
 		spin_unlock(&inode->i_lock);
 		spin_unlock(lru_lock);
 		if (remove_inode_buffers(inode)) {
@@ -867,7 +902,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 				__count_vm_events(PGINODESTEAL, reap);
 			mm_account_reclaimed_pages(reap);
 		}
-		iput(inode);
+		inode_unpin_lru_isolating(inode);
 		spin_lock(lru_lock);
 		return LRU_RETRY;
 	}
fs/libfs.c | 35
@@ -450,6 +450,14 @@ void simple_offset_destroy(struct offset_ctx *octx)
 	mtree_destroy(&octx->mt);
 }
 
+static int offset_dir_open(struct inode *inode, struct file *file)
+{
+	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+
+	file->private_data = (void *)ctx->next_offset;
+	return 0;
+}
+
 /**
  * offset_dir_llseek - Advance the read position of a directory descriptor
  * @file: an open directory whose position is to be updated
@@ -463,6 +471,9 @@ void simple_offset_destroy(struct offset_ctx *octx)
  */
 static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
 {
+	struct inode *inode = file->f_inode;
+	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+
 	switch (whence) {
 	case SEEK_CUR:
 		offset += file->f_pos;
@@ -476,7 +487,8 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
 	}
 
 	/* In this case, ->private_data is protected by f_pos_lock */
-	file->private_data = NULL;
+	if (!offset)
+		file->private_data = (void *)ctx->next_offset;
 	return vfs_setpos(file, offset, LONG_MAX);
 }
 
@@ -507,7 +519,7 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
 			  inode->i_ino, fs_umode_to_dtype(inode->i_mode));
 }
 
-static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
 {
 	struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
 	struct dentry *dentry;
@@ -515,17 +527,21 @@ static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
 	while (true) {
 		dentry = offset_find_next(octx, ctx->pos);
 		if (!dentry)
-			return ERR_PTR(-ENOENT);
+			return;
+
+		if (dentry2offset(dentry) >= last_index) {
+			dput(dentry);
+			return;
+		}
 
 		if (!offset_dir_emit(ctx, dentry)) {
 			dput(dentry);
-			break;
+			return;
 		}
 
 		ctx->pos = dentry2offset(dentry) + 1;
 		dput(dentry);
 	}
-	return NULL;
 }
 
 /**
@@ -552,22 +568,19 @@ static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
 static int offset_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct dentry *dir = file->f_path.dentry;
+	long last_index = (long)file->private_data;
 
 	lockdep_assert_held(&d_inode(dir)->i_rwsem);
 
 	if (!dir_emit_dots(file, ctx))
 		return 0;
 
-	/* In this case, ->private_data is protected by f_pos_lock */
-	if (ctx->pos == DIR_OFFSET_MIN)
-		file->private_data = NULL;
-	else if (file->private_data == ERR_PTR(-ENOENT))
-		return 0;
-	file->private_data = offset_iterate_dir(d_inode(dir), ctx);
+	offset_iterate_dir(d_inode(dir), ctx, last_index);
 	return 0;
 }
 
 const struct file_operations simple_offset_dir_operations = {
+	.open		= offset_dir_open,
 	.llseek		= offset_dir_llseek,
 	.iterate_shared	= offset_readdir,
 	.read		= generic_read_dir,
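For context on the libfs change above: the reader's view is now bounded by the next_offset value captured at open() time, so entries created while iterating can no longer keep the loop going forever. A rough userspace sketch of the pathological pattern on a tmpfs directory follows; the path is hypothetical and this is illustration only, not part of the series.

```c
/* Sketch only: keep creating entries while iterating a tmpfs directory.
 * "/tmp/offset-dir-demo" is a hypothetical, pre-created directory. */
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	DIR *dir = opendir("/tmp/offset-dir-demo");
	struct dirent *de;
	int i = 0;

	if (!dir)
		return 1;

	while ((de = readdir(dir)) != NULL) {
		char name[64];

		/* Add a new entry on every iteration; with the fix, iteration
		 * still stops at the offset recorded when the dir was opened. */
		snprintf(name, sizeof(name), "/tmp/offset-dir-demo/new-%d", i++);
		close(open(name, O_CREAT | O_WRONLY, 0600));
		printf("saw %s\n", de->d_name);
	}

	closedir(dir);
	return 0;
}
```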
fs/locks.c

@@ -2984,7 +2984,7 @@ static int __init filelock_init(void)
 	filelock_cache = kmem_cache_create("file_lock_cache",
 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
-	filelease_cache = kmem_cache_create("file_lock_cache",
+	filelease_cache = kmem_cache_create("file_lease_cache",
 			sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
 
 	for_each_possible_cpu(i) {
fs/netfs/Kconfig

@@ -24,7 +24,7 @@ config NETFS_STATS
 
 config NETFS_DEBUG
 	bool "Enable dynamic debugging netfslib and FS-Cache"
-	depends on NETFS
+	depends on NETFS_SUPPORT
 	help
 	  This permits debugging to be dynamically enabled in the local caching
 	  management module. If this is set, the debugging output may be
fs/netfs/buffered_read.c

@@ -9,6 +9,97 @@
 #include <linux/task_io_accounting_ops.h>
 #include "internal.h"
 
+/*
+ * [DEPRECATED] Unlock the folios in a read operation for when the filesystem
+ * is using PG_private_2 and direct writing to the cache from here rather than
+ * marking the page for writeback.
+ *
+ * Note that we don't touch folio->private in this code.
+ */
+static void netfs_rreq_unlock_folios_pgpriv2(struct netfs_io_request *rreq,
+					     size_t *account)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	pgoff_t start_page = rreq->start / PAGE_SIZE;
+	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	bool subreq_failed = false;
+
+	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+	/* Walk through the pagecache and the I/O request lists simultaneously.
+	 * We may have a mixture of cached and uncached sections and we only
+	 * really want to write out the uncached sections.  This is slightly
+	 * complicated by the possibility that we might have huge pages with a
+	 * mixture inside.
+	 */
+	subreq = list_first_entry(&rreq->subrequests,
+				  struct netfs_io_subrequest, rreq_link);
+	subreq_failed = (subreq->error < 0);
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock_pgpriv2);
+
+	rcu_read_lock();
+	xas_for_each(&xas, folio, last_page) {
+		loff_t pg_end;
+		bool pg_failed = false;
+		bool folio_started = false;
+
+		if (xas_retry(&xas, folio))
+			continue;
+
+		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
+		for (;;) {
+			loff_t sreq_end;
+
+			if (!subreq) {
+				pg_failed = true;
+				break;
+			}
+
+			if (!folio_started &&
+			    test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags) &&
+			    fscache_operation_valid(&rreq->cache_resources)) {
+				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+				folio_start_private_2(folio);
+				folio_started = true;
+			}
+
+			pg_failed |= subreq_failed;
+			sreq_end = subreq->start + subreq->len - 1;
+			if (pg_end < sreq_end)
+				break;
+
+			*account += subreq->transferred;
+			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+				subreq = list_next_entry(subreq, rreq_link);
+				subreq_failed = (subreq->error < 0);
+			} else {
+				subreq = NULL;
+				subreq_failed = false;
+			}
+
+			if (pg_end == sreq_end)
+				break;
+		}
+
+		if (!pg_failed) {
+			flush_dcache_folio(folio);
+			folio_mark_uptodate(folio);
+		}
+
+		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+			if (folio->index == rreq->no_unlock_folio &&
+			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+				_debug("no unlock");
+			else
+				folio_unlock(folio);
+		}
+	}
+	rcu_read_unlock();
+}
+
 /*
  * Unlock the folios in a read operation.  We need to set PG_writeback on any
  * folios we're going to write back before we unlock them.
@@ -35,6 +126,12 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 		}
 	}
 
+	/* Handle deprecated PG_private_2 case. */
+	if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+		netfs_rreq_unlock_folios_pgpriv2(rreq, &account);
+		goto out;
+	}
+
 	/* Walk through the pagecache and the I/O request lists simultaneously.
 	 * We may have a mixture of cached and uncached sections and we only
 	 * really want to write out the uncached sections.  This is slightly
@@ -52,7 +149,6 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 		loff_t pg_end;
 		bool pg_failed = false;
 		bool wback_to_cache = false;
-		bool folio_started = false;
 
 		if (xas_retry(&xas, folio))
 			continue;
@@ -66,17 +162,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				pg_failed = true;
 				break;
 			}
-			if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
-				if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
-							       &subreq->flags)) {
-					trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
-					folio_start_private_2(folio);
-					folio_started = true;
-				}
-			} else {
-				wback_to_cache |=
-					test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
-			}
+
+			wback_to_cache |= test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 			pg_failed |= subreq_failed;
 			sreq_end = subreq->start + subreq->len - 1;
 			if (pg_end < sreq_end)
@@ -124,6 +211,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 	}
 	rcu_read_unlock();
 
+out:
 	task_io_account_read(account);
 	if (rreq->netfs_ops->done)
 		rreq->netfs_ops->done(rreq);
@@ -395,7 +483,7 @@ zero_out:
 }
 
 /**
- * netfs_write_begin - Helper to prepare for writing
+ * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
  * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
@@ -426,6 +514,9 @@ zero_out:
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
+ *
+ * Note that this should be considered deprecated and netfs_perform_write()
+ * used instead.
 */
 int netfs_write_begin(struct netfs_inode *ctx,
 		      struct file *file, struct address_space *mapping,
@@ -466,7 +557,7 @@ retry:
 	if (!netfs_is_cache_enabled(ctx) &&
 	    netfs_skip_folio_read(folio, pos, len, false)) {
 		netfs_stat(&netfs_n_rh_write_zskip);
-		goto have_folio;
+		goto have_folio_no_wait;
 	}
 
 	rreq = netfs_alloc_request(mapping, file,
@@ -507,6 +598,10 @@ retry:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
+	ret = folio_wait_private_2_killable(folio);
+	if (ret < 0)
+		goto error;
+have_folio_no_wait:
 	*_folio = folio;
 	_leave(" = 0");
 	return 0;
fs/netfs/buffered_write.c

@@ -184,7 +184,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
 	ssize_t written = 0, ret, ret2;
 	loff_t i_size, pos = iocb->ki_pos, from, to;
-	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+	size_t max_chunk = mapping_max_folio_size(mapping);
 	bool maybe_trouble = false;
 
 	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
fs/netfs/fscache_cookie.c

@@ -741,6 +741,10 @@ again_locked:
 			spin_lock(&cookie->lock);
 		}
 		if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
+			if (atomic_read(&cookie->n_accesses) != 0)
+				/* still being accessed: postpone it */
+				break;
+
 			__fscache_set_cookie_state(cookie,
 						   FSCACHE_COOKIE_STATE_LRU_DISCARDING);
 			wake = true;
fs/netfs/io.c | 161
@@ -98,6 +98,146 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
+/*
+ * [DEPRECATED] Deal with the completion of writing the data to the cache.  We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+					  bool was_async)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	pgoff_t unlocked = 0;
+	bool have_unlocked = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+			if (xas_retry(&xas, folio))
+				continue;
+
+			/* We might have multiple writes from the same huge
+			 * folio, but we mustn't unlock a folio more than once.
+			 */
+			if (have_unlocked && folio->index <= unlocked)
+				continue;
+			unlocked = folio_next_index(folio) - 1;
+			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+			folio_end_private_2(folio);
+			have_unlocked = true;
+		}
+	}
+
+	rcu_read_unlock();
+	netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+				       bool was_async) /* [DEPRECATED] */
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error)) {
+		netfs_stat(&netfs_n_rh_write_failed);
+		trace_netfs_failure(rreq, subreq, transferred_or_error,
+				    netfs_fail_copy_to_cache);
+	} else {
+		netfs_stat(&netfs_n_rh_write_done);
+	}
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, was_async);
+
+	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * [DEPRECATED] Perform any outstanding writes to the cache.  We inherit a ref
+ * from the caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct netfs_io_subrequest *subreq, *next, *p;
+	struct iov_iter iter;
+	int ret;
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+	/* We don't want terminating writes trying to wake us up whilst we're
+	 * still going through the list.
+	 */
+	atomic_inc(&rreq->nr_copy_ops);
+
+	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+			list_del_init(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, false,
+					     netfs_sreq_trace_put_no_copy);
+		}
+	}
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		/* Amalgamate adjacent writes */
+		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+			next = list_next_entry(subreq, rreq_link);
+			if (next->start != subreq->start + subreq->len)
+				break;
+			subreq->len += next->len;
+			list_del_init(&next->rreq_link);
+			netfs_put_subrequest(next, false,
+					     netfs_sreq_trace_put_merged);
+		}
+
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       subreq->len, rreq->i_size, true);
+		if (ret < 0) {
+			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
+		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+				subreq->start, subreq->len);
+
+		atomic_inc(&rreq->nr_copy_ops);
+		netfs_stat(&netfs_n_rh_write);
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+		cres->ops->write(cres, subreq->start, &iter,
+				 netfs_rreq_copy_terminated, subreq);
+	}
+
+	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_copy_ops))
+		netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
+{
+	struct netfs_io_request *rreq =
+		container_of(work, struct netfs_io_request, work);
+
+	netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
+{
+	rreq->work.func = netfs_rreq_write_to_cache_work;
+	if (!queue_work(system_unbound_wq, &rreq->work))
+		BUG();
+}
+
 /*
  * Handle a short read.
  */
@@ -275,6 +415,10 @@ again:
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
+	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
+		return netfs_rreq_write_to_cache(rreq);
+
 	netfs_rreq_completed(rreq, was_async);
 }
 
@@ -386,7 +530,8 @@ incomplete:
 
 	if (transferred_or_error == 0) {
 		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
-			subreq->error = -ENODATA;
+			if (rreq->origin != NETFS_DIO_READ)
+				subreq->error = -ENODATA;
 			goto failed;
 		}
 	} else {
@@ -457,9 +602,14 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
 		}
 		if (subreq->len > ictx->zero_point - subreq->start)
 			subreq->len = ictx->zero_point - subreq->start;
+
+		/* We limit buffered reads to the EOF, but let the
+		 * server deal with larger-than-EOF DIO/unbuffered
+		 * reads.
+		 */
+		if (subreq->len > rreq->i_size - subreq->start)
+			subreq->len = rreq->i_size - subreq->start;
 	}
-	if (subreq->len > rreq->i_size - subreq->start)
-		subreq->len = rreq->i_size - subreq->start;
 	if (rreq->rsize && subreq->len > rreq->rsize)
 		subreq->len = rreq->rsize;
 
@@ -595,11 +745,10 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	do {
 		_debug("submit %llx + %llx >= %llx",
 		       rreq->start, rreq->submitted, rreq->i_size);
-		if (rreq->origin == NETFS_DIO_READ &&
-		    rreq->start + rreq->submitted >= rreq->i_size)
-			break;
 		if (!netfs_rreq_submit_slice(rreq, &io_iter))
 			break;
+		if (test_bit(NETFS_SREQ_NO_PROGRESS, &rreq->flags))
+			break;
 		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
 		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
 			break;
fs/netfs/objects.c

@@ -24,10 +24,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	struct netfs_io_request *rreq;
 	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
 	struct kmem_cache *cache = mempool->pool_data;
-	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
-			      origin == NETFS_DIO_READ ||
-			      origin == NETFS_DIO_WRITE);
-	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
 	int ret;
 
 	for (;;) {
@@ -56,12 +52,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	refcount_set(&rreq->ref, 1);
 
 	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-	if (cached) {
-		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
-		if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
-			/* Filesystem uses deprecated PG_private_2 marking. */
-			__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
-	}
 	if (file && file->f_flags & O_NONBLOCK)
 		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
 	if (rreq->netfs_ops->init_request) {
fs/netfs/write_issue.c

@@ -94,6 +94,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 {
 	struct netfs_io_request *wreq;
 	struct netfs_inode *ictx;
+	bool is_buffered = (origin == NETFS_WRITEBACK ||
+			    origin == NETFS_WRITETHROUGH);
 
 	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
 	if (IS_ERR(wreq))
@@ -102,7 +104,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 	_enter("R=%x", wreq->debug_id);
 
 	ictx = netfs_inode(wreq->inode);
-	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
+	if (is_buffered && netfs_is_cache_enabled(ictx))
 		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
 
 	wreq->contiguity = wreq->start;
fs/nfs/fscache.c

@@ -265,6 +265,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
 {
 	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
 	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
 
 	return 0;
 }
@@ -361,7 +363,8 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
 		return;
 
 	sreq = netfs->sreq;
-	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+	if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
+	    sreq->rreq->origin != NETFS_DIO_READ)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
 
 	if (hdr->error)
fs/nfs/fscache.h

@@ -81,8 +81,6 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
 {
 	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
-	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
-	__set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
 }
 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
fs/smb/client/file.c

@@ -217,7 +217,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
 		goto out;
 	}
 
-	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+	if (subreq->rreq->origin != NETFS_DIO_READ)
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 
 	rc = rdata->server->ops->async_readv(rdata);
 out:
fs/squashfs/inode.c

@@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
 		if (err < 0)
 			goto failed_read;
 
-		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
 		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+		if (inode->i_size > PAGE_SIZE) {
+			ERROR("Corrupted symlink\n");
+			return -EINVAL;
+		}
+
+		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
 		inode->i_op = &squashfs_symlink_inode_ops;
 		inode_nohighmem(inode);
 		inode->i_data.a_ops = &squashfs_symlink_aops;
include/linux/file.h

@@ -110,7 +110,7 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
 *
 * f = dentry_open(&path, O_RDONLY, current_cred());
 * if (IS_ERR(f))
- *	return PTR_ERR(fd);
+ *	return PTR_ERR(f);
 *
 * fd_install(fd, f);
 * return take_fd(fd);
include/linux/fs.h

@@ -2392,6 +2392,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 *
 * I_PINNING_FSCACHE_WB	Inode is pinning an fscache object for writeback.
 *
+ * I_LRU_ISOLATING	Inode is pinned being isolated from LRU without holding
+ *			i_count.
+ *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -2415,6 +2418,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 #define I_DONTCACHE		(1 << 16)
 #define I_SYNC_QUEUED		(1 << 17)
 #define I_PINNING_NETFS_WB	(1 << 18)
+#define __I_LRU_ISOLATING	19
+#define I_LRU_ISOLATING		(1 << __I_LRU_ISOLATING)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
include/linux/netfs.h

@@ -73,8 +73,6 @@ struct netfs_inode {
 #define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
 #define NETFS_ICTX_UNBUFFERED	1		/* I/O should not use the pagecache */
 #define NETFS_ICTX_WRITETHROUGH	2		/* Write-through caching */
-#define NETFS_ICTX_USE_PGPRIV2	31		/* [DEPRECATED] Use PG_private_2 to mark
-						 * write to cache on read */
 };
 
 /*
@@ -269,7 +267,6 @@ struct netfs_io_request {
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED		4	/* The request failed */
 #define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
-#define NETFS_RREQ_WRITE_TO_CACHE	7	/* Need to write to the cache */
 #define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
 #define NETFS_RREQ_NONBLOCK		9	/* Don't block if possible (O_NONBLOCK) */
 #define NETFS_RREQ_BLOCKED		10	/* We blocked */
include/trace/events/netfs.h

@@ -51,6 +51,7 @@
 	EM(netfs_rreq_trace_resubmit,		"RESUBMT")	\
 	EM(netfs_rreq_trace_set_pause,		"PAUSE ")	\
 	EM(netfs_rreq_trace_unlock,		"UNLOCK ")	\
+	EM(netfs_rreq_trace_unlock_pgpriv2,	"UNLCK-2")	\
 	EM(netfs_rreq_trace_unmark,		"UNMARK ")	\
 	EM(netfs_rreq_trace_wait_ip,		"WAIT-IP")	\
 	EM(netfs_rreq_trace_wait_pause,		"WT-PAUS")	\
@@ -145,6 +146,7 @@
 	EM(netfs_folio_trace_clear_g,		"clear-g")	\
 	EM(netfs_folio_trace_clear_s,		"clear-s")	\
 	EM(netfs_folio_trace_copy_to_cache,	"mark-copy")	\
+	EM(netfs_folio_trace_end_copy,		"end-copy")	\
 	EM(netfs_folio_trace_filled_gaps,	"filled-gaps")	\
 	EM(netfs_folio_trace_kill,		"kill")		\
 	EM(netfs_folio_trace_kill_cc,		"kill-cc")	\
include/uapi/linux/nsfs.h

@@ -3,6 +3,7 @@
 #define __LINUX_NSFS_H
 
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 #define NSIO	0xb7
 
@@ -16,7 +17,7 @@
 /* Get owner UID (in the caller's user namespace) for a user namespace */
 #define NS_GET_OWNER_UID	_IO(NSIO, 0x4)
 /* Get the id for a mount namespace */
-#define NS_GET_MNTNS_ID		_IO(NSIO, 0x5)
+#define NS_GET_MNTNS_ID		_IOR(NSIO, 0x5, __u64)
 /* Translate pid from target pid namespace into the caller's pid namespace. */
 #define NS_GET_PID_FROM_PIDNS	_IOR(NSIO, 0x6, int)
 /* Return thread-group leader id of pid in the callers pid namespace. */
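Seen from userspace, the corrected _IOR() declaration matches what the kernel actually does here: it writes a __u64 mount-namespace id back through the ioctl argument. A minimal sketch, assuming uapi headers new enough to define NS_GET_MNTNS_ID:

```c
/* Sketch only: read the mount namespace id of the current task. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsfs.h>
#include <linux/types.h>

int main(void)
{
	__u64 mnt_ns_id = 0;
	int fd = open("/proc/self/ns/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, NS_GET_MNTNS_ID, &mnt_ns_id) < 0) {
		perror("NS_GET_MNTNS_ID");
		return 1;
	}
	/* The kernel filled in mnt_ns_id, which is why _IOR() is correct. */
	printf("mount namespace id: %llu\n", (unsigned long long)mnt_ns_id);
	close(fd);
	return 0;
}
```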
kernel/fork.c

@@ -2053,11 +2053,24 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re
 */
 int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
 {
-	bool thread = flags & PIDFD_THREAD;
-
-	if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
+	if (!pid)
 		return -EINVAL;
 
+	scoped_guard(rcu) {
+		struct task_struct *tsk;
+
+		if (flags & PIDFD_THREAD)
+			tsk = pid_task(pid, PIDTYPE_PID);
+		else
+			tsk = pid_task(pid, PIDTYPE_TGID);
+		if (!tsk)
+			return -EINVAL;
+
+		/* Don't create pidfds for kernel threads for now. */
+		if (tsk->flags & PF_KTHREAD)
+			return -EINVAL;
+	}
+
 	return __pidfd_prepare(pid, flags, ret);
 }
 
@@ -2403,6 +2416,12 @@ __latent_entropy struct task_struct *copy_process(
 	if (clone_flags & CLONE_PIDFD) {
 		int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
 
+		/* Don't create pidfds for kernel threads for now. */
+		if (args->kthread) {
+			retval = -EINVAL;
+			goto bad_fork_free_pid;
+		}
+
 		/* Note that no task has been attached to @pid yet. */
 		retval = __pidfd_prepare(pid, flags, &pidfile);
 		if (retval < 0)
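The effect of the pidfd change is visible directly from userspace: pidfd_open() on a kernel thread now fails with EINVAL. A rough sketch follows; it assumes PID 2 is kthreadd, which is typical but not guaranteed, and is illustration only.

```c
/* Sketch only: try to get a pidfd for a kernel thread. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long fd = syscall(SYS_pidfd_open, 2, 0);	/* PID 2: usually kthreadd */

	if (fd < 0)
		printf("pidfd_open(2): %s (EINVAL expected with this fix)\n",
		       strerror(errno));
	else
		printf("got pidfd %ld (pre-fix behaviour)\n", fd);
	return 0;
}
```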