// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size. A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT	21
#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)
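
/*
 * For example, with FUSE_DAX_SHIFT of 21 each DAX memory range is
 * 1 << 21 = 2 MiB; with a 4 KiB PAGE_SIZE that works out to
 * FUSE_DAX_PAGES = 512 pages per range.
 */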

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK		(10)

/*
 * DAX memory reclaim threshold as a percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD	(20)
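
/*
 * For example, with 100 total ranges the free worker is kicked once fewer
 * than 100 * FUSE_DAX_RECLAIM_THRESHOLD / 100 = 20 ranges remain free
 * (see __kick_dmap_free_worker() below).
 */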

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/* Will connect in fcd->busy_ranges to keep track of busy memory */
	struct list_head busy_list;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;

	/* reference count when the mapping is used by dax iomap. */
	refcount_t refcnt;
};

/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};

struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* List of memory ranges which are busy */
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
static inline struct fuse_dax_mapping *
|
|
|
|
node_to_dmap(struct interval_tree_node *node)
|
|
|
|
{
|
|
|
|
if (!node)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return container_of(node, struct fuse_dax_mapping, itn);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
static struct fuse_dax_mapping *
|
|
|
|
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
|
|
|
|
|
|
|
|
static void
|
|
|
|
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
|
|
|
|
{
|
|
|
|
unsigned long free_threshold;
|
|
|
|
|
|
|
|
/* If the number of free ranges is below the threshold, start reclaim */
|
|
|
|
free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
|
|
|
|
1);
|
|
|
|
if (fcd->nr_free_ranges < free_threshold)
|
|
|
|
queue_delayed_work(system_long_wq, &fcd->free_work,
|
|
|
|
msecs_to_jiffies(delay_ms));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
|
|
|
|
unsigned long delay_ms)
|
|
|
|
{
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
__kick_dmap_free_worker(fcd, delay_ms);
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
|
|
|
|
{
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
dmap = list_first_entry_or_null(&fcd->free_ranges,
|
|
|
|
struct fuse_dax_mapping, list);
|
|
|
|
if (dmap) {
|
|
|
|
list_del_init(&dmap->list);
|
|
|
|
WARN_ON(fcd->nr_free_ranges <= 0);
|
|
|
|
fcd->nr_free_ranges--;
|
|
|
|
}
|
2022-04-02 18:32:50 +08:00
|
|
|
__kick_dmap_free_worker(fcd, 0);
|
2020-08-20 06:19:51 +08:00
|
|
|
spin_unlock(&fcd->lock);
|
2020-08-20 06:19:56 +08:00
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
return dmap;
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:55 +08:00
|
|
|
/* This assumes fcd->lock is held */
|
|
|
|
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
list_del_init(&dmap->busy_list);
|
|
|
|
WARN_ON(fcd->nr_busy_ranges == 0);
|
|
|
|
fcd->nr_busy_ranges--;
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
__dmap_remove_busy_list(fcd, dmap);
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
/* This assumes fcd->lock is held */
|
|
|
|
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
list_add_tail(&dmap->list, &fcd->free_ranges);
|
|
|
|
fcd->nr_free_ranges++;
|
2020-08-20 06:19:56 +08:00
|
|
|
wake_up(&fcd->range_waitq);
|
2020-08-20 06:19:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
/* Return fuse_dax_mapping to free list */
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
__dmap_add_to_free_pool(fcd, dmap);
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
|
|
|
|
struct fuse_dax_mapping *dmap, bool writable,
|
|
|
|
bool upgrade)
|
|
|
|
{
|
2020-05-06 23:44:12 +08:00
|
|
|
struct fuse_mount *fm = get_fuse_mount(inode);
|
|
|
|
struct fuse_conn_dax *fcd = fm->fc->dax;
|
2020-08-20 06:19:51 +08:00
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_setupmapping_in inarg;
|
|
|
|
loff_t offset = start_idx << FUSE_DAX_SHIFT;
|
|
|
|
FUSE_ARGS(args);
|
|
|
|
ssize_t err;
|
|
|
|
|
|
|
|
WARN_ON(fcd->nr_free_ranges < 0);
|
|
|
|
|
|
|
|
/* Ask fuse daemon to setup mapping */
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
inarg.foffset = offset;
|
|
|
|
inarg.fh = -1;
|
|
|
|
inarg.moffset = dmap->window_offset;
|
|
|
|
inarg.len = FUSE_DAX_SZ;
|
|
|
|
inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
|
|
|
|
if (writable)
|
|
|
|
inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
|
|
|
|
args.opcode = FUSE_SETUPMAPPING;
|
|
|
|
args.nodeid = fi->nodeid;
|
|
|
|
args.in_numargs = 1;
|
|
|
|
args.in_args[0].size = sizeof(inarg);
|
|
|
|
args.in_args[0].value = &inarg;
|
2020-05-06 23:44:12 +08:00
|
|
|
err = fuse_simple_request(fm, &args);
|
2020-08-20 06:19:51 +08:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
dmap->writable = writable;
|
|
|
|
if (!upgrade) {
|
2020-08-20 06:19:56 +08:00
|
|
|
/*
|
2021-06-04 09:46:17 +08:00
|
|
|
* We don't take a reference on inode. inode is valid right now
|
2020-08-20 06:19:56 +08:00
|
|
|
* and when the inode is going away, cleanup logic should first
* clean up the dmap entries.
|
|
|
|
*/
|
|
|
|
dmap->inode = inode;
|
2020-08-20 06:19:51 +08:00
|
|
|
dmap->itn.start = dmap->itn.last = start_idx;
|
|
|
|
/* Protected by fi->dax->sem */
|
|
|
|
interval_tree_insert(&dmap->itn, &fi->dax->tree);
|
|
|
|
fi->dax->nr++;
|
2020-08-20 06:19:55 +08:00
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
|
|
|
|
fcd->nr_busy_ranges++;
|
|
|
|
spin_unlock(&fcd->lock);
|
2020-08-20 06:19:51 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_send_removemapping(struct inode *inode,
|
|
|
|
struct fuse_removemapping_in *inargp,
|
|
|
|
struct fuse_removemapping_one *remove_one)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
2020-05-06 23:44:12 +08:00
|
|
|
struct fuse_mount *fm = get_fuse_mount(inode);
|
2020-08-20 06:19:51 +08:00
|
|
|
FUSE_ARGS(args);
|
|
|
|
|
|
|
|
args.opcode = FUSE_REMOVEMAPPING;
|
|
|
|
args.nodeid = fi->nodeid;
|
|
|
|
args.in_numargs = 2;
|
|
|
|
args.in_args[0].size = sizeof(*inargp);
|
|
|
|
args.in_args[0].value = inargp;
|
|
|
|
args.in_args[1].size = inargp->count * sizeof(*remove_one);
|
|
|
|
args.in_args[1].value = remove_one;
|
2020-05-06 23:44:12 +08:00
|
|
|
return fuse_simple_request(fm, &args);
|
2020-08-20 06:19:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int dmap_removemapping_list(struct inode *inode, unsigned int num,
|
|
|
|
struct list_head *to_remove)
|
|
|
|
{
|
|
|
|
struct fuse_removemapping_one *remove_one, *ptr;
|
|
|
|
struct fuse_removemapping_in inarg;
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
int ret, i = 0, nr_alloc;
|
|
|
|
|
|
|
|
nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
|
|
|
|
remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
|
|
|
|
if (!remove_one)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ptr = remove_one;
|
|
|
|
list_for_each_entry(dmap, to_remove, list) {
|
|
|
|
ptr->moffset = dmap->window_offset;
|
|
|
|
ptr->len = dmap->length;
|
|
|
|
ptr++;
|
|
|
|
i++;
|
|
|
|
num--;
|
|
|
|
if (i >= nr_alloc || num == 0) {
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
inarg.count = i;
|
|
|
|
ret = fuse_send_removemapping(inode, &inarg,
|
|
|
|
remove_one);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
ptr = remove_one;
|
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
kfree(remove_one);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cleanup dmap entry and add back to free list. This should be called with
|
|
|
|
* fcd->lock held.
|
|
|
|
*/
|
|
|
|
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
|
|
|
|
dmap->itn.start, dmap->itn.last, dmap->window_offset,
|
|
|
|
dmap->length);
|
2020-08-20 06:19:55 +08:00
|
|
|
__dmap_remove_busy_list(fcd, dmap);
|
2020-08-20 06:19:56 +08:00
|
|
|
dmap->inode = NULL;
|
2020-08-20 06:19:51 +08:00
|
|
|
dmap->itn.start = dmap->itn.last = 0;
|
|
|
|
__dmap_add_to_free_pool(fcd, dmap);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free inode dmap entries whose range falls inside [start, end].
|
|
|
|
* Does not take any locks. At this point of time it should only be
|
|
|
|
* called from evict_inode() path where we know all dmap entries can be
|
|
|
|
* reclaimed.
|
|
|
|
*/
|
|
|
|
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t start, loff_t end)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_dax_mapping *dmap, *n;
|
|
|
|
int err, num = 0;
|
|
|
|
LIST_HEAD(to_remove);
|
|
|
|
unsigned long start_idx = start >> FUSE_DAX_SHIFT;
|
|
|
|
unsigned long end_idx = end >> FUSE_DAX_SHIFT;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, start_idx,
|
|
|
|
end_idx);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
dmap = node_to_dmap(node);
|
2020-08-20 06:19:56 +08:00
|
|
|
/* inode is going away. There should not be any users of dmap */
|
|
|
|
WARN_ON(refcount_read(&dmap->refcnt) > 1);
|
2020-08-20 06:19:51 +08:00
|
|
|
interval_tree_remove(&dmap->itn, &fi->dax->tree);
|
|
|
|
num++;
|
|
|
|
list_add(&dmap->list, &to_remove);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Nothing to remove */
|
|
|
|
if (list_empty(&to_remove))
|
|
|
|
return;
|
|
|
|
|
|
|
|
WARN_ON(fi->dax->nr < num);
|
|
|
|
fi->dax->nr -= num;
|
|
|
|
err = dmap_removemapping_list(inode, num, &to_remove);
|
|
|
|
if (err && err != -ENOTCONN) {
|
|
|
|
pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
|
|
|
|
start, end);
|
|
|
|
}
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
list_for_each_entry_safe(dmap, n, &to_remove, list) {
|
|
|
|
list_del_init(&dmap->list);
|
|
|
|
dmap_reinit_add_to_free_pool(fcd, dmap);
|
|
|
|
}
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
static int dmap_removemapping_one(struct inode *inode,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
struct fuse_removemapping_one forget_one;
|
|
|
|
struct fuse_removemapping_in inarg;
|
|
|
|
|
|
|
|
memset(&inarg, 0, sizeof(inarg));
|
|
|
|
inarg.count = 1;
|
|
|
|
memset(&forget_one, 0, sizeof(forget_one));
|
|
|
|
forget_one.moffset = dmap->window_offset;
|
|
|
|
forget_one.len = dmap->length;
|
|
|
|
|
|
|
|
return fuse_send_removemapping(inode, &inarg, &forget_one);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
/*
|
|
|
|
* It is called from evict_inode() and by that time inode is going away. So
|
|
|
|
* this function does not take any locks like fi->dax->sem for traversing
|
|
|
|
* that fuse inode interval tree. If that lock is taken then lock validator
|
|
|
|
* complains of deadlock situation w.r.t fs_reclaim lock.
|
|
|
|
*/
|
|
|
|
void fuse_dax_inode_cleanup(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* fuse_evict_inode() has already called truncate_inode_pages_final()
|
|
|
|
* before we arrive here. So we should not have to worry about any
|
|
|
|
* pages/exception entries still associated with inode.
|
|
|
|
*/
|
|
|
|
inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
|
|
|
|
WARN_ON(fi->dax->nr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
|
|
|
|
{
|
|
|
|
iomap->addr = IOMAP_NULL_ADDR;
|
|
|
|
iomap->length = length;
|
|
|
|
iomap->type = IOMAP_HOLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
|
|
|
|
struct iomap *iomap, struct fuse_dax_mapping *dmap,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
loff_t offset, len;
|
|
|
|
loff_t i_size = i_size_read(inode);
|
|
|
|
|
|
|
|
offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
|
|
|
|
len = min(length, dmap->length - offset);
|
|
|
|
|
|
|
|
/* If length is beyond end of file, truncate further */
|
|
|
|
if (pos + len > i_size)
|
|
|
|
len = i_size - pos;
|
|
|
|
|
|
|
|
if (len > 0) {
|
|
|
|
iomap->addr = dmap->window_offset + offset;
|
|
|
|
iomap->length = len;
|
|
|
|
if (flags & IOMAP_FAULT)
|
|
|
|
iomap->length = ALIGN(len, PAGE_SIZE);
|
|
|
|
iomap->type = IOMAP_MAPPED;
|
2020-08-20 06:19:56 +08:00
|
|
|
/*
|
|
|
|
* Increase refcnt so that reclaim code knows this dmap is in
* use. This assumes fi->dax->sem is held either shared or
* exclusive.
|
|
|
|
*/
|
|
|
|
refcount_inc(&dmap->refcnt);
|
|
|
|
|
|
|
|
/* iomap->private should be NULL */
|
|
|
|
WARN_ON_ONCE(iomap->private);
|
|
|
|
iomap->private = dmap;
|
2020-08-20 06:19:51 +08:00
|
|
|
} else {
|
|
|
|
/* Mapping beyond end of file is hole */
|
|
|
|
fuse_fill_iomap_hole(iomap, length);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
|
|
|
|
loff_t length, unsigned int flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_conn_dax *fcd = fc->dax;
|
|
|
|
struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
|
|
|
|
int ret;
|
|
|
|
bool writable = flags & IOMAP_WRITE;
|
|
|
|
unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
/*
|
|
|
|
* Can't do inline reclaim in fault path. We call
|
|
|
|
* dax_layout_busy_page() before we free a range. And
|
2021-04-21 23:18:39 +08:00
|
|
|
* fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
|
|
|
|
* In fault path we enter with mapping->invalidate_lock held and can't
|
|
|
|
* drop it. Also in fault path we hold mapping->invalidate_lock shared
|
|
|
|
* and not exclusive, so that creates further issues with
|
|
|
|
* fuse_wait_dax_page(). Hence return -EAGAIN and fuse_dax_fault()
|
|
|
|
* will wait for a memory range to become free and retry.
|
2020-08-20 06:19:56 +08:00
|
|
|
*/
|
|
|
|
if (flags & IOMAP_FAULT) {
|
|
|
|
alloc_dmap = alloc_dax_mapping(fcd);
|
|
|
|
if (!alloc_dmap)
|
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
|
|
|
alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
|
|
|
|
if (IS_ERR(alloc_dmap))
|
|
|
|
return PTR_ERR(alloc_dmap);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we are here, we should have memory allocated */
|
|
|
|
if (WARN_ON(!alloc_dmap))
|
2020-08-20 06:19:51 +08:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take the write lock so that only one caller can try to set up the
* mapping and others wait.
|
|
|
|
*/
|
|
|
|
down_write(&fi->dax->sem);
|
|
|
|
/*
|
|
|
|
* We dropped the lock. Check again whether somebody else has
* already set up the mapping.
|
|
|
|
*/
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
|
|
|
|
if (node) {
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
|
|
|
|
dmap_add_to_free_pool(fcd, alloc_dmap);
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Setup one mapping */
|
|
|
|
ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
|
|
|
|
writable, false);
|
|
|
|
if (ret < 0) {
|
|
|
|
dmap_add_to_free_pool(fcd, alloc_dmap);
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
|
|
|
|
loff_t length, unsigned int flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
int ret;
|
|
|
|
unsigned long idx = pos >> FUSE_DAX_SHIFT;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take exclusive lock so that only one caller can try to setup
|
|
|
|
* mapping and others wait.
|
|
|
|
*/
|
|
|
|
down_write(&fi->dax->sem);
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
|
|
|
|
|
2021-04-21 23:18:39 +08:00
|
|
|
/* We are holding either inode lock or invalidate_lock, and that should
|
2020-08-20 06:19:56 +08:00
|
|
|
* ensure that dmap can't be truncated. We are holding a reference
|
|
|
|
* on dmap and that should make sure it can't be reclaimed. So dmap
|
|
|
|
* should still be there in the tree despite the fact that we dropped and
|
|
|
|
* re-acquired the fi->dax->sem lock.
|
2020-08-20 06:19:51 +08:00
|
|
|
*/
|
|
|
|
ret = -EIO;
|
|
|
|
if (WARN_ON(!node))
|
|
|
|
goto out_err;
|
|
|
|
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
/* We took an extra reference on dmap to make sure it's not reclaimed.
|
|
|
|
* Now we hold fi->dax->sem lock and that reference is not needed
|
|
|
|
* anymore. Drop it.
|
|
|
|
*/
|
|
|
|
if (refcount_dec_and_test(&dmap->refcnt)) {
|
|
|
|
/* refcount should not hit 0. This object only goes
|
|
|
|
* away when fuse connection goes away
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
/* Maybe another thread already upgraded the mapping while we were
 * not holding the lock.
|
|
|
|
*/
|
|
|
|
if (dmap->writable) {
|
|
|
|
ret = 0;
|
|
|
|
goto out_fill_iomap;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
|
|
|
|
true);
|
|
|
|
if (ret < 0)
|
|
|
|
goto out_err;
|
|
|
|
out_fill_iomap:
|
|
|
|
fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
|
|
|
|
out_err:
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This is just for DAX and the mapping is ephemeral, do not use it for other
|
|
|
|
* purposes since there is no block device with a permanent mapping.
|
|
|
|
*/
|
|
|
|
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
|
|
|
|
unsigned int flags, struct iomap *iomap,
|
|
|
|
struct iomap *srcmap)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
bool writable = flags & IOMAP_WRITE;
|
|
|
|
unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
|
|
|
/* We don't support FIEMAP */
|
|
|
|
if (WARN_ON(flags & IOMAP_REPORT))
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
iomap->offset = pos;
|
|
|
|
iomap->flags = 0;
|
|
|
|
iomap->bdev = NULL;
|
|
|
|
iomap->dax_dev = fc->dax->dev;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Both the read/write and mmap paths can race here. So we need
* something to make sure that if we are setting up a mapping, the
* other path waits.
|
|
|
|
*
|
|
|
|
* For now, use a semaphore for this. It probably needs to be
|
|
|
|
* optimized later.
|
|
|
|
*/
|
|
|
|
down_read(&fi->dax->sem);
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
|
|
|
|
if (node) {
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
if (writable && !dmap->writable) {
|
|
|
|
/* Upgrade read-only mapping to read-write. This will
|
|
|
|
* require exclusive fi->dax->sem lock as we don't want
|
|
|
|
* two threads to be trying to do this simultaneously
* for the same dmap. So drop shared lock and acquire
|
|
|
|
* exclusive lock.
|
2020-08-20 06:19:56 +08:00
|
|
|
*
|
|
|
|
* Before dropping fi->dax->sem lock, take reference
|
|
|
|
* on dmap so that it's not freed by range reclaim.
|
2020-08-20 06:19:51 +08:00
|
|
|
*/
|
2020-08-20 06:19:56 +08:00
|
|
|
refcount_inc(&dmap->refcnt);
|
2020-08-20 06:19:51 +08:00
|
|
|
up_read(&fi->dax->sem);
|
|
|
|
pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
|
|
|
|
__func__, pos, length);
|
|
|
|
return fuse_upgrade_dax_mapping(inode, pos, length,
|
|
|
|
flags, iomap);
|
|
|
|
} else {
|
|
|
|
fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
|
|
|
|
up_read(&fi->dax->sem);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
up_read(&fi->dax->sem);
|
|
|
|
pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
|
|
|
|
__func__, pos, length);
|
|
|
|
if (pos >= i_size_read(inode))
|
|
|
|
goto iomap_hole;
|
|
|
|
|
|
|
|
return fuse_setup_new_dax_mapping(inode, pos, length, flags,
|
|
|
|
iomap);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2021-06-04 09:46:17 +08:00
|
|
|
* If a read beyond end of file happens, fs code seems to return
* it as a hole
|
|
|
|
*/
|
|
|
|
iomap_hole:
|
|
|
|
fuse_fill_iomap_hole(iomap, length);
|
|
|
|
pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
|
|
|
|
__func__, pos, length, iomap->length);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
|
|
|
|
ssize_t written, unsigned int flags,
|
|
|
|
struct iomap *iomap)
|
|
|
|
{
|
2020-08-20 06:19:56 +08:00
|
|
|
struct fuse_dax_mapping *dmap = iomap->private;
|
|
|
|
|
|
|
|
if (dmap) {
|
|
|
|
if (refcount_dec_and_test(&dmap->refcnt)) {
|
|
|
|
/* refcount should not hit 0. This object only goes
|
|
|
|
* away when fuse connection goes away
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
/* DAX writes beyond end-of-file aren't handled using iomap, so the
|
|
|
|
* file size is unchanged and there is nothing to do here.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct iomap_ops fuse_iomap_ops = {
|
|
|
|
.iomap_begin = fuse_iomap_begin,
|
|
|
|
.iomap_end = fuse_iomap_end,
|
|
|
|
};
|
|
|
|
|
|
|
|
static void fuse_wait_dax_page(struct inode *inode)
|
|
|
|
{
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_unlock(inode->i_mapping);
|
|
|
|
schedule();
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_lock(inode->i_mapping);
|
|
|
|
}
|
|
|
|
|
2021-04-21 23:18:39 +08:00
|
|
|
/* Should be called with mapping->invalidate_lock held exclusively */
|
|
|
|
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
|
|
|
|
loff_t start, loff_t end)
|
|
|
|
{
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
page = dax_layout_busy_page_range(inode->i_mapping, start, end);
|
|
|
|
if (!page)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*retry = true;
|
|
|
|
return ___wait_var_event(&page->_refcount,
|
|
|
|
atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
|
|
|
|
0, 0, fuse_wait_dax_page(inode));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* dmap_end == 0 leads to unmapping of whole file */
|
|
|
|
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
|
|
|
|
u64 dmap_end)
|
|
|
|
{
|
|
|
|
bool retry;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
do {
|
|
|
|
retry = false;
|
|
|
|
ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
|
|
|
|
dmap_end);
|
|
|
|
} while (ret == 0 && retry);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:51 +08:00
|
|
|
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
|
|
|
{
|
|
|
|
struct inode *inode = file_inode(iocb->ki_filp);
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
if (iocb->ki_flags & IOCB_NOWAIT) {
|
|
|
|
if (!inode_trylock_shared(inode))
|
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
|
|
|
inode_lock_shared(inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
|
|
|
|
inode_unlock_shared(inode);
|
|
|
|
|
|
|
|
/* TODO file_accessed(iocb->f_filp) */
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
|
|
|
|
{
|
|
|
|
struct inode *inode = file_inode(iocb->ki_filp);
|
|
|
|
|
|
|
|
return (iov_iter_rw(from) == WRITE &&
|
|
|
|
((iocb->ki_pos) >= i_size_read(inode) ||
|
|
|
|
(iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
|
|
|
|
{
|
|
|
|
struct inode *inode = file_inode(iocb->ki_filp);
|
|
|
|
struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
|
|
|
|
|
2021-10-22 23:03:02 +08:00
|
|
|
fuse_write_update_attr(inode, iocb->ki_pos, ret);
|
2020-08-20 06:19:51 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
|
|
|
{
|
|
|
|
struct inode *inode = file_inode(iocb->ki_filp);
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
if (iocb->ki_flags & IOCB_NOWAIT) {
|
|
|
|
if (!inode_trylock(inode))
|
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
|
|
|
inode_lock(inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = generic_write_checks(iocb, from);
|
|
|
|
if (ret <= 0)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = file_remove_privs(iocb->ki_filp);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
/* TODO file_update_time() but we don't want metadata I/O */
|
|
|
|
|
|
|
|
/* Do not use dax for file-extending writes, as the write and the
 * on-disk i_size increase are not atomic otherwise.
|
|
|
|
*/
|
|
|
|
if (file_extending_write(iocb, from))
|
|
|
|
ret = fuse_dax_direct_write(iocb, from);
|
|
|
|
else
|
|
|
|
ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);
|
|
|
|
|
|
|
|
out:
|
|
|
|
inode_unlock(inode);
|
|
|
|
|
|
|
|
if (ret > 0)
|
|
|
|
ret = generic_write_sync(iocb, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:53 +08:00
|
|
|
static int fuse_dax_writepages(struct address_space *mapping,
|
|
|
|
struct writeback_control *wbc)
|
|
|
|
{
|
|
|
|
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
|
|
|
|
return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
|
|
|
|
}
|
|
|
|
|
2023-08-19 04:23:35 +08:00
|
|
|
static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order,
|
|
|
|
bool write)
|
2020-08-20 06:19:52 +08:00
|
|
|
{
|
|
|
|
vm_fault_t ret;
|
|
|
|
struct inode *inode = file_inode(vmf->vma->vm_file);
|
|
|
|
struct super_block *sb = inode->i_sb;
|
|
|
|
pfn_t pfn;
|
2020-08-20 06:19:56 +08:00
|
|
|
int error = 0;
|
|
|
|
struct fuse_conn *fc = get_fuse_conn(inode);
|
|
|
|
struct fuse_conn_dax *fcd = fc->dax;
|
|
|
|
bool retry = false;
|
2020-08-20 06:19:52 +08:00
|
|
|
|
|
|
|
if (write)
|
|
|
|
sb_start_pagefault(sb);
|
2020-08-20 06:19:56 +08:00
|
|
|
retry:
|
|
|
|
if (retry && !(fcd->nr_free_ranges > 0))
|
|
|
|
wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
|
2020-08-20 06:19:52 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to serialize against not only truncate but also against
|
|
|
|
* fuse dax memory range reclaim. While a range is being reclaimed,
|
|
|
|
* we do not want any read/write/mmap to make progress and try
|
|
|
|
* to populate page cache or access memory we are trying to free.
|
|
|
|
*/
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_lock_shared(inode->i_mapping);
|
2023-08-19 04:23:35 +08:00
|
|
|
ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
|
2020-08-20 06:19:56 +08:00
|
|
|
if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
|
|
|
|
error = 0;
|
|
|
|
retry = true;
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_unlock_shared(inode->i_mapping);
|
2020-08-20 06:19:56 +08:00
|
|
|
goto retry;
|
|
|
|
}
|
2020-08-20 06:19:52 +08:00
|
|
|
|
|
|
|
if (ret & VM_FAULT_NEEDDSYNC)
|
2023-08-19 04:23:35 +08:00
|
|
|
ret = dax_finish_sync_fault(vmf, order, pfn);
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_unlock_shared(inode->i_mapping);
|
2020-08-20 06:19:52 +08:00
|
|
|
|
|
|
|
if (write)
|
|
|
|
sb_end_pagefault(sb);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
|
|
|
|
{
|
2023-08-19 04:23:35 +08:00
|
|
|
return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE);
|
2020-08-20 06:19:52 +08:00
|
|
|
}
|
|
|
|
|
2023-08-19 04:23:35 +08:00
|
|
|
static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
|
2020-08-20 06:19:52 +08:00
|
|
|
{
|
2023-08-19 04:23:35 +08:00
|
|
|
return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE);
|
2020-08-20 06:19:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
|
|
|
|
{
|
2023-08-19 04:23:35 +08:00
|
|
|
return __fuse_dax_fault(vmf, 0, true);
|
2020-08-20 06:19:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
|
|
|
|
{
|
2023-08-19 04:23:35 +08:00
|
|
|
return __fuse_dax_fault(vmf, 0, true);
|
2020-08-20 06:19:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct vm_operations_struct fuse_dax_vm_ops = {
|
|
|
|
.fault = fuse_dax_fault,
|
|
|
|
.huge_fault = fuse_dax_huge_fault,
|
|
|
|
.page_mkwrite = fuse_dax_page_mkwrite,
|
|
|
|
.pfn_mkwrite = fuse_dax_pfn_mkwrite,
|
|
|
|
};
|
|
|
|
|
|
|
|
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
file_accessed(file);
|
|
|
|
vma->vm_ops = &fuse_dax_vm_ops;
|
2023-01-27 03:37:49 +08:00
|
|
|
vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE);
|
2020-08-20 06:19:52 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-20 06:19:56 +08:00
|
|
|
static int dmap_writeback_invalidate(struct inode *inode,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
|
|
|
|
loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);
|
|
|
|
|
|
|
|
ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
|
|
|
|
if (ret) {
|
|
|
|
pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
|
|
|
|
ret, start_pos, end_pos);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = invalidate_inode_pages2_range(inode->i_mapping,
|
|
|
|
start_pos >> PAGE_SHIFT,
|
|
|
|
end_pos >> PAGE_SHIFT);
|
|
|
|
if (ret)
|
|
|
|
pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
|
|
|
|
ret);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int reclaim_one_dmap_locked(struct inode *inode,
|
|
|
|
struct fuse_dax_mapping *dmap)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* igrab() was done to make sure the inode won't go away under us, and this
|
|
|
|
* further avoids the race with evict().
|
|
|
|
*/
|
|
|
|
ret = dmap_writeback_invalidate(inode, dmap);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Remove dax mapping from inode interval tree now */
|
|
|
|
interval_tree_remove(&dmap->itn, &fi->dax->tree);
|
|
|
|
fi->dax->nr--;
|
|
|
|
|
|
|
|
/* It is possible that umount/shutdown has killed the fuse connection
|
|
|
|
* and worker thread is trying to reclaim memory in parallel. Don't
|
|
|
|
* warn in that case.
|
|
|
|
*/
|
|
|
|
ret = dmap_removemapping_one(inode, dmap);
|
|
|
|
if (ret && ret != -ENOTCONN) {
|
|
|
|
pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
|
|
|
|
dmap->window_offset, dmap->length, ret);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find first mapped dmap for an inode and return file offset. Caller needs
|
|
|
|
* to hold fi->dax->sem lock either shared or exclusive.
|
|
|
|
*/
|
|
|
|
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
|
|
|
for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
|
|
|
|
node = interval_tree_iter_next(node, 0, -1)) {
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
/* still in use. */
|
|
|
|
if (refcount_read(&dmap->refcnt) > 1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return dmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find first mapping in the tree and free it and return it. Do not add
|
|
|
|
* it back to free pool.
|
|
|
|
*/
|
|
|
|
static struct fuse_dax_mapping *
|
|
|
|
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
|
|
|
|
bool *retry)
|
|
|
|
{
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
u64 dmap_start, dmap_end;
|
|
|
|
unsigned long start_idx;
|
|
|
|
int ret;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_lock(inode->i_mapping);
|
2020-08-20 06:19:56 +08:00
|
|
|
|
|
|
|
/* Lookup a dmap and corresponding file offset to reclaim. */
|
|
|
|
down_read(&fi->dax->sem);
|
|
|
|
dmap = inode_lookup_first_dmap(inode);
|
|
|
|
if (dmap) {
|
|
|
|
start_idx = dmap->itn.start;
|
|
|
|
dmap_start = start_idx << FUSE_DAX_SHIFT;
|
|
|
|
dmap_end = dmap_start + FUSE_DAX_SZ - 1;
|
|
|
|
}
|
|
|
|
up_read(&fi->dax->sem);
|
|
|
|
|
|
|
|
if (!dmap)
|
|
|
|
goto out_mmap_sem;
|
|
|
|
/*
|
|
|
|
* Make sure there are no references to inode pages using
|
|
|
|
* get_user_pages()
|
|
|
|
*/
|
|
|
|
ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
|
|
|
|
if (ret) {
|
|
|
|
pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
|
|
|
|
ret);
|
|
|
|
dmap = ERR_PTR(ret);
|
|
|
|
goto out_mmap_sem;
|
|
|
|
}
|
|
|
|
|
|
|
|
down_write(&fi->dax->sem);
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
|
|
|
|
/* Range already got reclaimed by somebody else */
|
|
|
|
if (!node) {
|
|
|
|
if (retry)
|
|
|
|
*retry = true;
|
|
|
|
goto out_write_dmap_sem;
|
|
|
|
}
|
|
|
|
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
/* still in use. */
|
|
|
|
if (refcount_read(&dmap->refcnt) > 1) {
|
|
|
|
dmap = NULL;
|
|
|
|
if (retry)
|
|
|
|
*retry = true;
|
|
|
|
goto out_write_dmap_sem;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = reclaim_one_dmap_locked(inode, dmap);
|
|
|
|
if (ret < 0) {
|
|
|
|
dmap = ERR_PTR(ret);
|
|
|
|
goto out_write_dmap_sem;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clean up dmap. Do not add back to free list */
|
|
|
|
dmap_remove_busy_list(fcd, dmap);
|
|
|
|
dmap->inode = NULL;
|
|
|
|
dmap->itn.start = dmap->itn.last = 0;
|
|
|
|
|
|
|
|
pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
|
|
|
|
__func__, inode, dmap->window_offset, dmap->length);
|
|
|
|
|
|
|
|
out_write_dmap_sem:
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
out_mmap_sem:
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_unlock(inode->i_mapping);
|
2020-08-20 06:19:56 +08:00
|
|
|
return dmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct fuse_dax_mapping *
|
|
|
|
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
|
|
|
|
{
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
bool retry = false;
|
|
|
|
|
|
|
|
dmap = alloc_dax_mapping(fcd);
|
|
|
|
if (dmap)
|
|
|
|
return dmap;
|
|
|
|
|
|
|
|
dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
|
|
|
|
/*
|
|
|
|
* Either we got a mapping or it is an error, return in both
|
|
|
|
* the cases.
|
|
|
|
*/
|
|
|
|
if (dmap)
|
|
|
|
return dmap;
|
|
|
|
|
|
|
|
/* If we could not reclaim a mapping because it
 * had a reference or some other temporary failure,
 * try again. We want to give up inline reclaim only
 * if there is no range assigned to this node. Otherwise
 * a deadlock is possible if we sleep with
 * mapping->invalidate_lock held and the worker trying to free
 * memory can't make progress due to unavailability of
 * mapping->invalidate_lock. So sleep only if fi->dax->nr == 0
|
2020-08-20 06:19:56 +08:00
|
|
|
*/
|
|
|
|
if (retry)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* There are no mappings which can be reclaimed. Wait for one.
|
|
|
|
* We are not holding fi->dax->sem. So it is possible
|
|
|
|
* that range gets added now. But as we are not holding
|
2021-04-21 23:18:39 +08:00
|
|
|
* mapping->invalidate_lock, worker should still be able to
|
|
|
|
* free up a range and wake us up.
|
2020-08-20 06:19:56 +08:00
|
|
|
*/
|
|
|
|
if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
|
|
|
|
if (wait_event_killable_exclusive(fcd->range_waitq,
|
|
|
|
(fcd->nr_free_ranges > 0))) {
|
|
|
|
return ERR_PTR(-EINTR);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
|
|
|
|
struct inode *inode,
|
|
|
|
unsigned long start_idx)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
struct fuse_dax_mapping *dmap;
|
|
|
|
struct interval_tree_node *node;
|
|
|
|
|
|
|
|
/* Find fuse dax mapping at file offset inode. */
|
|
|
|
node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
|
|
|
|
|
|
|
|
/* Range already got cleaned up by somebody else */
|
|
|
|
if (!node)
|
|
|
|
return 0;
|
|
|
|
dmap = node_to_dmap(node);
|
|
|
|
|
|
|
|
/* still in use. */
|
|
|
|
if (refcount_read(&dmap->refcnt) > 1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = reclaim_one_dmap_locked(inode, dmap);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Cleanup dmap entry and add back to free list */
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
dmap_reinit_add_to_free_pool(fcd, dmap);
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free a range of memory.
|
|
|
|
* Locking:
|
2021-04-21 23:18:39 +08:00
|
|
|
* 1. Take mapping->invalidate_lock to block dax faults.
|
2020-08-20 06:19:56 +08:00
|
|
|
* 2. Take fi->dax->sem to protect interval tree and also to make sure
|
|
|
|
* read/write can not reuse a dmap which we might be freeing.
|
|
|
|
*/
|
|
|
|
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
|
|
|
|
struct inode *inode,
|
|
|
|
unsigned long start_idx,
|
|
|
|
unsigned long end_idx)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct fuse_inode *fi = get_fuse_inode(inode);
|
|
|
|
loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
|
|
|
|
loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
|
|
|
|
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_lock(inode->i_mapping);
|
2020-08-20 06:19:56 +08:00
|
|
|
ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
|
|
|
|
if (ret) {
|
|
|
|
pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
|
|
|
|
ret);
|
|
|
|
goto out_mmap_sem;
|
|
|
|
}
|
|
|
|
|
|
|
|
down_write(&fi->dax->sem);
|
|
|
|
ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
|
|
|
|
up_write(&fi->dax->sem);
|
|
|
|
out_mmap_sem:
|
2021-04-21 23:18:39 +08:00
|
|
|
filemap_invalidate_unlock(inode->i_mapping);
|
2020-08-20 06:19:56 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
|
|
|
|
unsigned long nr_to_free)
|
|
|
|
{
|
|
|
|
struct fuse_dax_mapping *dmap, *pos, *temp;
|
|
|
|
int ret, nr_freed = 0;
|
|
|
|
unsigned long start_idx = 0, end_idx = 0;
|
|
|
|
struct inode *inode = NULL;
|
|
|
|
|
|
|
|
/* Pick the first busy range and free it for now */
|
|
|
|
while (1) {
|
|
|
|
if (nr_freed >= nr_to_free)
|
|
|
|
break;
|
|
|
|
|
|
|
|
dmap = NULL;
|
|
|
|
spin_lock(&fcd->lock);
|
|
|
|
|
|
|
|
if (!fcd->nr_busy_ranges) {
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
|
|
|
|
busy_list) {
|
|
|
|
/* skip this range if it's in use. */
|
|
|
|
if (refcount_read(&pos->refcnt) > 1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
inode = igrab(pos->inode);
|
|
|
|
/*
|
|
|
|
* This inode is going away. That will free
|
|
|
|
* up all the ranges anyway, continue to
|
|
|
|
* next range.
|
|
|
|
*/
|
|
|
|
if (!inode)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* Take this element off list and add it tail. If
|
|
|
|
* this element can't be freed, it will help with
|
|
|
|
* selecting new element in next iteration of loop.
|
|
|
|
*/
|
|
|
|
dmap = pos;
|
|
|
|
list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
|
|
|
|
start_idx = end_idx = dmap->itn.start;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock(&fcd->lock);
|
|
|
|
if (!dmap)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
|
|
|
|
iput(inode);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
nr_freed++;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
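/*
 * Delayed-work callback: free a batch of FUSE_DAX_RECLAIM_CHUNK ranges and
 * let kick_dmap_free_worker() requeue the work if free ranges are still
 * scarce.
 */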
static void fuse_dax_free_mem_worker(struct work_struct *work)
{
	int ret;
	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
						 free_work.work);
	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
	if (ret) {
		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
			 ret);
	}

	/* If the number of free ranges is still below the threshold, requeue */
	kick_dmap_free_worker(fcd, 1);
}

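/*
 * Free every fuse_dax_mapping on @mem_list, unlinking it from the busy
 * list first if it is still on one.
 */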
static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;

	/* Free all allocated elements */
	list_for_each_entry_safe(range, temp, mem_list, list) {
		list_del(&range->list);
		if (!list_empty(&range->busy_list))
			list_del(&range->busy_list);
		kfree(range);
	}
}

void fuse_dax_conn_free(struct fuse_conn *fc)
{
	if (fc->dax) {
		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
		kfree(fc->dax);
		fc->dax = NULL;
	}
}

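/*
 * Carve the DAX window into fixed-size FUSE_DAX_SZ ranges and put them all
 * on the free list. dax_size is (size_t)-1, i.e. ask dax_direct_access()
 * for as many pages as the device can provide at offset 0.
 */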
static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
	long nr_pages, nr_ranges;
	struct fuse_dax_mapping *range;
	int ret, id;
	size_t dax_size = -1;
	unsigned long i;

	init_waitqueue_head(&fcd->range_waitq);
	INIT_LIST_HEAD(&fcd->free_ranges);
	INIT_LIST_HEAD(&fcd->busy_ranges);
	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

	id = dax_read_lock();
	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
				     DAX_ACCESS, NULL, NULL);
	dax_read_unlock(id);
	if (nr_pages < 0) {
		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
		return nr_pages;
	}

	nr_ranges = nr_pages / FUSE_DAX_PAGES;
	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
		 __func__, nr_pages, nr_ranges);

	for (i = 0; i < nr_ranges; i++) {
		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
		ret = -ENOMEM;
		if (!range)
			goto out_err;

		/*
		 * TODO: This offset only works if the virtio-fs driver does
		 * not hide some memory at the beginning of the window. This
		 * needs better handling.
		 */
		range->window_offset = i * FUSE_DAX_SZ;
		range->length = FUSE_DAX_SZ;
		INIT_LIST_HEAD(&range->busy_list);
		refcount_set(&range->refcnt, 1);
		list_add_tail(&range->list, &fcd->free_ranges);
	}

	fcd->nr_free_ranges = nr_ranges;
	fcd->nr_ranges = nr_ranges;
	return 0;
out_err:
	/* Free all allocated elements */
	fuse_free_dax_mem_ranges(&fcd->free_ranges);
	return ret;
}

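/*
 * Set up per-connection DAX state. A NULL @dax_dev means the connection
 * does not use DAX at all; only the requested dax mode is recorded.
 */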
int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
			struct dax_device *dax_dev)
{
	struct fuse_conn_dax *fcd;
	int err;

	fc->dax_mode = dax_mode;

	if (!dax_dev)
		return 0;

	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
	if (!fcd)
		return -ENOMEM;

	spin_lock_init(&fcd->lock);
	fcd->dev = dax_dev;
	err = fuse_dax_mem_range_init(fcd);
	if (err) {
		kfree(fcd);
		return err;
	}

	fc->dax = fcd;
	return 0;
}

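/*
 * Allocate the per-inode DAX state (interval tree + semaphore) when the
 * connection has a DAX window; returns false only on allocation failure.
 */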
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	fi->dax = NULL;
	if (fc->dax) {
		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
		if (!fi->dax)
			return false;

		init_rwsem(&fi->dax->sem);
		fi->dax->tree = RB_ROOT_CACHED;
	}

	return true;
}

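/*
 * Address space operations for DAX-enabled inodes: writeback goes through
 * fuse_dax_writepages(), while dirty_folio and direct_IO are no-ops since
 * file data lives in the DAX window rather than the page cache.
 */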
static const struct address_space_operations fuse_dax_file_aops = {
	.writepages	= fuse_dax_writepages,
	.direct_IO	= noop_direct_IO,
	.dirty_folio	= noop_dirty_folio,
};

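/*
 * Decide whether DAX should be used for this inode: 'never' mode and a
 * missing DAX device disable it, 'always' mode enables it unconditionally,
 * and in per-inode mode it is enabled only if the connection supports
 * per-inode DAX and the server set FUSE_ATTR_DAX for this inode.
 */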
static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	enum fuse_dax_mode dax_mode = fc->dax_mode;

	if (dax_mode == FUSE_DAX_NEVER)
		return false;

	/*
	 * fc->dax may be NULL in 'inode' mode when the filesystem device
	 * doesn't support DAX, in which case it will silently fall back to
	 * 'never' mode.
	 */
	if (!fc->dax)
		return false;

	if (dax_mode == FUSE_DAX_ALWAYS)
		return true;

	/* dax_mode is FUSE_DAX_INODE* */
	return fc->inode_dax && (flags & FUSE_ATTR_DAX);
}

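/*
 * Mark the inode S_DAX and switch it to the DAX address_space operations
 * if the current dax mode allows it for this inode.
 */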
void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
{
	if (!fuse_should_enable_dax(inode, flags))
		return;

	inode->i_flags |= S_DAX;
	inode->i_data.a_ops = &fuse_dax_file_aops;
}

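/*
 * In per-inode DAX mode, when the FUSE_ATTR_DAX state reported by the
 * server no longer matches the inode's current S_DAX flag, mark the inode
 * dontcache so it is evicted and re-instantiated with the new DAX state.
 */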
void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_inode_dax_mode(fc->dax_mode) &&
	    ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
		d_mark_dontcache(inode);
}

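/*
 * FUSE_INIT negotiates map_alignment with the server; reject an alignment
 * requirement that is coarser than our fixed FUSE_DAX_SZ range size.
 */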
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
			map_alignment, FUSE_DAX_SZ);
		return false;
	}
	return true;
}

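/*
 * Cancel the delayed free worker, waiting for a queued or running instance
 * to finish.
 */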
void fuse_dax_cancel_work(struct fuse_conn *fc)
{
	struct fuse_conn_dax *fcd = fc->dax;

	if (fcd)
		cancel_delayed_work_sync(&fcd->free_work);
}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);