cifs: Use alternative invalidation to using launder_folio
Use writepages-based flushing invalidation instead of
invalidate_inode_pages2() and ->launder_folio(). This will allow
->launder_folio() to be removed eventually.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: Rohith Surabattula <rohiths.msft@gmail.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
This commit is contained in:
parent 1ecb146f7c
commit 0f7c0f3f51
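The core of the change is the inode.c hunk below: instead of the driver-local cifs_invalidate_mapping() wrapper around invalidate_inode_pages2(), which can bounce each remaining dirty folio through ->launder_folio(), cifs_revalidate_mapping() now has the VFS flush the pagecache through ->writepages() and then evict it in one call. A minimal sketch of the new pattern follows; the function name is made up for illustration, the real caller's locking and CIFS_INO_INVALID_MAPPING flag handling are omitted, and the declaration is assumed to live in <linux/pagemap.h> as in current kernels.

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch (not part of the patch): the writepages-based invalidation
 * pattern this commit adopts in cifs_revalidate_mapping().  Passing
 * true asks filemap_invalidate_inode() to write dirty folios back
 * through ->writepages() before evicting them, so no per-folio
 * ->launder_folio() call is ever needed.
 */
static int zap_mapping_sketch(struct inode *inode)
{
	/* flush then invalidate the whole range [0, LLONG_MAX] */
	return filemap_invalidate_inode(inode, true, 0, LLONG_MAX);
}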
fs/smb/client/cifsproto.h
@@ -69,7 +69,6 @@ extern int cifs_revalidate_file_attr(struct file *filp);
 extern int cifs_revalidate_dentry_attr(struct dentry *);
 extern int cifs_revalidate_file(struct file *filp);
 extern int cifs_revalidate_dentry(struct dentry *);
-extern int cifs_invalidate_mapping(struct inode *inode);
 extern int cifs_revalidate_mapping(struct inode *inode);
 extern int cifs_zap_mapping(struct inode *inode);
 extern int cifs_getattr(struct mnt_idmap *, const struct path *,
fs/smb/client/file.c
@@ -2655,64 +2655,6 @@ struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
 	return wdata;
 }
 
-static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
-{
-	struct address_space *mapping = page->mapping;
-	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
-	char *write_data;
-	int rc = -EFAULT;
-	int bytes_written = 0;
-	struct inode *inode;
-	struct cifsFileInfo *open_file;
-
-	if (!mapping || !mapping->host)
-		return -EFAULT;
-
-	inode = page->mapping->host;
-
-	offset += (loff_t)from;
-	write_data = kmap(page);
-	write_data += from;
-
-	if ((to > PAGE_SIZE) || (from > to)) {
-		kunmap(page);
-		return -EIO;
-	}
-
-	/* racing with truncate? */
-	if (offset > mapping->host->i_size) {
-		kunmap(page);
-		return 0; /* don't care */
-	}
-
-	/* check to make sure that we are not extending the file */
-	if (mapping->host->i_size - offset < (loff_t)to)
-		to = (unsigned)(mapping->host->i_size - offset);
-
-	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
-				    &open_file);
-	if (!rc) {
-		bytes_written = cifs_write(open_file, open_file->pid,
-					   write_data, to - from, &offset);
-		cifsFileInfo_put(open_file);
-		/* Does mm or vfs already set times? */
-		simple_inode_init_ts(inode);
-		if ((bytes_written > 0) && (offset))
-			rc = 0;
-		else if (bytes_written < 0)
-			rc = bytes_written;
-		else
-			rc = -EFAULT;
-	} else {
-		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
-		if (!is_retryable_error(rc))
-			rc = -EIO;
-	}
-
-	kunmap(page);
-	return rc;
-}
-
 /*
  * Extend the region to be written back to include subsequent contiguously
  * dirty pages if possible, but don't sleep while doing so.
@@ -3126,47 +3068,6 @@ out:
 	return ret;
 }
 
-static int
-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
-{
-	int rc;
-	unsigned int xid;
-
-	xid = get_xid();
-	/* BB add check for wbc flags */
-	get_page(page);
-	if (!PageUptodate(page))
-		cifs_dbg(FYI, "ppw - page not up to date\n");
-
-	/*
-	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
-	 *
-	 * A writepage() implementation always needs to do either this,
-	 * or re-dirty the page with "redirty_page_for_writepage()" in
-	 * the case of a failure.
-	 *
-	 * Just unlocking the page will cause the radix tree tag-bits
-	 * to fail to update with the state of the page correctly.
-	 */
-	set_page_writeback(page);
-retry_write:
-	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
-	if (is_retryable_error(rc)) {
-		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
-			goto retry_write;
-		redirty_page_for_writepage(wbc, page);
-	} else if (rc != 0) {
-		SetPageError(page);
-		mapping_set_error(page->mapping, rc);
-	} else {
-		SetPageUptodate(page);
-	}
-	end_page_writeback(page);
-	put_page(page);
-	free_xid(xid);
-	return rc;
-}
-
 static int cifs_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
@@ -4992,27 +4893,6 @@ static void cifs_invalidate_folio(struct folio *folio, size_t offset,
 	folio_wait_private_2(folio); /* [DEPRECATED] */
 }
 
-static int cifs_launder_folio(struct folio *folio)
-{
-	int rc = 0;
-	loff_t range_start = folio_pos(folio);
-	loff_t range_end = range_start + folio_size(folio);
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = 0,
-		.range_start = range_start,
-		.range_end = range_end,
-	};
-
-	cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
-
-	if (folio_clear_dirty_for_io(folio))
-		rc = cifs_writepage_locked(&folio->page, &wbc);
-
-	folio_wait_private_2(folio); /* [DEPRECATED] */
-	return rc;
-}
-
 void cifs_oplock_break(struct work_struct *work)
 {
 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -5191,7 +5071,6 @@ const struct address_space_operations cifs_addr_ops = {
 	.release_folio = cifs_release_folio,
 	.direct_IO = cifs_direct_io,
 	.invalidate_folio = cifs_invalidate_folio,
-	.launder_folio = cifs_launder_folio,
 	.migrate_folio = filemap_migrate_folio,
 	/*
 	 * TODO: investigate and if useful we could add an is_dirty_writeback
@@ -5214,6 +5093,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
 	.dirty_folio = netfs_dirty_folio,
 	.release_folio = cifs_release_folio,
 	.invalidate_folio = cifs_invalidate_folio,
-	.launder_folio = cifs_launder_folio,
 	.migrate_folio = filemap_migrate_folio,
 };
fs/smb/client/inode.c
@@ -2431,24 +2431,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
 	return false;
 }
 
-/*
- * Zap the cache. Called when invalid_mapping flag is set.
- */
-int
-cifs_invalidate_mapping(struct inode *inode)
-{
-	int rc = 0;
-
-	if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
-		rc = invalidate_inode_pages2(inode->i_mapping);
-		if (rc)
-			cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n",
-				 __func__, inode, rc);
-	}
-
-	return rc;
-}
-
 /**
  * cifs_wait_bit_killable - helper for functions that are sleeping on bit locks
  *
@@ -2485,9 +2467,12 @@ cifs_revalidate_mapping(struct inode *inode)
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
 			goto skip_invalidate;
 
-		rc = cifs_invalidate_mapping(inode);
-		if (rc)
+		rc = filemap_invalidate_inode(inode, true, 0, LLONG_MAX);
+		if (rc) {
+			cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n",
+				 __func__, inode, rc);
 			set_bit(CIFS_INO_INVALID_MAPPING, flags);
+		}
 	}
 
 skip_invalidate:
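For contrast, the deleted cifs_launder_folio() above amounted to a synchronous write-back of one dirty, locked folio: it built a WB_SYNC_ALL writeback_control spanning just that folio and pushed it through the (also deleted) cifs_writepage_locked(). A condensed sketch of that removed path, with the debug logging and deprecated folio_wait_private_2() call dropped; the function name is made up here:

/*
 * Condensed from the removed cifs_launder_folio() in the diff above;
 * cifs_writepage_locked() is the helper this same patch deletes.
 */
static int launder_one_folio_sketch(struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,	/* synchronous flush */
		.nr_to_write = 0,
		.range_start = folio_pos(folio),
		.range_end   = folio_pos(folio) + folio_size(folio),
	};
	int rc = 0;

	/* only write the folio back if it was actually dirty */
	if (folio_clear_dirty_for_io(folio))
		rc = cifs_writepage_locked(&folio->page, &wbc);
	return rc;
}

With filemap_invalidate_inode() flushing the whole range through ->writepages() up front, invalidation never meets a dirty folio it would have to launder, which is why both address_space_operations tables can simply drop their .launder_folio entry.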