afs: Extract writeback extension into its own function
Extract writeback extension into its own function to break up the writeback
function a bit.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-By: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/160588538471.3465195.782513375683399583.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161118154610.1232039.1765365632920504822.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161050546.2537118.2202554806419189453.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340414102.1303470.9078891484034668985.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539558417.286939.2879469588895925399.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653813972.2770958.12671731209438112378.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789097132.6155.4916609419912731964.stgit@warthog.procyon.org.uk/ # v6
parent 630f5dda84
commit 810caa3e67

fs/afs/write.c | 109
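The diff below pulls the page-extension loop out of afs_write_back_from_locked_page()
into a new helper, afs_extend_writeback(), which hands its results back to the caller
through pointer arguments (&count, &offset, &to). As a rough, self-contained userspace
illustration of that pattern only (the names extend_writeback(), write_back_from_page(),
page_dirty and NPAGES are invented for this sketch and are not part of the kernel code),
it might look like this:

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

/* Toy "dirty page" state standing in for the real page cache bookkeeping. */
static bool page_dirty[NPAGES] = {
	[3] = true, [4] = true, [5] = true, [6] = true,
};

/*
 * Extend the writeback region over subsequent contiguous dirty pages,
 * reporting the new page count back through an in/out parameter, in the
 * spirit of afs_extend_writeback() being passed &count, &offset and &to.
 */
static void extend_writeback(unsigned long start, unsigned long final,
			     unsigned long *_count)
{
	unsigned long count = *_count, index = start + 1;

	while (index <= final && page_dirty[index]) {
		page_dirty[index] = false;	/* claim the page for writeback */
		count++;
		index++;
	}

	*_count = count;			/* hand the result back to the caller */
}

/* Write back the page at @start plus any contiguous dirty pages after it. */
static void write_back_from_page(unsigned long start, unsigned long final)
{
	unsigned long count = 1;

	page_dirty[start] = false;		/* the "primary" page is claimed first */
	if (start < final)
		extend_writeback(start, final, &count);

	printf("writing back %lu page(s) starting at index %lu\n", count, start);
}

int main(void)
{
	write_back_from_page(3, NPAGES - 1);	/* prints: 4 page(s) starting at index 3 */
	return 0;
}

The point mirrored here is the shape of the split in the real patch: the extracted
helper only grows the contiguous range and reports back through its out-parameters,
while the caller keeps responsibility for the primary page and the actual write.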
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -490,47 +490,25 @@ try_next_key:
 }
 
 /*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
  */
-static int afs_write_back_from_locked_page(struct address_space *mapping,
-					   struct writeback_control *wbc,
-					   struct page *primary_page,
-					   pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+				 struct afs_vnode *vnode,
+				 long *_count,
+				 pgoff_t start,
+				 pgoff_t final_page,
+				 unsigned *_offset,
+				 unsigned *_to,
+				 bool new_content)
 {
-	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-	struct iov_iter iter;
 	struct page *pages[8], *page;
-	unsigned long count, priv;
-	unsigned n, offset, to, f, t;
-	pgoff_t start, first, last;
-	loff_t i_size, pos, end;
-	int loop, ret;
-
-	_enter(",%lx", primary_page->index);
-
-	count = 1;
-	if (test_set_page_writeback(primary_page))
-		BUG();
-
-	/* Find all consecutive lockable dirty pages that have contiguous
-	 * written regions, stopping when we find a page that is not
-	 * immediately lockable, is not dirty or is missing, or we reach the
-	 * end of the range.
-	 */
-	start = primary_page->index;
-	priv = page_private(primary_page);
-	offset = afs_page_dirty_from(primary_page, priv);
-	to = afs_page_dirty_to(primary_page, priv);
-	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
-	WARN_ON(offset == to);
-	if (offset == to)
-		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
-	if (start >= final_page ||
-	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
-		goto no_more;
+	unsigned long count = *_count, priv;
+	unsigned offset = *_offset, to = *_to, n, f, t;
+	int loop;
 
 	start++;
 	do {
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 
 		for (loop = 0; loop < n; loop++) {
 			page = pages[loop];
-			if (to != PAGE_SIZE &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+			if (to != PAGE_SIZE && !new_content)
 				break;
 			if (page->index > final_page)
 				break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			priv = page_private(page);
 			f = afs_page_dirty_from(page, priv);
 			t = afs_page_dirty_to(page, priv);
-			if (f != 0 &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+			if (f != 0 && !new_content) {
 				unlock_page(page);
 				break;
 			}
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 	} while (start <= final_page && count < 65536);
 
 no_more:
+	*_count = count;
+	*_offset = offset;
+	*_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+					   struct writeback_control *wbc,
+					   struct page *primary_page,
+					   pgoff_t final_page)
+{
+	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+	struct iov_iter iter;
+	unsigned long count, priv;
+	unsigned offset, to;
+	pgoff_t start, first, last;
+	loff_t i_size, pos, end;
+	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	int ret;
+
+	_enter(",%lx", primary_page->index);
+
+	count = 1;
+	if (test_set_page_writeback(primary_page))
+		BUG();
+
+	/* Find all consecutive lockable dirty pages that have contiguous
+	 * written regions, stopping when we find a page that is not
+	 * immediately lockable, is not dirty or is missing, or we reach the
+	 * end of the range.
+	 */
+	start = primary_page->index;
+	priv = page_private(primary_page);
+	offset = afs_page_dirty_from(primary_page, priv);
+	to = afs_page_dirty_to(primary_page, priv);
+	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+	WARN_ON(offset == to);
+	if (offset == to)
+		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+	if (start < final_page &&
+	    (to == PAGE_SIZE || new_content))
+		afs_extend_writeback(mapping, vnode, &count, start, final_page,
+				     &offset, &to, new_content);
+
 	/* We now have a contiguous set of dirty pages, each with writeback
 	 * set; the first page is still locked at this point, but all the rest
 	 * have been unlocked.