David Howells 2020-10-30 10:01:09 +00:00
parent 630f5dda84
commit 810caa3e67

fs/afs/write.c

@@ -490,47 +490,25 @@ try_next_key:
 }
 
 /*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
  */
-static int afs_write_back_from_locked_page(struct address_space *mapping,
-					   struct writeback_control *wbc,
-					   struct page *primary_page,
-					   pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+				 struct afs_vnode *vnode,
+				 long *_count,
+				 pgoff_t start,
+				 pgoff_t final_page,
+				 unsigned *_offset,
+				 unsigned *_to,
+				 bool new_content)
 {
-	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-	struct iov_iter iter;
 	struct page *pages[8], *page;
-	unsigned long count, priv;
-	unsigned n, offset, to, f, t;
-	pgoff_t start, first, last;
-	loff_t i_size, pos, end;
-	int loop, ret;
-
-	_enter(",%lx", primary_page->index);
-
-	count = 1;
-	if (test_set_page_writeback(primary_page))
-		BUG();
-
-	/* Find all consecutive lockable dirty pages that have contiguous
-	 * written regions, stopping when we find a page that is not
-	 * immediately lockable, is not dirty or is missing, or we reach the
-	 * end of the range.
-	 */
-	start = primary_page->index;
-	priv = page_private(primary_page);
-	offset = afs_page_dirty_from(primary_page, priv);
-	to = afs_page_dirty_to(primary_page, priv);
-	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
-	WARN_ON(offset == to);
-	if (offset == to)
-		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
-	if (start >= final_page ||
-	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
-		goto no_more;
+	unsigned long count = *_count, priv;
+	unsigned offset = *_offset, to = *_to, n, f, t;
+	int loop;
 
 	start++;
 	do {
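
This first hunk turns the page scan into a helper that talks to its caller through pointer out-parameters: the caller seeds count, offset and to from the primary page, and afs_extend_writeback() widens them in place as it absorbs further dirty pages. Below is a minimal userspace sketch of that in/out contract, assuming a fixed 4096-byte page; extend_region() and the values fed to it are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

/* Work on local copies, then publish the results back through the
 * pointers on the way out - the same shape as the *_count/*_offset/*_to
 * handling in afs_extend_writeback(). */
static void extend_region(unsigned long *_count, unsigned *_offset,
			  unsigned *_to, unsigned long more_pages)
{
	unsigned long count = *_count;
	unsigned offset = *_offset, to = *_to;

	count += more_pages;		/* absorbed contiguous dirty pages */
	to = SKETCH_PAGE_SIZE;		/* region now runs to a page boundary */

	*_count = count;		/* publish the widened region */
	*_offset = offset;
	*_to = to;
}

int main(void)
{
	unsigned long count = 1;			/* the primary page */
	unsigned offset = 512, to = SKETCH_PAGE_SIZE;	/* its dirty region */

	extend_region(&count, &offset, &to, 7);
	printf("count=%lu offset=%u to=%u\n", count, offset, to);
	return 0;
}
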
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 		for (loop = 0; loop < n; loop++) {
 			page = pages[loop];
-			if (to != PAGE_SIZE &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+			if (to != PAGE_SIZE && !new_content)
 				break;
 			if (page->index > final_page)
 				break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			priv = page_private(page);
 			f = afs_page_dirty_from(page, priv);
 			t = afs_page_dirty_to(page, priv);
-			if (f != 0 &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+			if (f != 0 && !new_content) {
 				unlock_page(page);
 				break;
 			}
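
The hunks at -551 and -566 make the same simplification: AFS_VNODE_NEW_CONTENT cannot usefully change mid-scan, so it is sampled once into a new_content bool and each loop iteration tests the cached value rather than calling test_bit() per page. A generic sketch of that hoisting pattern, with made-up helpers flag_set() and page_ok() standing in for the real predicates:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in predicate; illustrative only. */
static bool flag_set(unsigned long flags, int bit)
{
	return (flags >> bit) & 1;
}

/* Mirrors "if (f != 0 && !new_content) break;": a page whose dirty
 * region does not start at offset 0 only qualifies when new content
 * lets filler zeros be written. */
static bool page_ok(unsigned dirty_from, bool new_content)
{
	return dirty_from == 0 || new_content;
}

static size_t scan(unsigned long flags, const unsigned *dirty_from, size_t n)
{
	bool new_content = flag_set(flags, 0);	/* hoisted out of the loop */
	size_t i;

	for (i = 0; i < n; i++)
		if (!page_ok(dirty_from[i], new_content))
			break;
	return i;	/* number of pages absorbed */
}

int main(void)
{
	unsigned dirty_from[] = { 0, 0, 512, 0 };

	/* Without the flag the scan stops at the partially dirty page... */
	printf("absorbed=%zu\n", scan(0x0, dirty_from, 4));
	/* ...with it, filler zeros let the scan continue to the end. */
	printf("absorbed=%zu\n", scan(0x1, dirty_from, 4));
	return 0;
}
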
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 	} while (start <= final_page && count < 65536);
 
 no_more:
+	*_count = count;
+	*_offset = offset;
+	*_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+					   struct writeback_control *wbc,
+					   struct page *primary_page,
+					   pgoff_t final_page)
+{
+	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+	struct iov_iter iter;
+	unsigned long count, priv;
+	unsigned offset, to;
+	pgoff_t start, first, last;
+	loff_t i_size, pos, end;
+	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	int ret;
+
+	_enter(",%lx", primary_page->index);
+
+	count = 1;
+	if (test_set_page_writeback(primary_page))
+		BUG();
+
+	/* Find all consecutive lockable dirty pages that have contiguous
+	 * written regions, stopping when we find a page that is not
+	 * immediately lockable, is not dirty or is missing, or we reach the
+	 * end of the range.
+	 */
+	start = primary_page->index;
+	priv = page_private(primary_page);
+	offset = afs_page_dirty_from(primary_page, priv);
+	to = afs_page_dirty_to(primary_page, priv);
+	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+	WARN_ON(offset == to);
+	if (offset == to)
+		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+	if (start < final_page &&
+	    (to == PAGE_SIZE || new_content))
+		afs_extend_writeback(mapping, vnode, &count, start, final_page,
+				     &offset, &to, new_content);
 
 	/* We now have a contiguous set of dirty pages, each with writeback
 	 * set; the first page is still locked at this point, but all the rest
 	 * have been unlocked.
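
A detail worth verifying in the last hunk: the old code bailed out of extension with "if (start >= final_page || (to < PAGE_SIZE && !test_bit(...))) goto no_more;", whereas the rewritten caller inverts that into a positive guard around the afs_extend_writeback() call. Since to can never exceed PAGE_SIZE, the two forms are exact complements by De Morgan's laws; here is a standalone brute-force check of that claim (SKETCH_PAGE_SIZE is a stand-in, not the kernel constant):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

int main(void)
{
	const unsigned to_values[] = { 0, 1, SKETCH_PAGE_SIZE - 1,
				       SKETCH_PAGE_SIZE };
	const unsigned long final_page = 8;

	/* Exhaust the interesting cases, relying on the invariant that
	 * "to" is an offset within a page and so never exceeds it. */
	for (unsigned long start = 0; start <= 2 * final_page; start++) {
		for (int i = 0; i < 4; i++) {
			for (int nc = 0; nc <= 1; nc++) {
				unsigned to = to_values[i];
				bool new_content = nc;

				/* Old form: skip extending, fall through. */
				bool old_skip = start >= final_page ||
					(to < SKETCH_PAGE_SIZE && !new_content);
				/* New form: extend the writeback region. */
				bool new_go = start < final_page &&
					(to == SKETCH_PAGE_SIZE || new_content);

				assert(old_skip == !new_go);
			}
		}
	}
	printf("old bail-out and new guard are exact complements\n");
	return 0;
}

Folding the early goto into a guarded call is what lets the extension logic move out of line without changing which pages end up in the writeback region.
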