btrfs: factor out btree page submission code to a helper
In btree_write_cache_pages() we have a btree page submission routine buried deep in a nested loop. Extract that part of the code into a helper function, submit_eb_page(), which does the same work.

Since submit_eb_page() can now return >0 for successful extent buffer submission, remove the "ASSERT(ret <= 0);" line.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent f44cf41075
commit f91e0d0c4c
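For orientation before the diff: the helper's three-way return convention (<0 fatal error, 0 nothing to submit, >0 submitted) is what lets the caller loop collapse to a couple of branches. Below is a minimal standalone sketch of that convention with hypothetical names (submit_one() merely stands in for submit_eb_page(); this is illustrative, not the btrfs code itself):

	#include <stdio.h>

	/*
	 * Hypothetical stand-in for submit_eb_page(), illustrating the
	 * convention the patch introduces: >0 when a buffer was submitted,
	 * 0 when there is nothing to do for this entry, <0 on fatal error.
	 */
	static int submit_one(int value)
	{
		if (value < 0)
			return value;	/* fatal error */
		if (value == 0)
			return 0;	/* already submitted / not ours, skip */
		return 1;		/* submitted successfully */
	}

	int main(void)
	{
		int values[] = { 0, 3, 0, 7, 5 };
		int done = 0;
		int ret = 0;

		for (unsigned int i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
			ret = submit_one(values[i]);
			if (ret == 0)		/* skip, as in the new caller loop */
				continue;
			if (ret < 0) {		/* fatal: stop writeback */
				done = 1;
				break;
			}
			/* ret > 0: one buffer went out; keep going */
		}
		printf("done=%d ret=%d\n", done, ret);
		return ret < 0 ? 1 : 0;
	}

The patched caller loop in btree_write_cache_pages() has the same shape: continue on 0, set done and break on error, and otherwise count the submission as progress.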
@@ -3987,10 +3987,81 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	return ret;
 }
 
+/*
+ * Submit all page(s) of one extent buffer.
+ *
+ * @page:	the page of one extent buffer
+ * @eb_context:	to determine if we need to submit this page, if current page
+ *		belongs to this eb, we don't need to submit
+ *
+ * The caller should pass each page in their bytenr order, and here we use
+ * @eb_context to determine if we have submitted pages of one extent buffer.
+ *
+ * If we have, we just skip until we hit a new page that doesn't belong to
+ * current @eb_context.
+ *
+ * If not, we submit all the page(s) of the extent buffer.
+ *
+ * Return >0 if we have submitted the extent buffer successfully.
+ * Return 0 if we don't need to submit the page, as it's already submitted by
+ * previous call.
+ * Return <0 for fatal error.
+ */
+static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+			  struct extent_page_data *epd,
+			  struct extent_buffer **eb_context)
+{
+	struct address_space *mapping = page->mapping;
+	struct extent_buffer *eb;
+	int ret;
+
+	if (!PagePrivate(page))
+		return 0;
+
+	spin_lock(&mapping->private_lock);
+	if (!PagePrivate(page)) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+
+	eb = (struct extent_buffer *)page->private;
+
+	/*
+	 * Shouldn't happen and normally this would be a BUG_ON but no point
+	 * crashing the machine for something we can survive anyway.
+	 */
+	if (WARN_ON(!eb)) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+
+	if (eb == *eb_context) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+	ret = atomic_inc_not_zero(&eb->refs);
+	spin_unlock(&mapping->private_lock);
+	if (!ret)
+		return 0;
+
+	*eb_context = eb;
+
+	ret = lock_extent_buffer_for_io(eb, epd);
+	if (ret <= 0) {
+		free_extent_buffer(eb);
+		return ret;
+	}
+	ret = write_one_eb(eb, wbc, epd);
+	free_extent_buffer(eb);
+	if (ret < 0)
+		return ret;
+	return 1;
+}
+
 int btree_write_cache_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
-	struct extent_buffer *eb, *prev_eb = NULL;
+	struct extent_buffer *eb_context = NULL;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.extent_locked = 0,
@@ -4036,56 +4107,14 @@ retry:
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			if (!PagePrivate(page))
+			ret = submit_eb_page(page, wbc, &epd, &eb_context);
+			if (ret == 0)
 				continue;
-
-			spin_lock(&mapping->private_lock);
-			if (!PagePrivate(page)) {
-				spin_unlock(&mapping->private_lock);
-				continue;
-			}
-
-			eb = (struct extent_buffer *)page->private;
-
-			/*
-			 * Shouldn't happen and normally this would be a BUG_ON
-			 * but no sense in crashing the users box for something
-			 * we can survive anyway.
-			 */
-			if (WARN_ON(!eb)) {
-				spin_unlock(&mapping->private_lock);
-				continue;
-			}
-
-			if (eb == prev_eb) {
-				spin_unlock(&mapping->private_lock);
-				continue;
-			}
-
-			ret = atomic_inc_not_zero(&eb->refs);
-			spin_unlock(&mapping->private_lock);
-			if (!ret)
-				continue;
-
-			prev_eb = eb;
-			ret = lock_extent_buffer_for_io(eb, &epd);
-			if (!ret) {
-				free_extent_buffer(eb);
-				continue;
-			} else if (ret < 0) {
+			if (ret < 0) {
 				done = 1;
-				free_extent_buffer(eb);
 				break;
 			}
-
-			ret = write_one_eb(eb, wbc, &epd);
-			if (ret) {
-				done = 1;
-				free_extent_buffer(eb);
-				break;
-			}
-			free_extent_buffer(eb);
 
 			/*
 			 * the filesystem may choose to bump up nr_to_write.
 			 * We have to make sure to honor the new nr_to_write
@@ -4105,7 +4134,6 @@ retry:
 		index = 0;
 		goto retry;
 	}
-	ASSERT(ret <= 0);
 	if (ret < 0) {
 		end_write_bio(&epd, ret);
 		return ret;
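One design note on the helper worth calling out: submit_eb_page() does an unlocked PagePrivate() fast check, re-checks under mapping->private_lock, and takes a reference only via atomic_inc_not_zero(), so an extent buffer whose refcount has already dropped to zero is never revived. Below is a rough userspace sketch of that lookup-and-grab pattern, under the assumption that a mutex and C11 atomics stand in for the kernel's spinlock and refcount primitives (all names here are hypothetical, not btrfs APIs):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct buffer {
		atomic_int refs;	/* 0 means teardown has begun */
	};

	struct slot {
		pthread_mutex_t lock;	/* stands in for mapping->private_lock */
		struct buffer *priv;	/* stands in for page->private */
	};

	/*
	 * Try to take a reference on the buffer attached to @s.  Returns the
	 * buffer with an extra reference held, or NULL if there is nothing
	 * to do -- mirroring submit_eb_page()'s early "return 0" paths.
	 */
	static struct buffer *grab_buffer(struct slot *s)
	{
		struct buffer *b;
		int old;

		if (!s->priv)		/* unlocked fast-path check */
			return NULL;

		pthread_mutex_lock(&s->lock);
		b = s->priv;		/* re-check under the lock */
		if (!b) {
			pthread_mutex_unlock(&s->lock);
			return NULL;
		}
		/* inc-not-zero: only take a reference while refs > 0 */
		old = atomic_load(&b->refs);
		while (old > 0 &&
		       !atomic_compare_exchange_weak(&b->refs, &old, old + 1))
			;
		pthread_mutex_unlock(&s->lock);
		return old > 0 ? b : NULL;
	}

	int main(void)
	{
		struct buffer b = { .refs = 1 };
		struct slot s = { PTHREAD_MUTEX_INITIALIZER, &b };

		printf("grabbed: %s\n", grab_buffer(&s) ? "yes" : "no");
		return 0;
	}

The compare-and-swap loop is the standard way to express inc-not-zero: the increment only lands while the count is still positive, which is exactly the guarantee the kernel's atomic_inc_not_zero() provides.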