dax: Convert dax_layout_busy_page to XArray
Instead of using a pagevec, just use the XArray iterators. Add a conditional rescheduling point which probably should have been there in the original.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent cfc93c6c6c
commit 084a899008

fs/dax.c | 64
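The heart of the change is the kernel's standard XArray scan shape: iterate with xas_for_each() under the xa_lock, and every XA_CHECK_SCHED entries pause the cursor, drop the lock, and give other tasks a chance to run before resuming. Below is a minimal sketch of that pattern on its own, outside the context of this patch: my_count_entries() and SCAN_BATCH are hypothetical names (SCAN_BATCH stands in for the XA_CHECK_SCHED constant the patch uses), while the XArray calls themselves are the real <linux/xarray.h> API.

/*
 * Sketch only, not from the patch: the generic XArray
 * scan-with-reschedule pattern the new dax_layout_busy_page()
 * follows. my_count_entries() and SCAN_BATCH are hypothetical.
 */
#include <linux/xarray.h>
#include <linux/sched.h>

#define SCAN_BATCH 4096		/* stand-in for the patch's XA_CHECK_SCHED */

unsigned long my_count_entries(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);		/* cursor starting at index 0 */
	unsigned long count = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		count++;		/* ... inspect entry under the lock ... */
		if (count % SCAN_BATCH)
			continue;
		/*
		 * Every SCAN_BATCH entries: park the cursor so it no
		 * longer points into the tree, drop the lock, let other
		 * tasks run, then retake the lock; the next iteration
		 * re-walks from the saved index.
		 */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return count;
}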
@@ -722,11 +722,10 @@ restart:
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
+	XA_STATE(xas, &mapping->i_pages, 0);
+	void *entry;
+	unsigned int scanned = 0;
 	struct page *page = NULL;
-	struct pagevec pvec;
-	pgoff_t index, end;
-	unsigned i;
 
 	/*
 	 * In the 'limited' case get_user_pages() for dax is disabled.
@@ -737,13 +736,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 		return NULL;
 
-	pagevec_init(&pvec);
-	index = 0;
-	end = -1;
-
 	/*
 	 * If we race get_user_pages_fast() here either we'll see the
-	 * elevated page count in the pagevec_lookup and wait, or
+	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
 	 * against is no longer mapped in the page tables and bail to the
 	 * get_user_pages() slow path. The slow path is protected by
@@ -755,43 +750,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 */
 	unmap_mapping_range(mapping, 0, 0, 1);
 
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *pvec_ent = pvec.pages[i];
-			void *entry;
-
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (WARN_ON_ONCE(!xa_is_value(pvec_ent)))
-				continue;
-
-			xa_lock_irq(&mapping->i_pages);
-			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
-				page = dax_busy_page(entry);
-			put_unlocked_mapping_entry(mapping, index, entry);
-			xa_unlock_irq(&mapping->i_pages);
-			if (page)
-				break;
-		}
-
-		/*
-		 * We don't expect normal struct page entries to exist in our
-		 * tree, but we keep these pagevec calls so that this code is
-		 * consistent with the common pattern for handling pagevecs
-		 * throughout the kernel.
-		 */
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-
-		if (page)
-			break;
-	}
+	xas_lock_irq(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (WARN_ON_ONCE(!xa_is_value(entry)))
+			continue;
+		if (unlikely(dax_is_locked(entry)))
+			entry = get_unlocked_entry(&xas);
+		if (entry)
+			page = dax_busy_page(entry);
+		put_unlocked_entry(&xas, entry);
+		if (page)
+			break;
+		if (++scanned % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		cond_resched();
+		xas_lock_irq(&xas);
+	}
+	xas_unlock_irq(&xas);
 	return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
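Two details of the new loop are easy to miss. First, the XArray cursor caches a pointer into the tree, so the loop calls xas_pause() before dropping xa_lock around cond_resched(); pausing parks the cursor so the iteration can safely re-walk to its saved index once the lock is retaken. Second, a DAX entry can be locked by a concurrent operation: dax_is_locked() spots that case, get_unlocked_entry() waits for the entry to be unlocked before dax_busy_page() examines it, and put_unlocked_entry() wakes any other waiters afterwards.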