netfs: Delete some xarray-wangling functions that aren't used
Delete some xarray-based buffer wangling functions that are intended for
use with bounce buffering, but aren't used because bounce buffering got
deferred to a later patch series.  Now, however, the intention is to use
something other than an xarray to do this.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240620173137.610345-9-dhowells@redhat.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent 6470e0bc6f
commit 84dfbc9cad
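For context, the deleted helpers formed the management API for a private xarray of folios (a bounce buffer): populate a range of folios, then drop every folio marked NETFS_BUF_PUT_MARK on teardown. A rough sketch of how a caller might have strung them together follows; the caller itself is hypothetical and does not appear in this commit, and the `bounce` and `mapping` fields on the request are assumptions:

```c
/* Hypothetical caller, for illustration only; not part of this commit.
 * Assumes the request carries a private xarray (rreq->bounce) and the
 * target mapping (rreq->mapping). */
static int netfs_demo_use_bounce(struct netfs_io_request *rreq,
				 pgoff_t first, pgoff_t last)
{
	int ret;

	/* Allocate fresh folios over [first, last] into the private
	 * xarray; each one is marked NETFS_BUF_PUT_MARK so that
	 * teardown knows to put it. */
	ret = netfs_add_folios_to_buffer(&rreq->bounce, rreq->mapping,
					 first, last, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... perform I/O through the bounce buffer here ... */

	/* Put the marked folios and destroy the xarray. */
	netfs_clear_buffer(&rreq->bounce);
	return 0;
}
```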
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -63,15 +63,6 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
 /*
  * misc.c
  */
-#define NETFS_FLAG_PUT_MARK		BIT(0)
-#define NETFS_FLAG_PAGECACHE_MARK	BIT(1)
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
-			    struct folio *folio, unsigned int flags,
-			    gfp_t gfp_mask);
-int netfs_add_folios_to_buffer(struct xarray *buffer,
-			       struct address_space *mapping,
-			       pgoff_t index, pgoff_t to, gfp_t gfp_mask);
-void netfs_clear_buffer(struct xarray *buffer);
 
 /*
  * objects.c
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,87 +8,6 @@
 #include <linux/swap.h>
 #include "internal.h"
 
-/*
- * Attach a folio to the buffer and maybe set marks on it to say that we need
- * to put the folio later and twiddle the pagecache flags.
- */
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
-			    struct folio *folio, unsigned int flags,
-			    gfp_t gfp_mask)
-{
-	XA_STATE_ORDER(xas, xa, index, folio_order(folio));
-
-retry:
-	xas_lock(&xas);
-	for (;;) {
-		xas_store(&xas, folio);
-		if (!xas_error(&xas))
-			break;
-		xas_unlock(&xas);
-		if (!xas_nomem(&xas, gfp_mask))
-			return xas_error(&xas);
-		goto retry;
-	}
-
-	if (flags & NETFS_FLAG_PUT_MARK)
-		xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
-	if (flags & NETFS_FLAG_PAGECACHE_MARK)
-		xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
-	xas_unlock(&xas);
-	return xas_error(&xas);
-}
-
-/*
- * Create the specified range of folios in the buffer attached to the read
- * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know that
- * these need freeing later.
- */
-int netfs_add_folios_to_buffer(struct xarray *buffer,
-			       struct address_space *mapping,
-			       pgoff_t index, pgoff_t to, gfp_t gfp_mask)
-{
-	struct folio *folio;
-	int ret;
-
-	if (to + 1 == index) /* Page range is inclusive */
-		return 0;
-
-	do {
-		/* TODO: Figure out what order folio can be allocated here */
-		folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
-		if (!folio)
-			return -ENOMEM;
-		folio->index = index;
-		ret = netfs_xa_store_and_mark(buffer, index, folio,
-					      NETFS_FLAG_PUT_MARK, gfp_mask);
-		if (ret < 0) {
-			folio_put(folio);
-			return ret;
-		}
-
-		index += folio_nr_pages(folio);
-	} while (index <= to && index != 0);
-
-	return 0;
-}
-
-/*
- * Clear an xarray buffer, putting a ref on the folios that have
- * NETFS_BUF_PUT_MARK set.
- */
-void netfs_clear_buffer(struct xarray *buffer)
-{
-	struct folio *folio;
-	XA_STATE(xas, buffer, 0);
-
-	rcu_read_lock();
-	xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
-		folio_put(folio);
-	}
-	rcu_read_unlock();
-	xa_destroy(buffer);
-}
-
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.
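The heart of the deleted netfs_xa_store_and_mark() is the standard XArray store-and-retry idiom: xas_store() can fail with -ENOMEM while the lock is held, in which case xas_nomem() allocates the needed node outside the lock and signals that the store should be retried. A minimal sketch of that idiom in isolation, written in the do/while form used by the XArray documentation (demo_xa_store() is illustrative, not a kernel function):

```c
#include <linux/xarray.h>

/* Store @entry at @index, retrying if node allocation is needed.
 * This mirrors the retry loop in the deleted netfs_xa_store_and_mark(). */
static int demo_xa_store(struct xarray *xa, unsigned long index,
			 void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));	/* true => memory allocated, retry */

	return xas_error(&xas);		/* 0 on success */
}
```

Marks set with xas_set_mark() then tag the stored entries so that teardown, as in the deleted netfs_clear_buffer(), can walk just the tagged entries with xas_for_each_marked() rather than scanning the whole array.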