xarray: Add XArray iterators
The xa_for_each iterator allows the user to efficiently walk a range of the array, executing the loop body once for each entry in that range that matches the filter. This commit also includes xa_find() and xa_find_after(), which are helper functions for xa_for_each() but may also be useful in their own right. In the xas family of functions, we have xas_for_each(), xas_find(), xas_next_entry(), xas_for_each_marked(), xas_find_marked(), xas_next_marked() and xas_pause().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 41aec91f55
commit b803b42823
include/linux/xarray.h

@@ -280,6 +280,10 @@ void *xa_cmpxchg(struct xarray *, unsigned long index,
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
                unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
                unsigned long max, xa_mark_t) __attribute__((nonnull(2)));

/**
 * xa_init() - Initialise an empty XArray.
@@ -364,6 +368,35 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
        return -EEXIST;
}

/**
 * xa_for_each() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @entry: Entry retrieved from array.
 * @index: Index of @entry.
 * @max: Maximum index to retrieve from array.
 * @filter: Selection criterion.
 *
 * Initialise @index to the lowest index you want to retrieve from the
 * array. During the iteration, @entry will have the value of the entry
 * stored in @xa at @index. The iteration will skip all entries in the
 * array which do not match @filter. You may modify @index during the
 * iteration if you want to skip or reprocess indices. It is safe to modify
 * the array during the iteration. At the end of the iteration, @entry will
 * be set to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, entry, index, max, filter) \
        for (entry = xa_find(xa, &index, max, filter); entry; \
             entry = xa_find_after(xa, &index, max, filter))

#define xa_trylock(xa)          spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)             spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)           spin_unlock(&(xa)->xa_lock)
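For illustration, a minimal usage sketch of the xa_for_each() form defined above; the function name and the pr_info() output are illustrative assumptions, not part of this patch:

static void print_present_entries(struct xarray *xa)
{
        void *entry;
        unsigned long index = 0;

        /* Walks every present entry; the RCU lock is taken and released internally. */
        xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT)
                pr_info("index %lu: entry %p\n", index, entry);
}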
@@ -835,13 +868,16 @@ static inline bool xas_retry(struct xa_state *xas, const void *entry)

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);

/**
 * xas_reload() - Refetch an entry from the xarray.
@@ -914,4 +950,133 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
        xas->xa_update = update;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
        struct xa_node *node = xas->xa_node;
        void *entry;

        if (unlikely(xas_not_node(node) || node->shift ||
                        xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
                return xas_find(xas, max);

        do {
                if (unlikely(xas->xa_index >= max))
                        return xas_find(xas, max);
                if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
                        return xas_find(xas, max);
                entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
                if (unlikely(xa_is_internal(entry)))
                        return xas_find(xas, max);
                xas->xa_offset++;
                xas->xa_index++;
        } while (!entry);

        return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
                xa_mark_t mark)
{
        unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
        unsigned int offset = xas->xa_offset;

        if (advance)
                offset++;
        if (XA_CHUNK_SIZE == BITS_PER_LONG) {
                if (offset < XA_CHUNK_SIZE) {
                        unsigned long data = *addr & (~0UL << offset);
                        if (data)
                                return __ffs(data);
                }
                return XA_CHUNK_SIZE;
        }

        return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
                xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset;

        if (unlikely(xas_not_node(node) || node->shift))
                return xas_find_marked(xas, max, mark);
        offset = xas_find_chunk(xas, true, mark);
        xas->xa_offset = offset;
        xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
        if (xas->xa_index > max)
                return NULL;
        if (offset == XA_CHUNK_SIZE)
                return xas_find_marked(xas, max, mark);
        return xa_entry(xas->xa, node, offset);
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
        XA_CHECK_SCHED = 4096,
};

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
        for (entry = xas_find(xas, max); entry; \
             entry = xas_next_entry(xas, max))

/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
        for (entry = xas_find_marked(xas, max, mark); entry; \
             entry = xas_next_marked(xas, max, mark))

#endif /* _LINUX_XARRAY_H */
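As a sketch of how these pieces combine with XA_CHECK_SCHED, the following helper erases a range under the xa_lock and yields every XA_CHECK_SCHED entries; the function name and the decision to erase entries are illustrative assumptions, not part of this patch:

static void erase_range(struct xarray *xa, unsigned long max)
{
        XA_STATE(xas, xa, 0);
        unsigned int batch = 0;
        void *entry;

        xas_lock(&xas);
        xas_for_each(&xas, entry, max) {
                xas_store(&xas, NULL);          /* delete the current entry */
                if (++batch % XA_CHECK_SCHED)
                        continue;
                xas_pause(&xas);                /* make the state safe across the unlock */
                xas_unlock(&xas);
                cond_resched();
                xas_lock(&xas);
        }
        xas_unlock(&xas);
}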
lib/test_xarray.c

@@ -75,6 +75,48 @@ static noinline void check_xa_err(struct xarray *xa)
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

static noinline void check_xas_retry(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        void *entry;

        xa_store_index(xa, 0, GFP_KERNEL);
        xa_store_index(xa, 1, GFP_KERNEL);

        rcu_read_lock();
        XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
        xa_erase_index(xa, 1);
        XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
        XA_BUG_ON(xa, xas_retry(&xas, NULL));
        XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
        xas_reset(&xas);
        XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
        XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
        XA_BUG_ON(xa, xas.xa_node != NULL);

        XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
        XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
        xas.xa_node = XAS_RESTART;
        XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
        rcu_read_unlock();

        /* Make sure we can iterate through retry entries */
        xas_lock(&xas);
        xas_set(&xas, 0);
        xas_store(&xas, XA_RETRY_ENTRY);
        xas_set(&xas, 1);
        xas_store(&xas, XA_RETRY_ENTRY);

        xas_set(&xas, 0);
        xas_for_each(&xas, entry, ULONG_MAX) {
                xas_store(&xas, xa_mk_value(xas.xa_index));
        }
        xas_unlock(&xas);

        xa_erase_index(xa, 0);
        xa_erase_index(xa, 1);
}

static noinline void check_xa_load(struct xarray *xa)
{
        unsigned long i, j;
@@ -217,6 +259,44 @@ static noinline void check_cmpxchg(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xas_erase(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        void *entry;
        unsigned long i, j;

        for (i = 0; i < 200; i++) {
                for (j = i; j < 2 * i + 17; j++) {
                        xas_set(&xas, j);
                        do {
                                xas_lock(&xas);
                                xas_store(&xas, xa_mk_value(j));
                                xas_unlock(&xas);
                        } while (xas_nomem(&xas, GFP_KERNEL));
                }

                xas_set(&xas, ULONG_MAX);
                do {
                        xas_lock(&xas);
                        xas_store(&xas, xa_mk_value(0));
                        xas_unlock(&xas);
                } while (xas_nomem(&xas, GFP_KERNEL));

                xas_lock(&xas);
                xas_store(&xas, NULL);

                xas_set(&xas, 0);
                j = i;
                xas_for_each(&xas, entry, ULONG_MAX) {
                        XA_BUG_ON(xa, entry != xa_mk_value(j));
                        xas_store(&xas, NULL);
                        j++;
                }
                xas_unlock(&xas);
                XA_BUG_ON(xa, !xa_empty(xa));
        }
}

static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
@@ -285,16 +365,119 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}

static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
        unsigned long index;

        xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
        XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

        index = 0;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
                        xa_mk_value(12));
        XA_BUG_ON(xa, index != 12);
        index = 13;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
                        xa_mk_value(12));
        XA_BUG_ON(xa, (index < 12) || (index >= 16));
        XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
                        xa_mk_value(16));
        XA_BUG_ON(xa, index != 16);

        xa_erase_index(xa, 12);
        xa_erase_index(xa, 16);
        XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
        unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
        unsigned int i, j;
        void *entry;

        for (i = 0; i < max_order; i++) {
                unsigned long index = 1UL << i;
                for (j = 0; j < index; j++) {
                        XA_STATE(xas, xa, j + index);
                        xa_store_index(xa, index - 1, GFP_KERNEL);
                        xa_store_order(xa, index, i, xa_mk_value(index),
                                        GFP_KERNEL);
                        rcu_read_lock();
                        xas_for_each(&xas, entry, ULONG_MAX) {
                                xa_erase_index(xa, index);
                        }
                        rcu_read_unlock();
                        xa_erase_index(xa, index - 1);
                        XA_BUG_ON(xa, !xa_empty(xa));
                }
        }
}

static noinline void check_find(struct xarray *xa)
{
        unsigned long i, j, k;

        XA_BUG_ON(xa, !xa_empty(xa));

        /*
         * Check xa_find with all pairs between 0 and 99 inclusive,
         * starting at every index between 0 and 99
         */
        for (i = 0; i < 100; i++) {
                XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
                xa_set_mark(xa, i, XA_MARK_0);
                for (j = 0; j < i; j++) {
                        XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
                                        NULL);
                        xa_set_mark(xa, j, XA_MARK_0);
                        for (k = 0; k < 100; k++) {
                                unsigned long index = k;
                                void *entry = xa_find(xa, &index, ULONG_MAX,
                                                        XA_PRESENT);
                                if (k <= j)
                                        XA_BUG_ON(xa, index != j);
                                else if (k <= i)
                                        XA_BUG_ON(xa, index != i);
                                else
                                        XA_BUG_ON(xa, entry != NULL);

                                index = k;
                                entry = xa_find(xa, &index, ULONG_MAX,
                                                XA_MARK_0);
                                if (k <= j)
                                        XA_BUG_ON(xa, index != j);
                                else if (k <= i)
                                        XA_BUG_ON(xa, index != i);
                                else
                                        XA_BUG_ON(xa, entry != NULL);
                        }
                        xa_erase_index(xa, j);
                        XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
                        XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
                }
                xa_erase_index(xa, i);
                XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
        }
        XA_BUG_ON(xa, !xa_empty(xa));
        check_multi_find(xa);
        check_multi_find_2(xa);
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
        check_xa_err(&array);
        check_xas_retry(&array);
        check_xa_load(&array);
        check_xa_mark(&array);
        check_xa_shrink(&array);
        check_xas_erase(&array);
        check_cmpxchg(&array);
        check_multi_store(&array);
        check_find(&array);

        printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
        return (tests_run == tests_passed) ? 0 : -EINVAL;
lib/xarray.c
@@ -128,6 +128,11 @@ static unsigned int get_offset(unsigned long index, struct xa_node *node)
        return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
        xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
@@ -136,6 +141,12 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
        xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
        xas->xa_offset++;
        xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
        xas->xa_node = XAS_BOUNDS;
@@ -829,6 +840,202 @@ void xas_init_marks(const struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration. If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        if (xas_invalid(xas))
                return;

        if (node) {
                unsigned int offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
                        if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
        } else {
                xas->xa_index++;
        }
        xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
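A sketch of the calling sequence described above, for the case of carrying out a sleeping operation on an entry; needs_slow_work() and do_slow_work() are hypothetical helpers, and the caller must ensure the entry stays valid across the unlock:

static void process_slowly(struct xarray *xa)
{
        XA_STATE(xas, xa, 0);
        void *entry;

        xas_lock(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
                if (!needs_slow_work(entry))    /* hypothetical predicate */
                        continue;
                xas_pause(&xas);                /* remember where the walk stopped */
                xas_unlock(&xas);
                do_slow_work(entry);            /* may sleep; entry must remain valid */
                xas_lock(&xas);                 /* the walk resumes after the paused index */
        }
        xas_unlock(&xas);
}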
/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array. This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
        void *entry;

        if (xas_error(xas))
                return NULL;

        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
        } else if (xas_top(xas->xa_node)) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
        } else if (!xas->xa_node->shift &&
                    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
        }

        xas_advance(xas);

        while (xas->xa_node && (xas->xa_index <= max)) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (xa_is_node(entry)) {
                        xas->xa_node = xa_to_node(entry);
                        xas->xa_offset = 0;
                        continue;
                }
                if (entry && !xa_is_sibling(entry))
                        return entry;

                xas_advance(xas);
        }

        if (!xas->xa_node)
                xas->xa_node = XAS_BOUNDS;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
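The last paragraph of the kernel-doc above enables a pattern like the following sketch, which stores through the state when nothing is present at or after a starting index; the helper name and the stored pointer are assumptions, and a fuller version would retry with xas_nomem() on -ENOMEM:

static int append_if_tail_empty(struct xarray *xa, unsigned long start, void *ptr)
{
        XA_STATE(xas, xa, start);

        xas_lock(&xas);
        if (!xas_find(&xas, ULONG_MAX))
                xas_store(&xas, ptr);   /* xas already points at the first free index */
        xas_unlock(&xas);
        return xas_error(&xas);
}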
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array. This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
        bool advance = true;
        unsigned int offset;
        void *entry;

        if (xas_error(xas))
                return NULL;

        if (!xas->xa_node) {
                xas->xa_index = 1;
                goto out;
        } else if (xas_top(xas->xa_node)) {
                advance = false;
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
                        goto bounds;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
                        xas->xa_index = 1;
                        goto out;
                }
                xas->xa_node = xa_to_node(entry);
                xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
        }

        while (xas->xa_index <= max) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        advance = false;
                        continue;
                }

                if (!advance) {
                        entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                        if (xa_is_sibling(entry)) {
                                xas->xa_offset = xa_to_sibling(entry);
                                xas_move_index(xas, xas->xa_offset);
                        }
                }

                offset = xas_find_chunk(xas, advance, mark);
                if (offset > xas->xa_offset) {
                        advance = false;
                        xas_move_index(xas, offset);
                        /* Mind the wrap */
                        if ((xas->xa_index - 1) >= max)
                                goto max;
                        xas->xa_offset = offset;
                        if (offset == XA_CHUNK_SIZE)
                                continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;
                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }

out:
        if (!max)
                goto max;
bounds:
        xas->xa_node = XAS_BOUNDS;
        return NULL;
max:
        xas->xa_node = XAS_RESTART;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
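A small sketch of consuming xas_find_marked() through the xas_for_each_marked() helper from the header; the function name and the choice of XA_MARK_0 are illustrative assumptions:

static unsigned long count_marked(struct xarray *xa, unsigned long max)
{
        XA_STATE(xas, xa, 0);
        unsigned long count = 0;
        void *entry;

        rcu_read_lock();
        xas_for_each_marked(&xas, entry, max, XA_MARK_0) {
                if (xas_retry(&xas, entry))     /* skip transient retry entries */
                        continue;
                count++;
        }
        rcu_read_unlock();
        return count;
}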
/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
@@ -1152,6 +1359,91 @@ void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
}
EXPORT_SYMBOL(xa_clear_mark);
/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
                unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp);
        void *entry;

        rcu_read_lock();
        do {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find);
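For illustration, a sketch of the cursor behaviour documented above: @indexp is advanced to the index of the entry that was found. The function name and output are assumptions:

static void report_first_entry(struct xarray *xa, unsigned long start)
{
        unsigned long index = start;
        void *entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);

        if (entry)
                pr_info("first entry at or after %lu is at index %lu\n", start, index);
}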
/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added. It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context. Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
                unsigned long max, xa_mark_t filter)
{
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;

        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
                if (xas.xa_shift) {
                        if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
                                continue;
                } else {
                        if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
                                continue;
                }
                if (!xas_retry(&xas, entry))
                        break;
        }
        rcu_read_unlock();

        if (entry)
                *indexp = xas.xa_index;
        return entry;
}
EXPORT_SYMBOL(xa_find_after);
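Taken together, xa_find() and xa_find_after() give the open-coded equivalent of xa_for_each(); the sketch below mirrors the macro expansion with max = ULONG_MAX and filter = XA_PRESENT (the function name is an assumption):

static void walk_all(struct xarray *xa)
{
        unsigned long index = 0;
        void *entry;

        for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
             entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) {
                /* process 'entry', which is stored at 'index' */
        }
}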
#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{