XArray: Add cyclic allocation
This differs slightly from the IDR equivalent in five ways.

1. It can allocate up to UINT_MAX instead of being limited to INT_MAX,
   like xa_alloc().  Also like xa_alloc(), it will write to the 'id'
   pointer before placing the entry in the XArray.
2. The 'next' cursor is allocated separately from the XArray instead of
   being part of the IDR.  This saves memory for all the users which do
   not use the cyclic allocation API and suits some users better.
3. It returns -EBUSY instead of -ENOSPC.
4. It will attempt to wrap back to the minimum value on memory
   allocation failure as well as on an -EBUSY error, assuming that a
   user would rather allocate a small ID than suffer an ID allocation
   failure.
5. It reports whether it has wrapped, which is important to some users.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
commit 2fa044e51a
parent a3e4d3f97e
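As a quick illustration of the new interface (this example is not part of the commit; the structure and helper names are invented), a caller keeps its own 'next' cursor next to the XArray and lets xa_alloc_cyclic() hand out IDs across the full 32-bit range:

/*
 * Minimal sketch, not from this commit: 'foo_table' and 'foo_insert'
 * are hypothetical names.
 */
struct foo_table {
	struct xarray ids;	/* initialised with XA_FLAGS_ALLOC */
	u32 next;		/* cyclic cursor owned by the caller, not the XArray */
};

static int foo_insert(struct foo_table *t, void *item, u32 *out_id)
{
	int err;

	/* Search for a free ID starting at t->next, wrapping if needed. */
	err = xa_alloc_cyclic(&t->ids, out_id, item, xa_limit_32b,
			      &t->next, GFP_KERNEL);
	if (err < 0)		/* -ENOMEM, or -EBUSY if every ID is in use */
		return err;
	if (err == 1)		/* allocation succeeded but the ID space wrapped */
		pr_debug("foo: ID space wrapped\n");
	return 0;
}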
Documentation/core-api/xarray.rst

@@ -144,7 +144,9 @@ you only want to free the entry if it's ``NULL``).
 
 By default, the lowest free entry is allocated starting from 0. If you
 want to allocate entries starting at 1, it is more efficient to use
-:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``.
+:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to
+allocate IDs up to a maximum, then wrap back around to the lowest free
+ID, you can use :c:func:`xa_alloc_cyclic`.
 
 You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
 is used to track whether an entry is free or not. The other marks are
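For readers of the documentation above, a short hedged example of an allocating XArray whose IDs start at 1 (all identifiers below are made up for illustration):

/* Illustrative only: IDs start at 1, equivalent to XA_FLAGS_ALLOC1. */
static DEFINE_XARRAY_ALLOC1(session_ids);
static u32 session_next;	/* cursor for cyclic allocation */

static int session_new(void *session, u32 *id)
{
	/*
	 * Hand out IDs in [1, 0xffff]; once the top is reached, wrap back
	 * to the lowest free ID.  Returns 0 or 1 on success (1 means the
	 * ID space wrapped), or a negative errno.
	 */
	return xa_alloc_cyclic(&session_ids, id, session, XA_LIMIT(1, 0xffff),
			       &session_next, GFP_KERNEL);
}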
include/linux/xarray.h

@@ -242,6 +242,7 @@ enum xa_lock_type {
 #define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
 #define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
+#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
 #define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
 						(__force unsigned)(mark)))
 
@@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
 		struct xa_limit, gfp_t);
+int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
+		struct xa_limit, u32 *next, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -858,6 +861,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 	return err;
 }
 
+/**
+ * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
+}
+
 /**
  * xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
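The three wrappers above differ only in how they take the xa_lock. A hedged sketch of choosing between them (the 'cookies' array and helpers are hypothetical, not part of this commit):

static DEFINE_XARRAY_ALLOC(cookies);
static u32 cookie_next;

/* xa_lock only ever taken from process context: the plain variant is enough. */
static int cookie_alloc(void *data, u32 *id)
{
	return xa_alloc_cyclic(&cookies, id, data, xa_limit_32b,
			       &cookie_next, GFP_KERNEL);
}

/*
 * If the same XArray is also used from hard interrupt context, the
 * allocation path must disable interrupts while holding the lock.  Per
 * the kerneldoc above, this variant is still called from process context.
 */
static int cookie_alloc_irqsafe(void *data, u32 *id)
{
	return xa_alloc_cyclic_irq(&cookies, id, data, xa_limit_32b,
				   &cookie_next, GFP_KERNEL);
}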
lib/test_xarray.c

@@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+	u32 next = 0;
+	unsigned int i, id;
+	unsigned long index;
+	void *entry;
+
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 1);
+
+	next = 0x3ffd;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 0x3ffd);
+	xa_erase_index(xa, 0x3ffd);
+	xa_erase_index(xa, 1);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = 0x3ffe; i < 0x4003; i++) {
+		if (i < 0x4000)
+			entry = xa_mk_index(i);
+		else
+			entry = xa_mk_index(i - 0x3fff);
+		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+					&next, GFP_KERNEL) != (id == 1));
+		XA_BUG_ON(xa, xa_mk_index(id) != entry);
+	}
+
+	/* Check wrap-around is handled correctly */
+	if (base != 0)
+		xa_erase_index(xa, base);
+	xa_erase_index(xa, base + 1);
+	next = UINT_MAX;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != UINT_MAX);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+				xa_limit_32b, &next, GFP_KERNEL) != 1);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base + 1);
+
+	xa_for_each(xa, index, entry)
+		xa_erase_index(xa, index);
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
@@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void)
 	check_xa_alloc_1(&xa1, 1);
 	check_xa_alloc_2(&xa0, 0);
 	check_xa_alloc_2(&xa1, 1);
+	check_xa_alloc_3(&xa0, 0);
+	check_xa_alloc_3(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
lib/xarray.c
@@ -1656,6 +1656,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
 }
 EXPORT_SYMBOL(__xa_alloc);
 
+/**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	u32 min = limit.min;
+	int ret;
+
+	limit.min = max(min, *next);
+	ret = __xa_alloc(xa, id, entry, limit, gfp);
+	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+		ret = 1;
+	}
+
+	if (ret < 0 && limit.min > min) {
+		limit.min = min;
+		ret = __xa_alloc(xa, id, entry, limit, gfp);
+		if (ret == 0)
+			ret = 1;
+	}
+
+	if (ret >= 0) {
+		*next = *id + 1;
+		if (*next == 0)
+			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
+
 /**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.
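Finally, a hedged sketch of calling the locked __xa_alloc_cyclic() directly when the caller already holds the xa_lock to update related bookkeeping (the structure and field names are invented for this example):

struct conn_table {
	struct xarray conns;	/* allocating XArray (XA_FLAGS_ALLOC) */
	u32 next_id;		/* cyclic cursor */
	unsigned long nr_conns;
};

static int conn_register(struct conn_table *t, void *conn, u32 *id)
{
	int err;

	xa_lock(&t->conns);
	/* May drop and retake the xa_lock internally if @gfp allows sleeping. */
	err = __xa_alloc_cyclic(&t->conns, id, conn, xa_limit_32b,
				&t->next_id, GFP_KERNEL);
	if (err >= 0)		/* 0 or 1 (wrapped) both mean success */
		t->nr_conns++;
	xa_unlock(&t->conns);

	return err < 0 ? err : 0;
}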