XArray/IDA updates for 6.6
- Fix a bug encountered by people using bittorrent where they'd get
  NULL pointer dereferences on page cache lookups when using XFS

- Two documentation fixes

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEejHryeLBw/spnjHrDpNsjXcpgj4FAmT7XLYACgkQDpNsjXcp
gj6qrAf+LiAs3dUELOjrqaQQbbNGp4na+YwJCiezuvwZn8P+ieJpt6QCEDHEb1jH
LCOjr0GFMhHnAWp9Q0Qay4IXoKk8DPkA/avSaZgsl5blmMyNqFMgHklU7mjRvhCG
ayb/NeZYwrJhA9NyueXYuH3h7QDryxyIN3TZS1/7z13YrohMIQeu3q7X/ZBMh7NS
uPd7vmDj8TnZ/agQzplQ4XDov9lrzkUXDJqpMvn/Gbr4K7y66UZa3SLxi1JPrnah
ffDvBlK2OImNBoaADfiRImWc7QlXVkF/B08xUcJ6tXAeO6xJDykkie+gjsF2S040
YP2YIG+IWi47zqa25EuxFRtavwUh6w==
=4GKy
-----END PGP SIGNATURE-----

Merge tag 'xarray-6.6' of git://git.infradead.org/users/willy/xarray

Pull xarray fixes from Matthew Wilcox:

 - Fix a bug encountered by people using bittorrent where they'd get
   NULL pointer dereferences on page cache lookups when using XFS

 - Two documentation fixes

* tag 'xarray-6.6' of git://git.infradead.org/users/willy/xarray:
  idr: fix param name in idr_alloc_cyclic() doc
  xarray: Document necessary flag in alloc functions
  XArray: Do not return sibling entries from xa_load()
commit 3095dd99dd
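The user-visible bug is easiest to state from the caller's side: xa_load() must resolve the sibling slots of a multi-order entry to the canonical entry, so a lookup must never observe an internal value. A minimal sketch of that invariant (hypothetical kernel-side consumer, not part of this series):

#include <linux/xarray.h>

static DEFINE_XARRAY(file_pages);

/* Before the fix below, a lookup racing with multi-order inserts could
 * land on a sibling slot and hand back an internal entry, which callers
 * such as the page cache then dereferenced as a real object. */
static void *lookup_page(unsigned long index)
{
        void *entry = xa_load(&file_pages, index);

        WARN_ON(xa_is_internal(entry)); /* must always hold */
        return entry;
}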
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
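For context on the sentence being added: the alloc family only works on arrays that track free entries. A minimal usage sketch (illustrative, not part of the patch), where DEFINE_XARRAY_ALLOC() is the static equivalent of xa_init_flags(..., XA_FLAGS_ALLOC):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(objects);    /* sets XA_FLAGS_ALLOC */

static int track_object(void *obj)
{
        u32 id;

        /* Finds the lowest free index in [0, UINT_MAX] and stores obj there. */
        return xa_alloc(&objects, &id, obj, xa_limit_32b, GFP_KERNEL);
}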
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
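The cyclic variant documented here additionally threads a @next cursor through calls, and a return value of 1 is a success that wrapped. A hedged sketch (invented names):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(sessions);
static u32 next_session;        /* search cursor, persists across calls */

static int new_session(void *s)
{
        u32 id;
        int err = xa_alloc_cyclic(&sessions, &id, s, xa_limit_31b,
                                  &next_session, GFP_KERNEL);

        if (err < 0)
                return err;     /* -ENOMEM or -EBUSY */
        return id;              /* err == 1 only means the search wrapped */
}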
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
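The _bh and _irq variants differ only in how they take xa_lock. A hedged example of why the choice matters (scenario invented for illustration): if the same array is also updated from softirq context, process-context callers must use the _bh flavour so a softirq on the same CPU cannot deadlock against the held lock.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(pending);

static int queue_request(void *req)
{
        u32 id;

        /* Takes xa_lock with softirqs disabled; may still sleep to
         * allocate memory since GFP_KERNEL permits it. */
        return xa_alloc_bh(&pending, &id, req, xa_limit_32b, GFP_KERNEL);
}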
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
  * @end: The maximum ID (exclusive).
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
  * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
  * callers to use @start + N as @end as long as N is within integer range.
  * The search for an unused ID will start at the last ID allocated and will
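With the parameter name fixed, the documented behaviour reads correctly; a small illustrative use (names invented):

#include <linux/idr.h>

static DEFINE_IDR(conn_idr);

/* Allocates an ID in [1, INT_MAX], resuming after the last ID handed
 * out; @end == 0 is treated as one larger than INT_MAX, per the doc. */
static int new_conn_id(void *conn)
{
        return idr_alloc_cyclic(&conn_idr, conn, 1, 0, GFP_KERNEL);
}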
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
         void *entry = xa_entry(xas->xa, node, offset);
 
         xas->xa_node = node;
-        if (xa_is_sibling(entry)) {
+        while (xa_is_sibling(entry)) {
                 offset = xa_to_sibling(entry);
                 entry = xa_entry(xas->xa, node, offset);
                 if (node->shift && xa_is_node(entry))
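Why a loop: a 2^order aligned entry occupies 2^order consecutive slots, and every slot after the first holds a sibling entry pointing back at the canonical offset. A lookup racing with concurrent multi-order inserts and deletes could follow one sibling onto a slot that itself still held a sibling, and with a plain if that second sibling escaped to the caller. A sketch of a store that creates sibling slots (assumes CONFIG_XARRAY_MULTI; illustrative only):

#include <linux/xarray.h>

static DEFINE_XARRAY(xa);

/* One entry covering indices 16..31 (order 4); slots 17..31 then hold
 * sibling entries that refer back to slot 16. */
static void store_order4(void *entry)
{
        XA_STATE_ORDER(xas, &xa, 16, 4);

        do {
                xas_lock(&xas);
                xas_store(&xas, entry);
                xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));
}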
@@ -1802,6 +1802,9 @@ EXPORT_SYMBOL(xa_get_order);
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Expects xa_lock to be held on entry. May
  * release and reacquire xa_lock if @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
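Unlike xa_alloc(), the double-underscore version expects the caller to already hold xa_lock, typically because surrounding updates need it too; note it may still drop and retake the lock to allocate memory, as the context lines say. A hedged pattern (invented names):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(objects);

static int track_locked(void *obj)
{
        u32 id;
        int err;

        xa_lock(&objects);
        /* ... other updates made under the same lock ... */
        err = __xa_alloc(&objects, &id, obj, xa_limit_32b, GFP_KERNEL);
        xa_unlock(&objects);

        return err;
}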
@@ -1850,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Expects xa_lock to be held on entry. May
  * release and reacquire xa_lock if @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
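Both __xa_alloc() and __xa_alloc_cyclic() gain the same sentence; note that the cyclic version's return value of 1 ("succeeded after wrapping") is a success, so callers should test for err < 0, as in the new_session() sketch above, rather than for any nonzero value.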
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
         item_kill_tree(xa);
 }
 
-bool stop_iteration = false;
+bool stop_iteration;
 
 static void *creator_func(void *ptr)
 {
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
         pthread_t worker_thread[num_threads];
         int i;
 
+        stop_iteration = false;
         pthread_create(&worker_thread[0], NULL, &creator_func, xa);
         for (i = 1; i < num_threads; i++)
                 pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
         item_kill_tree(xa);
 }
 
+static void *load_creator(void *ptr)
+{
+        /* 'order' is set up to ensure we have sibling entries */
+        unsigned int order;
+        struct radix_tree_root *tree = ptr;
+        int i;
+
+        rcu_register_thread();
+        item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+        item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+        for (i = 0; i < 10000; i++) {
+                for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+                        unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+                                                (1 << order);
+                        item_insert_order(tree, index, order);
+                        item_delete_rcu(tree, index);
+                }
+        }
+        rcu_unregister_thread();
+
+        stop_iteration = true;
+        return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+        unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+        rcu_register_thread();
+        while (!stop_iteration) {
+                struct item *item = xa_load(ptr, index);
+                assert(!xa_is_internal(item));
+        }
+        rcu_unregister_thread();
+
+        return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+        const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+        pthread_t worker_thread[num_threads];
+        int i;
+
+        stop_iteration = false;
+        pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+        for (i = 1; i < num_threads; i++)
+                pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+        for (i = 0; i < num_threads; i++)
+                pthread_join(worker_thread[i], NULL);
+
+        item_kill_tree(xa);
+}
+
 static DEFINE_XARRAY(array);
 
 void multiorder_checks(void)
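The new regression test mirrors the reported workload: load_creator() churns multi-order insertions and RCU deletions just below index 3 << RADIX_TREE_MAP_SHIFT, so the slot at (3 << RADIX_TREE_MAP_SHIFT) - 1 repeatedly becomes a sibling slot, while the load_worker() threads hammer xa_load() on exactly that index. If a sibling (internal) entry ever escapes the lookup, the assert fires.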
@@ -218,12 +274,20 @@ void multiorder_checks(void)
         multiorder_iteration(&array);
         multiorder_tagged_iteration(&array);
         multiorder_iteration_race(&array);
+        load_race(&array);
 
         radix_tree_cpu_dead(0);
 }
 
-int __weak main(void)
+int __weak main(int argc, char **argv)
 {
+        int opt;
+
+        while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+                if (opt == 'v')
+                        test_verbose++;
+        }
+
         rcu_register_thread();
         radix_tree_init();
         multiorder_checks();
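The getopt string "ls:v" appears to mirror the options the shared radix-tree test harness already accepts; only -v is acted on here, bumping test_verbose so the standalone multiorder binary can emit verbose output.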