Merge branch 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax
Pull idr fix (and new tests) from Matthew Wilcox:
 "One urgent patch in here; freeing the correct IDA bitmap.
  Everything else is changes to the test suite"

* 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax:
  radix tree test suite: Specify -m32 in LDFLAGS too
  ida: Free correct IDA bitmap
  radix tree test suite: Depend on Makefile and quieten grep
  radix tree test suite: Fix build with --as-needed
  radix tree test suite: Build 32 bit binaries
  radix tree test suite: Add performance test for radix_tree_join()
  radix tree test suite: Add performance test for radix_tree_split()
  radix tree test suite: Add performance benchmarks
  radix tree test suite: Add test for radix_tree_clear_tags()
  radix tree test suite: Add tests for ida_simple_get() and ida_simple_remove()
  radix tree test suite: Add test for idr_get_next()
commit 9e91c144e6
lib/radix-tree.c
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
 		if (!bitmap)
 			return 0;
-		bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
-		kfree(bitmap);
+		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+			kfree(bitmap);
 	}
 
 	return 1;
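The one-line fix above hinges on what this_cpu_cmpxchg() returns: the previous contents of the per-cpu slot. A non-NULL return means the slot was already populated and the freshly allocated bitmap is the spare that must be freed; the old code instead freed whatever the slot held (leaving the per-cpu pointer dangling) and leaked the new allocation. A minimal userspace analogue of the corrected pattern, using a C11 atomic pointer in place of the per-cpu ida_bitmap slot; the names here (cached_slot, preload_bitmap) are invented for the sketch, this is not kernel code:

/*
 * Minimal userspace analogue of the corrected preload logic above,
 * written against C11 atomics instead of the kernel's per-cpu primitives.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ida_bitmap_like { unsigned long bits[16]; };

static _Atomic(struct ida_bitmap_like *) cached_slot;

static int preload_bitmap(void)
{
	struct ida_bitmap_like *fresh = calloc(1, sizeof(*fresh));
	struct ida_bitmap_like *expected = NULL;

	if (!fresh)
		return 0;
	/*
	 * Install "fresh" only if the slot is empty.  When the exchange
	 * fails, the slot already holds a usable bitmap, so the spare we
	 * just allocated is the one to free; freeing the cached bitmap
	 * instead (the pre-fix behaviour) would leave the slot pointing at
	 * freed memory and leak "fresh".
	 */
	if (!atomic_compare_exchange_strong(&cached_slot, &expected, fresh))
		free(fresh);
	return 1;
}

int main(void)
{
	preload_bitmap();	/* slot empty: installs the bitmap */
	preload_bitmap();	/* slot occupied: frees only the spare */
	printf("cached bitmap at %p\n", (void *)atomic_load(&cached_slot));
	free(atomic_load(&cached_slot));
	return 0;
}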
tools/testing/radix-tree/Makefile
@@ -1,6 +1,7 @@
 CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
-LDFLAGS += -lpthread -lurcu
+LDFLAGS += -fsanitize=address
+LDLIBS+= -lpthread -lurcu
 TARGETS = main idr-test multiorder
 CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
@@ -10,23 +11,25 @@ ifndef SHIFT
 SHIFT=3
 endif
 
+ifeq ($(BUILD), 32)
+ CFLAGS += -m32
+ LDFLAGS += -m32
+endif
+
 targets: mapshift $(TARGETS)
 
 main: $(OFILES)
-	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
 
 idr-test: idr-test.o $(CORE_OFILES)
-	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
 
 multiorder: multiorder.o $(CORE_OFILES)
-	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
 
 clean:
	$(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
 
 vpath %.c ../../lib
 
-$(OFILES): *.h */*.h generated/map-shift.h \
+$(OFILES): Makefile *.h */*.h generated/map-shift.h \
	../../include/linux/*.h \
	../../include/asm/*.h \
	../../../include/linux/radix-tree.h \
@@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c
 .PHONY: mapshift
 
 mapshift:
-	@if ! grep -qw $(SHIFT) generated/map-shift.h; then \
+	@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
		echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
			generated/map-shift.h; \
	fi
tools/testing/radix-tree/benchmark.c
@@ -17,6 +17,9 @@
 #include <time.h>
 #include "test.h"
 
+#define for_each_index(i, base, order) \
+		for (i = base; i < base + (1 << order); i++)
+
 #define NSEC_PER_SEC 1000000000L
 
 static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -57,27 +60,176 @@ again:
 	return nsec;
 }
 
+static void benchmark_insert(struct radix_tree_root *root,
+			     unsigned long size, unsigned long step, int order)
+{
+	struct timespec start, finish;
+	unsigned long index;
+	long long nsec;
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
+	for (index = 0 ; index < size ; index += step)
+		item_insert_order(root, index, order);
+
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+	       (finish.tv_nsec - start.tv_nsec);
+
+	printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
+		size, step, order, nsec);
+}
+
+static void benchmark_tagging(struct radix_tree_root *root,
+			      unsigned long size, unsigned long step, int order)
+{
+	struct timespec start, finish;
+	unsigned long index;
+	long long nsec;
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
+	for (index = 0 ; index < size ; index += step)
+		radix_tree_tag_set(root, index, 0);
+
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+	       (finish.tv_nsec - start.tv_nsec);
+
+	printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
+		size, step, order, nsec);
+}
+
+static void benchmark_delete(struct radix_tree_root *root,
+			     unsigned long size, unsigned long step, int order)
+{
+	struct timespec start, finish;
+	unsigned long index, i;
+	long long nsec;
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+
+	for (index = 0 ; index < size ; index += step)
+		for_each_index(i, index, order)
+			item_delete(root, i);
+
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+	       (finish.tv_nsec - start.tv_nsec);
+
+	printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
+		size, step, order, nsec);
+}
+
 static void benchmark_size(unsigned long size, unsigned long step, int order)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	long long normal, tagged;
-	unsigned long index;
 
-	for (index = 0 ; index < size ; index += step) {
-		item_insert_order(&tree, index, order);
-		radix_tree_tag_set(&tree, index, 0);
-	}
+	benchmark_insert(&tree, size, step, order);
+	benchmark_tagging(&tree, size, step, order);
 
 	tagged = benchmark_iter(&tree, true);
 	normal = benchmark_iter(&tree, false);
 
-	printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
-		size, step, order, tagged, normal);
+	printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
+			size, step, order, tagged);
+	printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
+			size, step, order, normal);
+
+	benchmark_delete(&tree, size, step, order);
 
 	item_kill_tree(&tree);
 	rcu_barrier();
 }
 
+static long long __benchmark_split(unsigned long index,
+				   int old_order, int new_order)
+{
+	struct timespec start, finish;
+	long long nsec;
+	RADIX_TREE(tree, GFP_ATOMIC);
+
+	item_insert_order(&tree, index, old_order);
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	radix_tree_split(&tree, index, new_order);
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+	       (finish.tv_nsec - start.tv_nsec);
+
+	item_kill_tree(&tree);
+
+	return nsec;
+
+}
+
+static void benchmark_split(unsigned long size, unsigned long step)
+{
+	int i, j, idx;
+	long long nsec = 0;
+
+
+	for (idx = 0; idx < size; idx += step) {
+		for (i = 3; i < 11; i++) {
+			for (j = 0; j < i; j++) {
+				nsec += __benchmark_split(idx, i, j);
+			}
+		}
+	}
+
+	printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
+			size, step, nsec);
+
+}
+
+static long long __benchmark_join(unsigned long index,
+				  unsigned order1, unsigned order2)
+{
+	unsigned long loc;
+	struct timespec start, finish;
+	long long nsec;
+	void *item, *item2 = item_create(index + 1, order1);
+	RADIX_TREE(tree, GFP_KERNEL);
+
+	item_insert_order(&tree, index, order2);
+	item = radix_tree_lookup(&tree, index);
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	radix_tree_join(&tree, index + 1, order1, item2);
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+		(finish.tv_nsec - start.tv_nsec);
+
+	loc = find_item(&tree, item);
+	if (loc == -1)
+		free(item);
+
+	item_kill_tree(&tree);
+
+	return nsec;
+}
+
+static void benchmark_join(unsigned long step)
+{
+	int i, j, idx;
+	long long nsec = 0;
+
+	for (idx = 0; idx < 1 << 10; idx += step) {
+		for (i = 1; i < 15; i++) {
+			for (j = 0; j < i; j++) {
+				nsec += __benchmark_join(idx, i, j);
+			}
+		}
+	}
+
+	printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
+			1 << 10, step, nsec);
+}
+
 void benchmark(void)
 {
 	unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -95,4 +247,11 @@ void benchmark(void)
 	for (c = 0; size[c]; c++)
 		for (s = 0; step[s]; s++)
 			benchmark_size(size[c], step[s] << 9, 9);
+
+	for (c = 0; size[c]; c++)
+		for (s = 0; step[s]; s++)
+			benchmark_split(size[c], step[s]);
+
+	for (s = 0; step[s]; s++)
+		benchmark_join(step[s]);
 }
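All of the new benchmark helpers above share one measurement pattern: take two CLOCK_MONOTONIC samples around the operation under test and report the difference in nanoseconds via NSEC_PER_SEC. Below is a stripped-down, standalone sketch of just that timing skeleton; the timed_region()/do_work() names are invented for the example, and the real helpers time radix tree operations instead of a dummy loop.

/* Standalone illustration of the clock_gettime() delta used by the
 * benchmark_*() helpers; compile with e.g. "cc -O2 timing.c". */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static void do_work(void)
{
	volatile unsigned long x = 0;	/* stand-in for the radix tree work */
	for (unsigned long i = 0; i < 1000000; i++)
		x += i;
}

static long long timed_region(void (*fn)(void))
{
	struct timespec start, finish;

	clock_gettime(CLOCK_MONOTONIC, &start);
	fn();
	clock_gettime(CLOCK_MONOTONIC, &finish);

	/* Same arithmetic as the benchmarks: whole seconds scaled to ns,
	 * plus the (possibly negative) nanosecond remainder. */
	return (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
	       (finish.tv_nsec - start.tv_nsec);
}

int main(void)
{
	printf("do_work: %lld ns\n", timed_region(do_work));
	return 0;
}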
tools/testing/radix-tree/idr-test.c
@@ -153,6 +153,30 @@ void idr_nowait_test(void)
 	idr_destroy(&idr);
 }
 
+void idr_get_next_test(void)
+{
+	unsigned long i;
+	int nextid;
+	DEFINE_IDR(idr);
+
+	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
+
+	for(i = 0; indices[i]; i++) {
+		struct item *item = item_create(indices[i], 0);
+		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
+				 GFP_KERNEL) == indices[i]);
+	}
+
+	for(i = 0, nextid = 0; indices[i]; i++) {
+		idr_get_next(&idr, &nextid);
+		assert(nextid == indices[i]);
+		nextid++;
+	}
+
+	idr_for_each(&idr, item_idr_free, &idr);
+	idr_destroy(&idr);
+}
+
 void idr_checks(void)
 {
 	unsigned long i;
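The new test relies on the contract of idr_get_next(): it returns the entry stored at the lowest populated ID greater than or equal to *nextid (or NULL when nothing is left) and writes that ID back through the pointer, which is why the test bumps nextid after each hit. A hypothetical helper in the same spirit, assuming the same test-harness environment as idr-test.c; the dump_all_ids() name is invented:

/* Walk every populated entry in ascending ID order, idr_get_next_test()-style.
 * Assumes the radix-tree test harness (DEFINE_IDR, idr_get_next, printf). */
static void dump_all_ids(struct idr *idr)
{
	void *entry;
	int id = 0;

	while ((entry = idr_get_next(idr, &id)) != NULL) {
		printf("id %d -> %p\n", id, entry);
		id++;		/* resume the search above the ID just reported */
	}
}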
@@ -202,6 +226,7 @@ void idr_checks(void)
 	idr_alloc_test();
 	idr_null_test();
 	idr_nowait_test();
+	idr_get_next_test();
 }
 
 /*
@@ -338,7 +363,7 @@ void ida_check_random(void)
 {
 	DEFINE_IDA(ida);
 	DECLARE_BITMAP(bitmap, 2048);
-	int id;
+	int id, err;
 	unsigned int i;
 	time_t s = time(NULL);
 
@@ -352,8 +377,11 @@ void ida_check_random(void)
 			ida_remove(&ida, bit);
 		} else {
 			__set_bit(bit, bitmap);
-			ida_pre_get(&ida, GFP_KERNEL);
-			assert(!ida_get_new_above(&ida, bit, &id));
+			do {
+				ida_pre_get(&ida, GFP_KERNEL);
+				err = ida_get_new_above(&ida, bit, &id);
+			} while (err == -ENOMEM);
+			assert(!err);
 			assert(id == bit);
 		}
 	}
@@ -362,6 +390,24 @@ void ida_check_random(void)
 		goto repeat;
 }
 
+void ida_simple_get_remove_test(void)
+{
+	DEFINE_IDA(ida);
+	unsigned long i;
+
+	for (i = 0; i < 10000; i++) {
+		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
+	}
+	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
+
+	for (i = 0; i < 10000; i++) {
+		ida_simple_remove(&ida, i);
+	}
+	assert(ida_is_empty(&ida));
+
+	ida_destroy(&ida);
+}
+
 void ida_checks(void)
 {
 	DEFINE_IDA(ida);
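For context, ida_simple_get()/ida_simple_remove() are the convenience pair the new test exercises: ida_simple_get(ida, start, end, gfp) hands back a free ID in [start, end) (an end of 0 or less means no upper bound) or a negative errno, and ida_simple_remove() returns the ID to the pool. A hedged sketch of the usual caller pattern, with invented my_dev_* names; this illustrates the API the test covers, it is not code from the kernel tree:

/* Illustrative driver-style usage of the IDA simple API (names invented). */
static DEFINE_IDA(my_dev_ida);

static int my_dev_register(void)
{
	/* end == 0: no upper bound on the allocated ID. */
	int id = ida_simple_get(&my_dev_ida, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;	/* typically -ENOMEM or -ENOSPC */
	/* ... use "id" as the instance number ... */
	return id;
}

static void my_dev_unregister(int id)
{
	ida_simple_remove(&my_dev_ida, id);
}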
@@ -428,15 +474,41 @@ void ida_checks(void)
 	ida_check_max();
 	ida_check_conv();
 	ida_check_random();
+	ida_simple_get_remove_test();
 
 	radix_tree_cpu_dead(1);
 }
 
+static void *ida_random_fn(void *arg)
+{
+	rcu_register_thread();
+	ida_check_random();
+	rcu_unregister_thread();
+	return NULL;
+}
+
+void ida_thread_tests(void)
+{
+	pthread_t threads[10];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(threads); i++)
+		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
+			perror("creating ida thread");
+			exit(1);
+		}
+
+	while (i--)
+		pthread_join(threads[i], NULL);
+}
+
 int __weak main(void)
 {
 	radix_tree_init();
 	idr_checks();
 	ida_checks();
+	ida_thread_tests();
 	radix_tree_cpu_dead(1);
 	rcu_barrier();
 	if (nr_allocated)
 		printf("nr_allocated = %d\n", nr_allocated);
tools/testing/radix-tree/main.c
@@ -368,6 +368,7 @@ int main(int argc, char **argv)
 	iteration_test(0, 10 + 90 * long_run);
 	iteration_test(7, 10 + 90 * long_run);
 	single_thread_tests(long_run);
+	ida_thread_tests();
 
 	/* Free any remaining preallocated nodes */
 	radix_tree_cpu_dead(0);
tools/testing/radix-tree/tag_check.c
@@ -330,6 +330,34 @@ static void single_check(void)
 	item_kill_tree(&tree);
 }
 
+void radix_tree_clear_tags_test(void)
+{
+	unsigned long index;
+	struct radix_tree_node *node;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	RADIX_TREE(tree, GFP_KERNEL);
+
+	item_insert(&tree, 0);
+	item_tag_set(&tree, 0, 0);
+	__radix_tree_lookup(&tree, 0, &node, &slot);
+	radix_tree_clear_tags(&tree, node, slot);
+	assert(item_tag_get(&tree, 0, 0) == 0);
+
+	for (index = 0; index < 1000; index++) {
+		item_insert(&tree, index);
+		item_tag_set(&tree, index, 0);
+	}
+
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_clear_tags(&tree, iter.node, slot);
+		assert(item_tag_get(&tree, iter.index, 0) == 0);
+	}
+
+	item_kill_tree(&tree);
+}
+
 void tag_check(void)
 {
 	single_check();
@@ -347,4 +375,5 @@ void tag_check(void)
 	thrash_tags();
 	rcu_barrier();
 	printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
+	radix_tree_clear_tags_test();
 }
tools/testing/radix-tree/test.h
@@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration);
 void benchmark(void);
 void idr_checks(void);
 void ida_checks(void);
+void ida_thread_tests(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);