/* linux/lib/rbtree_test.c */

#include <linux/module.h>
#include <linux/rbtree_augmented.h>
#include <linux/random.h>
#include <asm/timex.h>
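
/*
 * Test parameters: the module builds a tree of NODES pseudo-random keys,
 * repeats full insert/erase passes PERF_LOOPS times to measure cycles, and
 * validates the rbtree invariants over CHECK_LOOPS randomized passes.
 */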
#define NODES 100
#define PERF_LOOPS 100000
#define CHECK_LOOPS 100

struct test_node {
	u32 key;
	struct rb_node rb;

	/* following fields used for testing augmented rbtree functionality */
	u32 val;
	u32 augmented;
};

static struct rb_root root = RB_ROOT;
static struct test_node nodes[NODES];
static struct rnd_state rnd;
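
/*
 * Plain (non-augmented) insertion: walk down from the root comparing keys,
 * link the new node at the leaf position that was reached, and let
 * rb_insert_color() handle recoloring and rebalancing.
 */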
static void insert(struct test_node *node, struct rb_root *root)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	u32 key = node->key;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color(&node->rb, root);
}

static inline void erase(struct test_node *node, struct rb_root *root)
{
	rb_erase(&node->rb, root);
}
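
/*
 * ->augmented caches the maximum ->val over the node's subtree; recompute
 * it from the node's own value and the children's cached maxima.  This is
 * the reference used by check_augmented() below.
 */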
static inline u32 augment_recompute(struct test_node *node)
{
	u32 max = node->val, child_augmented;

	if (node->rb.rb_left) {
		child_augmented = rb_entry(node->rb.rb_left, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	if (node->rb.rb_right) {
		child_augmented = rb_entry(node->rb.rb_right, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	return max;
}
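
/*
 * RB_DECLARE_CALLBACKS generates the rb_augment_callbacks (propagate, copy
 * and rotate hooks) that the rbtree core invokes to keep ->augmented
 * consistent across rebalancing.
 */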
RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
		     u32, augmented, augment_recompute)
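
/*
 * Augmented insertion: the hand-coded search phase raises each ancestor's
 * cached maximum while walking down, then rb_insert_augmented() keeps the
 * values consistent through the rebalancing rotations.
 */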
static void insert_augmented(struct test_node *node, struct rb_root *root)
{
	struct rb_node **new = &root->rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else
			new = &parent->rb.rb_right;
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented(&node->rb, root, &augment_callbacks);
}
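
/*
 * Augmented erase needs the same callbacks so that the cached maxima along
 * the removal and rebalancing path are recomputed.
 */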
static void erase_augmented(struct test_node *node, struct rb_root *root)
{
	rb_erase_augmented(&node->rb, root, &augment_callbacks);
}
static void init(void)
{
	int i;

	for (i = 0; i < NODES; i++) {
		nodes[i].key = prandom_u32_state(&rnd);
		nodes[i].val = prandom_u32_state(&rnd);
	}
}
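
/*
 * The node color lives in bit 0 of __rb_parent_color (0 == red,
 * 1 == black), so these checks peek directly at rbtree internals.
 */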
static bool is_red(struct rb_node *rb)
{
	return !(rb->__rb_parent_color & 1);
}
static int black_path_count(struct rb_node *rb)
{
	int count;

	for (count = 0; rb; rb = rb_parent(rb))
		count += !is_red(rb);
	return count;
}
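
/*
 * The two checks below only verify that a postorder traversal visits the
 * expected number of nodes, once through the _foreach helper and once
 * through the bare iterator.
 */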
static void check_postorder_foreach(int nr_nodes)
{
	struct test_node *cur, *n;
	int count = 0;

	rbtree_postorder_for_each_entry_safe(cur, n, &root, rb)
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}
static void check_postorder(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0;

	for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb))
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}
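
/*
 * Full red-black invariant check: keys are visited in non-decreasing order,
 * a red node never has a red parent, every node with fewer than two
 * children sees the same number of black nodes on its path to the root,
 * and the node count is at least 2^(black height) - 1.
 */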
static void check(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0, blacks = 0;
	u32 prev_key = 0;

	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		WARN_ON_ONCE(node->key < prev_key);
		WARN_ON_ONCE(is_red(rb) &&
			     (!rb_parent(rb) || is_red(rb_parent(rb))));
		if (!count)
			blacks = black_path_count(rb);
		else
			WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
				     blacks != black_path_count(rb));
		prev_key = node->key;
		count++;
	}

	WARN_ON_ONCE(count != nr_nodes);
	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);

	check_postorder(nr_nodes);
	check_postorder_foreach(nr_nodes);
}
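
/*
 * On top of the basic invariants, verify every cached ->augmented value
 * against a full recomputation.
 */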
static void check_augmented(int nr_nodes)
{
	struct rb_node *rb;

	check(nr_nodes);
	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		WARN_ON_ONCE(node->augmented != augment_recompute(node));
	}
}
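
/*
 * Module init runs both test phases twice, once for the plain and once for
 * the augmented API: a timed loop of PERF_LOOPS insert/erase passes, and
 * CHECK_LOOPS passes that validate the tree after every operation.
 */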
static int __init rbtree_test_init(void)
{
	int i, j;
	cycles_t time1, time2, time;

	printk(KERN_ALERT "rbtree testing");
	prandom_seed_state(&rnd, 3141592653589793238ULL);

	init();

	time1 = get_cycles();

	for (i = 0; i < PERF_LOOPS; i++) {
		for (j = 0; j < NODES; j++)
			insert(nodes + j, &root);
		for (j = 0; j < NODES; j++)
			erase(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, PERF_LOOPS);
	printk(" -> %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < CHECK_LOOPS; i++) {
		init();
		for (j = 0; j < NODES; j++) {
			check(j);
			insert(nodes + j, &root);
		}
		for (j = 0; j < NODES; j++) {
			check(NODES - j);
			erase(nodes + j, &root);
		}
		check(0);
	}

	printk(KERN_ALERT "augmented rbtree testing");

	init();

	time1 = get_cycles();

	for (i = 0; i < PERF_LOOPS; i++) {
		for (j = 0; j < NODES; j++)
			insert_augmented(nodes + j, &root);
		for (j = 0; j < NODES; j++)
			erase_augmented(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, PERF_LOOPS);
	printk(" -> %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < CHECK_LOOPS; i++) {
		init();
		for (j = 0; j < NODES; j++) {
			check_augmented(j);
			insert_augmented(nodes + j, &root);
		}
		for (j = 0; j < NODES; j++) {
			check_augmented(NODES - j);
			erase_augmented(nodes + j, &root);
		}
		check_augmented(0);
	}

	return -EAGAIN; /* failing init keeps the test module from staying loaded */
}

static void __exit rbtree_test_exit(void)
{
	printk(KERN_ALERT "test exit\n");
}

module_init(rbtree_test_init)
module_exit(rbtree_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Red Black Tree test");