linux-next/lib/cpumask_kunit.c
commit 596ff4a09b ("cpumask: re-introduce constant-sized cpumask optimizations")
Author: Linus Torvalds <torvalds@linux-foundation.org>
Commit aa47a7c215 ("lib/cpumask: deprecate nr_cpumask_bits") resulted
in the cpumask operations potentially becoming hugely less efficient,
because suddenly the cpumask was always considered to be variable-sized.

The optimization was then later added back in a limited form by commit
6f9c07be9d ("lib/cpumask: add FORCE_NR_CPUS config option"), but that
FORCE_NR_CPUS option is not useful in a generic kernel and more of a
special case for embedded situations with fixed hardware.

Instead, just re-introduce the optimization, with some changes.

Instead of depending on CPUMASK_OFFSTACK being false, and then always
using the full constant cpumask width, this introduces three different
cpumask "sizes":

 - the exact size (nr_cpumask_bits) remains identical to nr_cpu_ids.

   This is used for situations where we should use the exact size.

 - the "small" size (small_cpumask_bits) is the NR_CPUS constant if it
   fits in a single word and the bitmap operations thus end up able
   to trigger the "small_const_nbits()" optimizations.

   This is used for the operations that have optimized single-word
   cases that get inlined, notably the bit find and scanning functions.

 - the "large" size (large_cpumask_bits) is the NR_CPUS constant if it
   is a sufficiently small constant that makes simple "copy" and
   "clear" operations more efficient.

   This is arbitrarily set at four words or less (see the sketch below).
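
As a rough sketch (simplified, and not necessarily the verbatim
preprocessor logic the commit adds to <linux/cpumask.h>), the size
selection looks like:

        #if NR_CPUS <= BITS_PER_LONG
          #define small_cpumask_bits ((unsigned int)NR_CPUS)
          #define large_cpumask_bits ((unsigned int)NR_CPUS)
        #elif NR_CPUS <= 4*BITS_PER_LONG
          #define small_cpumask_bits nr_cpumask_bits
          #define large_cpumask_bits ((unsigned int)NR_CPUS)
        #else
          #define small_cpumask_bits nr_cpumask_bits
          #define large_cpumask_bits nr_cpumask_bits
        #endif
        #define nr_cpumask_bits ((unsigned int)nr_cpu_ids)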

As an example of this situation, without this fixed-size optimization,
cpumask_clear() will generate code like

        movl    nr_cpu_ids(%rip), %edx
        addq    $63, %rdx
        shrq    $3, %rdx
        andl    $-8, %edx
        callq   memset@PLT

on x86-64, because it would calculate the "exact" number of longwords
that need to be cleared.

In contrast, with this patch and NR_CPUS set to 64 (which is quite a
reasonable value to use), the above becomes a single

	movq $0,cpumask

instruction instead, because instead of caring to figure out exactly how
many CPUs the system has, it just knows that the cpumask will be a
single word and can just clear it all.
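
The saving comes from the clearing helper handing a compile-time
constant down to the bitmap layer.  A minimal sketch of the shape of
that helper (close to, but not necessarily verbatim, the kernel body):

        static inline void cpumask_clear(struct cpumask *dstp)
        {
                /* compile-time constant when NR_CPUS is four words or less */
                bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
        }

When large_cpumask_bits is a constant that fits in one word,
bitmap_zero() takes its small_const_nbits() inline path and the whole
call collapses into the single store shown above.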

Note that this does end up tightening the rules a bit from the original
version in another way: operations that set bits in the cpumask are now
limited to the actual nr_cpu_ids limit, whereas we used to do the
nr_cpumask_bits thing almost everywhere in the cpumask code.

But if you just clear bits, or scan for bits, we can use the simpler
compile-time constants.
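
Concretely, a scanning helper can use the "small" compile-time size,
while a setter has to respect the real CPU count.  Hypothetical
simplified bodies (the real helpers carry more checks):

        /* scanning: a compile-time-constant size is safe here */
        static inline unsigned int cpumask_first(const struct cpumask *srcp)
        {
                return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
        }

        /* setting: the bit index is validated against nr_cpu_ids */
        static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
        {
                set_bit(cpumask_check(cpu), cpumask_bits(dstp));
        }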

In the process, remove 'cpumask_complement()' and 'for_each_cpu_not()'
which were not useful, and which fundamentally have to be limited to
'nr_cpu_ids'.  Better remove them now than have somebody introduce use
of them later.

Of course, on x86-64 with MAXSMP there is no sane small compile-time
constant for the cpumask sizes, and we end up using the actual CPU bits,
and will generate the above kind of horrors regardless.  Please don't
use MAXSMP unless you really expect to have machines with thousands of
cores.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Date: 2023-03-05 14:30:34 -08:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KUnit tests for cpumask.
 *
 * Author: Sander Vanheule <sander@svanheule.net>
 */
#include <kunit/test.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Format a failure message showing the mask's name and contents */
#define MASK_MSG(m) \
	"%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
	nr_cpumask_bits, cpumask_bits(m)

/* Check that for_each_cpu() visits exactly cpumask_weight(mask) CPUs */
#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
	do { \
		const cpumask_t *m = (mask); \
		int mask_weight = cpumask_weight(m); \
		int cpu, iter = 0; \
		for_each_cpu(cpu, m) \
			iter++; \
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
	} while (0)

/*
 * Check that for_each_cpu_##op() visits exactly as many CPUs as the
 * corresponding cpumask_##op() result contains.
 */
#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
	do { \
		const cpumask_t *m1 = (mask1); \
		const cpumask_t *m2 = (mask2); \
		int weight; \
		int cpu, iter = 0; \
		cpumask_##op(&mask_tmp, m1, m2); \
		weight = cpumask_weight(&mask_tmp); \
		for_each_cpu_##op(cpu, mask1, mask2) \
			iter++; \
		KUNIT_EXPECT_EQ((test), weight, iter); \
	} while (0)

/* Check that a wrapped iteration still visits every set CPU exactly once */
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
	do { \
		const cpumask_t *m = (mask); \
		int mask_weight = cpumask_weight(m); \
		int cpu, iter = 0; \
		for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
			iter++; \
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
	} while (0)

/* Check the iterators for the built-in masks (possible/online/present) */
#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
	do { \
		int mask_weight = num_##name##_cpus(); \
		int cpu, iter = 0; \
		for_each_##name##_cpu(cpu) \
			iter++; \
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
	} while (0)

static cpumask_t mask_empty;
static cpumask_t mask_all;
static cpumask_t mask_tmp;

static void test_cpumask_weight(struct kunit *test)
{
	KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));

	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
}

static void test_cpumask_first(struct kunit *test)
{
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));

	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

static void test_cpumask_last(struct kunit *test)
{
	KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
			    MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

static void test_cpumask_next(struct kunit *test)
{
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));

	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
			    MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

static void test_cpumask_iterators(struct kunit *test)
{
	EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);

	EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
}

static void test_cpumask_iterators_builtin(struct kunit *test)
{
	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);

	/* Ensure the dynamic masks are stable while running the tests */
	cpu_hotplug_disable();
	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
	cpu_hotplug_enable();
}

static int test_cpumask_init(struct kunit *test)
{
	cpumask_clear(&mask_empty);
	cpumask_setall(&mask_all);

	return 0;
}

static struct kunit_case test_cpumask_cases[] = {
	KUNIT_CASE(test_cpumask_weight),
	KUNIT_CASE(test_cpumask_first),
	KUNIT_CASE(test_cpumask_last),
	KUNIT_CASE(test_cpumask_next),
	KUNIT_CASE(test_cpumask_iterators),
	KUNIT_CASE(test_cpumask_iterators_builtin),
	{}
};

static struct kunit_suite test_cpumask_suite = {
	.name = "cpumask",
	.init = test_cpumask_init,
	.test_cases = test_cpumask_cases,
};
kunit_test_suite(test_cpumask_suite);
MODULE_LICENSE("GPL");
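
To run this suite, enable CONFIG_CPUMASK_KUNIT_TEST (plus KUnit
itself); with the kunit.py wrapper, something along the lines of

        ./tools/testing/kunit/kunit.py run --kconfig_add CONFIG_CPUMASK_KUNIT_TEST=y 'cpumask*'

should build a UML kernel and execute the cases above (exact flags may
vary by kernel version).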