Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-19 00:54:41 +08:00
eee1950192
Allow use of hashmap in perf. Modify perf's check-headers.sh script to
check that the files are kept in sync, in the same way kernel headers
are checked. This will warn if they are out of sync at the start of a
perf build.

Committer note:

This starts out of sync as a fix went through the bpf tree, namely the
one removing the needless libbpf_internal.h include in hashmap.h.

There is also another change related to __WORDSIZE that, as is in
tools/lib/bpf/hashmap.h, causes the tools/perf/ build to fail on
systems such as Alpine Linux, which uses the musl libc, so we need an
alternative way of having __WORDSIZE available; use the one used by
tools/include/linux/bitops.h, which builds on all the systems I have
build containers for.

These differences will be resolved at some point, so keep the warning
in check-headers.sh as a reminder.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: bpf@vger.kernel.org
Cc: kp singh <kpsingh@chromium.org>
Cc: netdev@vger.kernel.org
Link: http://lore.kernel.org/lkml/20200515221732.44078-5-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
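For context, a minimal usage sketch of the hashmap API that this commit makes available to perf, implemented in the file below. It is illustrative only, not part of the commit or the file: it assumes the declarations from hashmap.h (hashmap__new(), hashmap__insert() with the HASHMAP_SET strategy, hashmap__find(), hashmap__size(), hashmap__free()) and IS_ERR() from tools/include/linux/err.h; the ptr_hash/ptr_equal callbacks and main() are made up for the example.

/* Minimal usage sketch (illustrative, not part of the commit). */
#include <stdio.h>
#include <stdint.h>
#include <linux/err.h>
#include "hashmap.h"

/*
 * Keys in this example are small integers packed into pointers, so hashing
 * and comparing the pointer value itself is sufficient.
 */
static size_t ptr_hash(const void *key, void *ctx)
{
	return (size_t)(uintptr_t)key;
}

static bool ptr_equal(const void *key1, const void *key2, void *ctx)
{
	return key1 == key2;
}

int main(void)
{
	struct hashmap *map = hashmap__new(ptr_hash, ptr_equal, NULL);
	void *value;
	uintptr_t i;

	if (IS_ERR(map))
		return 1;

	/* HASHMAP_SET inserts the key or overwrites an existing entry */
	for (i = 1; i <= 4; i++)
		hashmap__insert(map, (const void *)i, (void *)(i * 10),
				HASHMAP_SET, NULL, NULL);

	if (hashmap__find(map, (const void *)(uintptr_t)3, &value))
		printf("key 3 -> %lu (size %zu)\n",
		       (unsigned long)(uintptr_t)value, hashmap__size(map));

	hashmap__free(map);
	return 0;
}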
239 lines
4.9 KiB
C
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Generic non-thread safe hash map implementation.
 *
 * Copyright (c) 2019 Facebook
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2

static void hashmap_add_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	entry->next = *pprev;
	*pprev = entry;
}

static void hashmap_del_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	*pprev = entry->next;
	entry->next = NULL;
}

void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
		   hashmap_equal_fn equal_fn, void *ctx)
{
	map->hash_fn = hash_fn;
	map->equal_fn = equal_fn;
	map->ctx = ctx;

	map->buckets = NULL;
	map->cap = 0;
	map->cap_bits = 0;
	map->sz = 0;
}

struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
			     hashmap_equal_fn equal_fn,
			     void *ctx)
{
	struct hashmap *map = malloc(sizeof(struct hashmap));

	if (!map)
		return ERR_PTR(-ENOMEM);
	hashmap__init(map, hash_fn, equal_fn, ctx);
	return map;
}

void hashmap__clear(struct hashmap *map)
{
	struct hashmap_entry *cur, *tmp;
	size_t bkt;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		free(cur);
	}
	free(map->buckets);
	map->buckets = NULL;
	map->cap = map->cap_bits = map->sz = 0;
}

void hashmap__free(struct hashmap *map)
{
	if (!map)
		return;

	hashmap__clear(map);
	free(map);
}

size_t hashmap__size(const struct hashmap *map)
{
	return map->sz;
}

size_t hashmap__capacity(const struct hashmap *map)
{
	return map->cap;
}

static bool hashmap_needs_to_grow(struct hashmap *map)
{
	/* grow if empty or more than 75% filled */
	return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
}

static int hashmap_grow(struct hashmap *map)
{
	struct hashmap_entry **new_buckets;
	struct hashmap_entry *cur, *tmp;
	size_t new_cap_bits, new_cap;
	size_t h, bkt;

	new_cap_bits = map->cap_bits + 1;
	if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
		new_cap_bits = HASHMAP_MIN_CAP_BITS;

	new_cap = 1UL << new_cap_bits;
	new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
	if (!new_buckets)
		return -ENOMEM;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
		hashmap_add_entry(&new_buckets[h], cur);
	}

	map->cap = new_cap;
	map->cap_bits = new_cap_bits;
	free(map->buckets);
	map->buckets = new_buckets;

	return 0;
}

static bool hashmap_find_entry(const struct hashmap *map,
			       const void *key, size_t hash,
			       struct hashmap_entry ***pprev,
			       struct hashmap_entry **entry)
{
	struct hashmap_entry *cur, **prev_ptr;

	if (!map->buckets)
		return false;

	for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
	     cur;
	     prev_ptr = &cur->next, cur = cur->next) {
		if (map->equal_fn(cur->key, key, map->ctx)) {
			if (pprev)
				*pprev = prev_ptr;
			*entry = cur;
			return true;
		}
	}

	return false;
}

int hashmap__insert(struct hashmap *map, const void *key, void *value,
		    enum hashmap_insert_strategy strategy,
		    const void **old_key, void **old_value)
{
	struct hashmap_entry *entry;
	size_t h;
	int err;

	if (old_key)
		*old_key = NULL;
	if (old_value)
		*old_value = NULL;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (strategy != HASHMAP_APPEND &&
	    hashmap_find_entry(map, key, h, NULL, &entry)) {
		if (old_key)
			*old_key = entry->key;
		if (old_value)
			*old_value = entry->value;

		if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
			entry->key = key;
			entry->value = value;
			return 0;
		} else if (strategy == HASHMAP_ADD) {
			return -EEXIST;
		}
	}

	if (strategy == HASHMAP_UPDATE)
		return -ENOENT;

	if (hashmap_needs_to_grow(map)) {
		err = hashmap_grow(map);
		if (err)
			return err;
		h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	}

	entry = malloc(sizeof(struct hashmap_entry));
	if (!entry)
		return -ENOMEM;

	entry->key = key;
	entry->value = value;
	hashmap_add_entry(&map->buckets[h], entry);
	map->sz++;

	return 0;
}

bool hashmap__find(const struct hashmap *map, const void *key, void **value)
{
	struct hashmap_entry *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, NULL, &entry))
		return false;

	if (value)
		*value = entry->value;
	return true;
}

bool hashmap__delete(struct hashmap *map, const void *key,
		     const void **old_key, void **old_value)
{
	struct hashmap_entry **pprev, *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, &pprev, &entry))
		return false;

	if (old_key)
		*old_key = entry->key;
	if (old_value)
		*old_value = entry->value;

	hashmap_del_entry(pprev, entry);
	free(entry);
	map->sz--;

	return true;
}