Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-27 14:43:58 +08:00
Commit 1879fd6a26

Now that the whole dcache_hash_bucket crap is gone, go all the way and also remove the weird locking layering violations for locking the hash buckets. Add hlist_bl_lock/unlock helpers to move the locking into the list abstraction instead of requiring each caller to open code it. After all, allowing for the bit locks is the whole point of these helpers over the plain hlist variant.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
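For illustration, a minimal sketch of what a caller of the new helpers might look like; struct foo, FOO_HASH_BITS, foo_table and foo_hash_insert() are hypothetical names, not part of this commit. The point is only that hlist_bl_lock()/hlist_bl_unlock() now pair around the bucket modification instead of each caller open-coding bit_spin_lock(0, ...) on the head pointer.

/*
 * Hypothetical caller, for illustration only: a tiny hash table whose
 * buckets are hlist_bl_heads. struct foo, FOO_HASH_BITS, foo_table and
 * foo_hash_insert() are made-up names, not part of this commit.
 */
#include <linux/list_bl.h>
#include <linux/hash.h>

#define FOO_HASH_BITS	7

struct foo {
	unsigned long		key;
	struct hlist_bl_node	hash_node;
};

static struct hlist_bl_head foo_table[1 << FOO_HASH_BITS];

static void foo_hash_insert(struct foo *f)
{
	struct hlist_bl_head *b = &foo_table[hash_long(f->key, FOO_HASH_BITS)];

	hlist_bl_lock(b);	/* was open-coded as bit_spin_lock(0, ...) on the head */
	hlist_bl_add_head(&f->hash_node, b);
	hlist_bl_unlock(b);
}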
157 lines · 4.0 KiB · C
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where head of the list has a lock in the lowest
 * bit. This is useful for scalable hash tables without increasing memory
 * footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
 */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif


struct hlist_bl_head {
	struct hlist_bl_node *first;
};

struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)

static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)

static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
{
	return !h->pprev;
}

static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_set_first(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}

static inline int hlist_bl_empty(const struct hlist_bl_head *h)
{
	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_add_head(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;
	hlist_bl_set_first(h, n);
}

static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	*pprev = (struct hlist_bl_node *)
			((unsigned long)next |
			 ((unsigned long)*pprev & LIST_BL_LOCKMASK));
	if (next)
		next->pprev = pprev;
}

static inline void hlist_bl_del(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		INIT_HLIST_BL_NODE(n);
	}
}

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)b);
}

/**
 * hlist_bl_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
	for (pos = hlist_bl_first(head);				\
	     pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	\
	for (pos = hlist_bl_first(head);				\
	     pos && ({ n = pos->next; 1; }) &&				\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

#endif
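For completeness, a hedged sketch of a lookup over one bucket using hlist_bl_for_each_entry(); it reuses the hypothetical struct foo, foo_table and FOO_HASH_BITS names from the insertion sketch above and is not part of the header. The iterators provide no locking of their own, so the bucket lock is held across the walk.

/*
 * Illustration only, not part of the header: walk one bucket looking for
 * a matching key. struct foo, foo_table, FOO_HASH_BITS and
 * foo_hash_lookup() are hypothetical names from the insertion sketch.
 */
static struct foo *foo_hash_lookup(unsigned long key)
{
	struct hlist_bl_head *b = &foo_table[hash_long(key, FOO_HASH_BITS)];
	struct hlist_bl_node *pos;
	struct foo *f;

	hlist_bl_lock(b);
	hlist_bl_for_each_entry(f, pos, b, hash_node) {
		if (f->key == key) {
			hlist_bl_unlock(b);
			return f;
		}
	}
	hlist_bl_unlock(b);
	return NULL;
}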