
mbcache: get rid of _e_hash_list_head

Get rid of field _e_hash_list_head in cache entries and add bit field
e_referenced instead.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Authored by Andreas Gruenbacher on 2016-02-22 22:42:05 -05:00; committed by Theodore Ts'o
parent 2335d05f3a
commit dc8d5e565f
2 changed files with 12 additions and 37 deletions
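
In short, the old layout packed two things into one unsigned long: the pointer to the entry's hash bucket head and, in its low bit, a "referenced" flag. The new layout keeps a plain one-bit field and recomputes the bucket head from the key whenever it is needed. The following standalone C sketch illustrates the two schemes side by side (old_entry, new_entry and bucket are illustrative names, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old scheme: one word holds the bucket head pointer, low bit = "referenced". */
struct old_entry {
	unsigned long _e_hash_list_head;
};

/* New scheme: a plain one-bit flag; the bucket head is recomputed from the key. */
struct new_entry {
	uint32_t e_key;
	uint32_t e_referenced:1;
};

static void *old_entry_head(const struct old_entry *e)
{
	return (void *)(e->_e_hash_list_head & ~1UL);	/* mask off the flag bit */
}

int main(void)
{
	static unsigned long bucket;	/* stand-in for a bucket head; aligned, so bit 0 is free */
	struct old_entry o = { ._e_hash_list_head = (unsigned long)&bucket };
	struct new_entry n = { .e_key = 42 };

	o._e_hash_list_head |= 1;	/* mark referenced, old way */
	n.e_referenced = 1;		/* mark referenced, new way */

	assert(old_entry_head(&o) == (void *)&bucket);
	printf("old: referenced=%lu  new: referenced=%u\n",
	       o._e_hash_list_head & 1, n.e_referenced);
	return 0;
}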

fs/mbcache.c

@@ -45,27 +45,10 @@ static struct kmem_cache *mb_entry_cache;
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
 				     unsigned int nr_to_scan);
 
-static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry)
+static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
+							u32 key)
 {
-	return entry->_e_hash_list_head & 1;
-}
-
-static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry)
-{
-	entry->_e_hash_list_head |= 1;
-}
-
-static inline void mb_cache_entry_clear_referenced(
-					struct mb_cache_entry *entry)
-{
-	entry->_e_hash_list_head &= ~1;
-}
-
-static inline struct hlist_bl_head *mb_cache_entry_head(
-					struct mb_cache_entry *entry)
-{
-	return (struct hlist_bl_head *)
-			(entry->_e_hash_list_head & ~1);
+	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 }
 
 /*
@@ -108,8 +91,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	atomic_set(&entry->e_refcnt, 1);
 	entry->e_key = key;
 	entry->e_block = block;
-	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
-	entry->_e_hash_list_head = (unsigned long)head;
+	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
 		if (dup->e_key == key && dup->e_block == block) {
@@ -146,10 +128,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
 	struct hlist_bl_node *node;
 	struct hlist_bl_head *head;
 
-	if (entry)
-		head = mb_cache_entry_head(entry);
-	else
-		head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
 		node = entry->e_hash_list.next;
@@ -219,7 +198,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
 	struct hlist_bl_head *head;
 	struct mb_cache_entry *entry;
 
-	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	head = mb_cache_entry_head(cache, key);
 	hlist_bl_lock(head);
 	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
 		if (entry->e_key == key && entry->e_block == block) {
@@ -250,7 +229,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_block);
 void mb_cache_entry_touch(struct mb_cache *cache,
 			  struct mb_cache_entry *entry)
 {
-	mb_cache_entry_set_referenced(entry);
+	entry->e_referenced = 1;
 }
 EXPORT_SYMBOL(mb_cache_entry_touch);
 
@@ -275,8 +254,8 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (mb_cache_entry_referenced(entry)) {
-			mb_cache_entry_clear_referenced(entry);
+		if (entry->e_referenced) {
+			entry->e_referenced = 0;
 			list_move_tail(&cache->c_list, &entry->e_list);
 			continue;
 		}
@@ -287,7 +266,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 		 * from under us.
 		 */
 		spin_unlock(&cache->c_list_lock);
-		head = mb_cache_entry_head(entry);
+		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
 		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
 			hlist_bl_del_init(&entry->e_hash_list);

include/linux/mbcache.h

@@ -12,18 +12,14 @@ struct mb_cache;
 struct mb_cache_entry {
 	/* List of entries in cache - protected by cache->c_list_lock */
 	struct list_head	e_list;
-	/* Hash table list - protected by bitlock in e_hash_list_head */
+	/* Hash table list - protected by hash chain bitlock */
 	struct hlist_bl_node	e_hash_list;
 	atomic_t		e_refcnt;
 	/* Key in hash - stable during lifetime of the entry */
 	u32			e_key;
+	u32			e_referenced:1;
 	/* Block number of hashed block - stable during lifetime of the entry */
 	sector_t		e_block;
-	/*
-	 * Head of hash list (for list bit lock) - stable. Combined with
-	 * referenced bit of entry
-	 */
-	unsigned long		_e_hash_list_head;
 };
 
 struct mb_cache *mb_cache_create(int bucket_bits);
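
With _e_hash_list_head gone, every call site derives the hash chain from the cache and the key alone, as the new mb_cache_entry_head(cache, key) does with hash_32(). A rough standalone sketch of that lookup (toy_cache, toy_hash_32 and toy_entry_head are illustrative stand-ins, and the hash constant is not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the cache and its hash table (illustrative only). */
struct toy_cache {
	int	c_bucket_bits;	/* log2 of the number of buckets */
	void	**c_hash;	/* array of 1 << c_bucket_bits bucket heads */
};

/* Multiplicative hash in the spirit of hash_32(); the constant is illustrative. */
static uint32_t toy_hash_32(uint32_t val, unsigned int bits)
{
	return (val * 2654435761u) >> (32 - bits);
}

/* Analogue of the new mb_cache_entry_head(): bucket derived from key only. */
static void **toy_entry_head(struct toy_cache *cache, uint32_t key)
{
	return &cache->c_hash[toy_hash_32(key, cache->c_bucket_bits)];
}

int main(void)
{
	struct toy_cache cache = { .c_bucket_bits = 6 };

	cache.c_hash = calloc(1u << cache.c_bucket_bits, sizeof(*cache.c_hash));
	/* The same key always maps to the same bucket, so nothing has to be
	 * remembered per entry. */
	printf("bucket for key 42: %ld\n",
	       (long)(toy_entry_head(&cache, 42) - cache.c_hash));
	free(cache.c_hash);
	return 0;
}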