Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-23 20:53:53 +08:00)
1ab6c4997e

Convert the filesystem shrinkers to use the new API, and standardise some of
the behaviours of the shrinkers at the same time.  For example, nr_to_scan
means the number of objects to scan, not the number of objects to free.

I refactored the CIFS idmap shrinker a little - it really needs to be broken
up into a shrinker per tree and keep an item count with the tree root so that
we don't need to walk the tree every time the shrinker needs to count the
number of objects in the tree (i.e. all the time under memory pressure).

[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock. Fixes are needed mainly due to new code merged in the tree]
[assorted fixes folded in]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
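For reference, a minimal sketch of the shape the new shrinker API expects, as described above: ->count_objects reports how many objects could be freed, and ->scan_objects walks at most sc->nr_to_scan of them and returns the number actually freed. The foo_* names and helpers below are illustrative assumptions, not code from this commit; mbcache's real callbacks appear in the file that follows.

#include <linux/shrinker.h>

/* Hypothetical helpers standing in for a subsystem's own bookkeeping. */
static unsigned long foo_count_cached(void);
static unsigned long foo_free_some(unsigned long nr, gfp_t gfp_mask);

/* Report how many objects are reclaimable right now. */
static unsigned long foo_shrink_count(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        return foo_count_cached();
}

/* Scan up to sc->nr_to_scan objects and return how many were freed. */
static unsigned long foo_shrink_scan(struct shrinker *shrink,
                                     struct shrink_control *sc)
{
        return foo_free_some(sc->nr_to_scan, sc->gfp_mask);
}

static struct shrinker foo_shrinker = {
        .count_objects  = foo_shrink_count,
        .scan_objects   = foo_shrink_scan,
        .seeks          = DEFAULT_SEEKS,
};

/*
 * Registered with register_shrinker(&foo_shrinker) at init and removed with
 * unregister_shrinker(&foo_shrinker) on exit, exactly as mbcache does at the
 * bottom of this file.
 */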
626 lines
16 KiB
C
/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
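
/*
 * Illustrative usage sketch of the handle API described above (a
 * hypothetical caller; the ext2/ext3/ext4 xattr code is the real user):
 * allocate a handle, insert it, then drop the handle so the entry becomes
 * valid and can move to the lru list once idle.
 *
 *      ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *      if (ce) {
 *              if (mb_cache_entry_insert(ce, bdev, block, key) == 0)
 *                      mb_cache_entry_release(ce);
 *              else
 *                      mb_cache_entry_free(ce);   (-EBUSY: block already cached)
 *      }
 */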

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
                printk(KERN_DEBUG f); \
                printk("\n"); \
        } while (0)
#define mb_assert(c) do { if (!(c)) \
                printk(KERN_ERR "assertion " #c " failed\n"); \
        } while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
                printk(KERN_ERR f); \
                printk("\n"); \
        } while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
        return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
        if (__mb_cache_entry_is_hashed(ce)) {
                list_del_init(&ce->e_block_list);
                list_del(&ce->e_index.o_list);
        }
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
        struct mb_cache *cache = ce->e_cache;

        mb_assert(!(ce->e_used || ce->e_queued));
        kmem_cache_free(cache->c_entry_cache, ce);
        atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
        __releases(mb_cache_spinlock)
{
        /* Wake up all processes queuing for this cache entry. */
        if (ce->e_queued)
                wake_up_all(&mb_cache_queue);
        if (ce->e_used >= MB_CACHE_WRITER)
                ce->e_used -= MB_CACHE_WRITER;
        ce->e_used--;
        if (!(ce->e_used || ce->e_queued)) {
                if (!__mb_cache_entry_is_hashed(ce))
                        goto forget;
                mb_assert(list_empty(&ce->e_lru_list));
                list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
        }
        spin_unlock(&mb_cache_spinlock);
        return;
forget:
        spin_unlock(&mb_cache_spinlock);
        __mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_scan() memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        LIST_HEAD(free_list);
        struct mb_cache_entry *entry, *tmp;
        int nr_to_scan = sc->nr_to_scan;
        gfp_t gfp_mask = sc->gfp_mask;
        unsigned long freed = 0;

        mb_debug("trying to free %d entries", nr_to_scan);
        spin_lock(&mb_cache_spinlock);
        while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                struct mb_cache_entry *ce =
                        list_entry(mb_cache_lru_list.next,
                                   struct mb_cache_entry, e_lru_list);
                list_move_tail(&ce->e_lru_list, &free_list);
                __mb_cache_entry_unhash(ce);
                freed++;
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
                __mb_cache_entry_forget(entry, gfp_mask);
        }
        return freed;
}

static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct mb_cache *cache;
        unsigned long count = 0;

        spin_lock(&mb_cache_spinlock);
        list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
                mb_debug("cache %s (%d)", cache->c_name,
                         atomic_read(&cache->c_entry_count));
                count += atomic_read(&cache->c_entry_count);
        }
        spin_unlock(&mb_cache_spinlock);

        return vfs_pressure_ratio(count);
}

static struct shrinker mb_cache_shrinker = {
        .count_objects = mb_cache_shrink_count,
        .scan_objects = mb_cache_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/*
 * mb_cache_create() create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
        int n, bucket_count = 1 << bucket_bits;
        struct mb_cache *cache = NULL;

        cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                return NULL;
        cache->c_name = name;
        atomic_set(&cache->c_entry_count, 0);
        cache->c_bucket_bits = bucket_bits;
        cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!cache->c_block_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
                INIT_LIST_HEAD(&cache->c_block_hash[n]);
        cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!cache->c_index_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
                INIT_LIST_HEAD(&cache->c_index_hash[n]);
        cache->c_entry_cache = kmem_cache_create(name,
                sizeof(struct mb_cache_entry), 0,
                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
        if (!cache->c_entry_cache)
                goto fail2;

        /*
         * Set an upper limit on the number of cache entries so that the hash
         * chains won't grow too long.
         */
        cache->c_max_entries = bucket_count << 4;

        spin_lock(&mb_cache_spinlock);
        list_add(&cache->c_cache_list, &mb_cache_list);
        spin_unlock(&mb_cache_spinlock);
        return cache;

fail2:
        kfree(cache->c_index_hash);

fail:
        kfree(cache->c_block_hash);
        kfree(cache);
        return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_bdev == bdev) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_cache == cache) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        list_del(&cache->c_cache_list);
        spin_unlock(&mb_cache_spinlock);

        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }

        if (atomic_read(&cache->c_entry_count) > 0) {
                mb_error("cache %s: %d orphaned entries",
                         cache->c_name,
                         atomic_read(&cache->c_entry_count));
        }

        kmem_cache_destroy(cache->c_entry_cache);

        kfree(cache->c_index_hash);
        kfree(cache->c_block_hash);
        kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
        struct mb_cache_entry *ce = NULL;

        if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
                spin_lock(&mb_cache_spinlock);
                if (!list_empty(&mb_cache_lru_list)) {
                        ce = list_entry(mb_cache_lru_list.next,
                                        struct mb_cache_entry, e_lru_list);
                        list_del_init(&ce->e_lru_list);
                        __mb_cache_entry_unhash(ce);
                }
                spin_unlock(&mb_cache_spinlock);
        }
        if (!ce) {
                ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
                if (!ce)
                        return NULL;
                atomic_inc(&cache->c_entry_count);
                INIT_LIST_HEAD(&ce->e_lru_list);
                INIT_LIST_HEAD(&ce->e_block_list);
                ce->e_cache = cache;
                ce->e_queued = 0;
        }
        ce->e_used = 1 + MB_CACHE_WRITER;
        return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
                      sector_t block, unsigned int key)
{
        struct mb_cache *cache = ce->e_cache;
        unsigned int bucket;
        struct list_head *l;
        int error = -EBUSY;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each_prev(l, &cache->c_block_hash[bucket]) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block)
                        goto out;
        }
        __mb_cache_entry_unhash(ce);
        ce->e_bdev = bdev;
        ce->e_block = block;
        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
        ce->e_index.o_key = key;
        bucket = hash_long(key, cache->c_bucket_bits);
        list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
        error = 0;
out:
        spin_unlock(&mb_cache_spinlock);
        return error;
}


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * This is equivalent to the sequence mb_cache_entry_takeout() --
 * mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        mb_assert(list_empty(&ce->e_lru_list));
        __mb_cache_entry_unhash(ce);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                   sector_t block)
{
        unsigned int bucket;
        struct list_head *l;
        struct mb_cache_entry *ce;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each(l, &cache->c_block_hash[bucket]) {
                ce = list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block) {
                        DEFINE_WAIT(wait);

                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);

                        while (ce->e_used > 0) {
                                ce->e_queued++;
                                prepare_to_wait(&mb_cache_queue, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                spin_unlock(&mb_cache_spinlock);
                                schedule();
                                spin_lock(&mb_cache_spinlock);
                                ce->e_queued--;
                        }
                        finish_wait(&mb_cache_queue, &wait);
                        ce->e_used += 1 + MB_CACHE_WRITER;

                        if (!__mb_cache_entry_is_hashed(ce)) {
                                __mb_cache_entry_release_unlock(ce);
                                return NULL;
                        }
                        goto cleanup;
                }
        }
        ce = NULL;

cleanup:
        spin_unlock(&mb_cache_spinlock);
        return ce;
}

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
                      struct block_device *bdev, unsigned int key)
{
        while (l != head) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_index.o_list);
                if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
                        DEFINE_WAIT(wait);

                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);

                        /* Incrementing before holding the lock gives readers
                           priority over writers. */
                        ce->e_used++;
                        while (ce->e_used >= MB_CACHE_WRITER) {
                                ce->e_queued++;
                                prepare_to_wait(&mb_cache_queue, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                spin_unlock(&mb_cache_spinlock);
                                schedule();
                                spin_lock(&mb_cache_spinlock);
                                ce->e_queued--;
                        }
                        finish_wait(&mb_cache_queue, &wait);

                        if (!__mb_cache_entry_is_hashed(ce)) {
                                __mb_cache_entry_release_unlock(ce);
                                spin_lock(&mb_cache_spinlock);
                                return ERR_PTR(-EAGAIN);
                        }
                        return ce;
                }
                l = l->next;
        }
        return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
                          unsigned int key)
{
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        spin_lock(&mb_cache_spinlock);
        l = cache->c_index_hash[bucket].next;
        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
        spin_unlock(&mb_cache_spinlock);
        return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *         ...
 *         entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
                         struct block_device *bdev, unsigned int key)
{
        struct mb_cache *cache = prev->e_cache;
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        spin_lock(&mb_cache_spinlock);
        l = prev->e_index.o_list.next;
        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
        __mb_cache_entry_release_unlock(prev);
        return ce;
}

#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
        register_shrinker(&mb_cache_shrinker);
        return 0;
}

static void __exit exit_mbcache(void)
{
        unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)