Mirror of https://github.com/edk2-porting/linux-next.git
Commit 335e92e8a5
mb_cache_entry_alloc() was allocating cache entries with GFP_KERNEL. But filesystems call this function while holding xattr_sem, so a possible recursion into the filesystem would violate the locking ordering of xattr_sem versus transaction start / i_mutex on ext2-4. Change mb_cache_entry_alloc() so that filesystems can specify the desired gfp mask, and use GFP_NOFS in all of them.

Signed-off-by: Jan Kara <jack@suse.cz>
Reported-by: Dave Jones <davej@redhat.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
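For context, here is a minimal caller-side sketch of what the change means for a filesystem that uses mbcache. It is an illustration only, not code from the patch: the function example_cache_xattr_block() and its arguments are hypothetical, while mb_cache_entry_alloc(), mb_cache_entry_insert(), mb_cache_entry_free() and mb_cache_entry_release() are the real calls declared in the header below.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mbcache.h>

/*
 * Hypothetical caller: insert one xattr block into an mbcache while the
 * caller holds xattr_sem.  Passing GFP_NOFS (instead of the old implicit
 * GFP_KERNEL) keeps the allocator from recursing back into the filesystem
 * and violating the locking order described in the commit message.
 */
static int example_cache_xattr_block(struct mb_cache *cache,
				     struct block_device *bdev,
				     sector_t block, unsigned int hash)
{
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
	if (!ce)
		return -ENOMEM;

	error = mb_cache_entry_insert(ce, bdev, block, &hash);
	if (error)
		mb_cache_entry_free(ce);
	else
		mb_cache_entry_release(ce);
	return error;
}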
53 lines · 1.5 KiB · C
/*
  File: linux/mbcache.h

  (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/

/* Hardwire the number of additional indexes */
#define MB_CACHE_INDEXES_COUNT 1

struct mb_cache_entry {
	struct list_head		e_lru_list;
	struct mb_cache			*e_cache;
	unsigned short			e_used;
	unsigned short			e_queued;
	struct block_device		*e_bdev;
	sector_t			e_block;
	struct list_head		e_block_list;
	struct {
		struct list_head	o_list;
		unsigned int		o_key;
	} e_indexes[0];
};

struct mb_cache_op {
	int (*free)(struct mb_cache_entry *, gfp_t);
};

/* Functions on caches */

struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t,
				  int, int);
void mb_cache_shrink(struct block_device *);
void mb_cache_destroy(struct mb_cache *);

/* Functions on cache entries */

struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
			  sector_t, unsigned int[]);
void mb_cache_entry_release(struct mb_cache_entry *);
void mb_cache_entry_free(struct mb_cache_entry *);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
					  struct block_device *,
					  sector_t);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, int,
						 struct block_device *,
						 unsigned int);
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, int,
						struct block_device *,
						unsigned int);
#endif
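As a further illustration of the cache-level API above, here is a hedged sketch of how a filesystem might create and destroy an mbcache instance. The argument order follows the mb_cache_create() declaration in this header; the name "example_xattr", the free callback, the entry-size expression, and the values 1 (index count) and 6 (hash bucket bits) are assumptions modelled on how ext2/ext3 use the API, not values required by it.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mbcache.h>

static struct mb_cache *example_xattr_cache;

/* Optional free callback (assumed semantics): report whether this entry
 * may be released under the given allocation constraints.  Returning 1
 * always allows it, which is the simplest possible policy. */
static int example_xattr_cache_free(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	return 1;
}

static struct mb_cache_op example_xattr_cache_op = {
	.free = example_xattr_cache_free,
};

static int __init example_init_xattr_cache(void)
{
	example_xattr_cache = mb_cache_create("example_xattr",
			&example_xattr_cache_op,
			sizeof(struct mb_cache_entry) +
			sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
			1 /* additional indexes */, 6 /* hash bucket bits */);
	if (!example_xattr_cache)
		return -ENOMEM;
	return 0;
}

static void example_exit_xattr_cache(void)
{
	mb_cache_destroy(example_xattr_cache);
}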