ff44c6e36d
Dave Sterba pointed out a sleeping-while-atomic bug while doing fsync. This is because I'm an idiot and didn't realize that rwlocks are spin locks, so we've been holding this thing while doing allocations and such, which is not good. This patch fixes that by dropping the write lock before we do anything heavy and re-acquiring it when we are done. We also need to take a ref on the ems in case their corresponding pages are evicted, and mark them as being logged so that releasepage neither frees them nor removes them from our local list. Thanks,

Reported-by: Dave Sterba <dave@jikos.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
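A minimal sketch of the pattern the commit describes, not the actual tree-log code: process_one_extent() is a hypothetical stand-in for the heavy work (allocations, tree item insertion) that may sleep. The ref and EXTENT_FLAG_LOGGING are taken under the lock, and the lock is dropped around the sleeping work.

static void log_modified_extents(struct extent_map_tree *tree)
{
	struct extent_map *em, *n;
	LIST_HEAD(extents);

	write_lock(&tree->lock);
	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);
		/*
		 * Pin the em and mark it as being logged so releasepage
		 * neither frees it nor pulls it off our local list while
		 * the lock is dropped below.
		 */
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		atomic_inc(&em->refs);
		list_add_tail(&em->list, &extents);
	}

	while (!list_empty(&extents)) {
		em = list_first_entry(&extents, struct extent_map, list);
		list_del_init(&em->list);
		/*
		 * rwlock_t is a spinning lock, so drop it before anything
		 * that can sleep and re-acquire it afterwards.
		 */
		write_unlock(&tree->lock);
		process_one_extent(em);	/* hypothetical: may allocate/sleep */
		write_lock(&tree->lock);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		free_extent_map(em);	/* drop the ref taken above */
	}
	write_unlock(&tree->lock);
}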
73 lines | 1.9 KiB | C
#ifndef __EXTENTMAP__
#define __EXTENTMAP__

#include <linux/rbtree.h>

#define EXTENT_MAP_LAST_BYTE (u64)-4
#define EXTENT_MAP_HOLE (u64)-3
#define EXTENT_MAP_INLINE (u64)-2
#define EXTENT_MAP_DELALLOC (u64)-1

/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
#define EXTENT_FLAG_COMPRESSED 1
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */

struct extent_map {
	struct rb_node rb_node;

	/* all of these are in bytes */
	u64 start;
	u64 len;
	u64 mod_start;
	u64 mod_len;
	u64 orig_start;
	u64 block_start;
	u64 block_len;
	u64 generation;
	unsigned long flags;
	struct block_device *bdev;
	atomic_t refs;
	unsigned int in_tree;
	unsigned int compress_type;
	struct list_head list;
};

struct extent_map_tree {
	struct rb_root map;
	struct list_head modified_extents;
	rwlock_t lock;
};
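
/*
 * Note: lock is an rwlock_t, which in Linux is a spinning lock; it must
 * not be held across anything that can sleep (see the fix above).
 */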

static inline u64 extent_map_end(struct extent_map *em)
{
	if (em->start + em->len < em->start)
		return (u64)-1;
	return em->start + em->len;
}

static inline u64 extent_map_block_end(struct extent_map *em)
{
	if (em->block_start + em->block_len < em->block_start)
		return (u64)-1;
	return em->block_start + em->block_len;
}

void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);

struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
#endif
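
For context, a hedged example of how a caller typically uses this API (illustrative; tree, start and len are assumed to come from the caller): lookup_extent_mapping() takes a reference on the em it returns, which must be dropped with free_extent_map() when the caller is done.

	struct extent_map *em;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);

	if (em) {
		/* use em->block_start, em->len, ... */
		free_extent_map(em);	/* drop the lookup reference */
	}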