Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-24 20:54:10 +08:00)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (44 commits)
  ext4: fix trimming starting with block 0 with small blocksize
  ext4: revert buggy trim overflow patch
  ext4: don't pass entire map to check_eofblocks_fl
  ext4: fix memory leak in ext4_free_branches
  ext4: remove ext4_mb_return_to_preallocation()
  ext4: flush the i_completed_io_list during ext4_truncate
  ext4: add error checking to calls to ext4_handle_dirty_metadata()
  ext4: fix trimming of a single group
  ext4: fix uninitialized variable in ext4_register_li_request
  ext4: dynamically allocate the jbd2_inode in ext4_inode_info as necessary
  ext4: drop i_state_flags on architectures with 64-bit longs
  ext4: reorder ext4_inode_info structure elements to remove unneeded padding
  ext4: drop ec_type from the ext4_ext_cache structure
  ext4: use ext4_lblk_t instead of sector_t for logical blocks
  ext4: replace i_delalloc_reserved_flag with EXT4_STATE_DELALLOC_RESERVED
  ext4: fix 32bit overflow in ext4_ext_find_goal()
  ext4: add more error checks to ext4_mkdir()
  ext4: ext4_ext_migrate should use NULL not 0
  ext4: Use ext4_error_file() to print the pathname to the corrupted inode
  ext4: use IS_ERR() to check for errors in ext4_error_file
  ...
This commit is contained in commit e9688f6aca
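One of the headline changes in this series, "ext4: drop i_state_flags on architectures with 64-bit longs", packs the inode's dynamic state bits into the upper half of i_flags when unsigned long is 64 bits wide (see the EXT4_INODE_BIT_FNS hunk in ext4.h below). The following is a minimal user-space sketch of that packing idea only; the names are made up and plain bit operations stand in for the kernel's atomic test_bit/set_bit helpers.

#include <stdio.h>

struct demo_inode_info {
	unsigned long i_flags;		/* low 32 bits: on-disk flags; high 32 bits: state */
#if (__SIZEOF_LONG__ < 8)
	unsigned long i_state_flags;	/* a separate word is still needed with 32-bit longs */
#endif
};

#define DEMO_BIT_FNS(name, field, offset)				\
static int demo_test_##name(struct demo_inode_info *ei, int bit)	\
{									\
	return !!(ei->field & (1UL << (bit + (offset))));		\
}									\
static void demo_set_##name(struct demo_inode_info *ei, int bit)	\
{									\
	ei->field |= 1UL << (bit + (offset));				\
}

DEMO_BIT_FNS(flag, i_flags, 0)
#if (__SIZEOF_LONG__ < 8)
DEMO_BIT_FNS(state, i_state_flags, 0)
#else
DEMO_BIT_FNS(state, i_flags, 32)	/* state bits share the upper half of i_flags */
#endif

int main(void)
{
	struct demo_inode_info ei = { 0 };

	demo_set_flag(&ei, 4);		/* an on-disk flag bit */
	demo_set_state(&ei, 4);		/* a dynamic state bit with the same index */
	printf("i_flags = %#lx, state bit set = %d\n",
	       ei.i_flags, demo_test_state(&ei, 4));
	return 0;
}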
@@ -199,14 +199,6 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
goto found;
entry = next;
}
/* Check the remaining name entries */
while (!IS_LAST_ENTRY(entry)) {
struct ext2_xattr_entry *next =
EXT2_XATTR_NEXT(entry);
if ((char *)next >= end)
goto bad_block;
entry = next;
}
if (ext2_xattr_cache_insert(bh))
ea_idebug(inode, "cache insert failed");
error = -ENODATA;
@@ -355,7 +347,7 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
/*
* ext2_xattr_set()
*
* Create, replace or remove an extended attribute for this inode. Buffer
* Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE
@@ -925,7 +925,7 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
/*
* ext3_xattr_set_handle()
*
* Create, replace or remove an extended attribute for this inode. Buffer
* Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE
@@ -592,7 +592,8 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
* Account for the allocated meta blocks. We will never
* fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
if (!(*errp) &&
ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -60,9 +60,13 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext4_filetype_table[filetype]);
}

/*
* Return 0 if the directory entry is OK, and 1 if there is a problem
*
* Note: this is the opposite of what ext2 and ext3 historically returned...
*/
int __ext4_check_dir_entry(const char *function, unsigned int line,
struct inode *dir,
struct inode *dir, struct file *filp,
struct ext4_dir_entry_2 *de,
struct buffer_head *bh,
unsigned int offset)
@@ -71,26 +75,37 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
const int rlen = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);

if (rlen < EXT4_DIR_REC_LEN(1))
if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
error_msg = "rec_len is smaller than minimal";
else if (rlen % 4 != 0)
else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
else if (unlikely(((char *) de - bh->b_data) + rlen >
dir->i_sb->s_blocksize))
error_msg = "directory entry across blocks";
else if (le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
else
return 0;

if (error_msg != NULL)
ext4_error_inode(dir, function, line, bh->b_blocknr,
"bad entry in directory: %s - "
"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
error_msg, (unsigned) (offset%bh->b_size), offset,
le32_to_cpu(de->inode),
rlen, de->name_len);
return error_msg == NULL ? 1 : 0;
if (filp)
ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
"bad entry in directory: %s - offset=%u(%u), "
"inode=%u, rec_len=%d, name_len=%d",
error_msg, (unsigned) (offset%bh->b_size),
offset, le32_to_cpu(de->inode),
rlen, de->name_len);
else
ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
"bad entry in directory: %s - offset=%u(%u), "
"inode=%u, rec_len=%d, name_len=%d",
error_msg, (unsigned) (offset%bh->b_size),
offset, le32_to_cpu(de->inode),
rlen, de->name_len);

return 1;
}

static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@ static int ext4_readdir(struct file *filp,
*/
if (!bh) {
if (!dir_has_error) {
EXT4_ERROR_INODE(inode, "directory "
"contains a hole at offset %Lu",
EXT4_ERROR_FILE(filp, 0,
"directory contains a "
"hole at offset %llu",
(unsigned long long) filp->f_pos);
dir_has_error = 1;
}
@@ -194,8 +210,8 @@ revalidate:
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
if (!ext4_check_dir_entry(inode, de,
bh, offset)) {
if (ext4_check_dir_entry(inode, filp, de,
bh, offset)) {
/*
* On error, skip the f_pos to the next block
*/
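The dir.c hunks above invert the return convention of __ext4_check_dir_entry() (nonzero now means a bad entry, per the "opposite of what ext2 and ext3 historically returned" note) and fold the branch-prediction hint into the wrapper macro. A small standalone sketch of that calling pattern follows; the names are hypothetical, and a gcc-style __builtin_expect stands in for the kernel's unlikely().

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical checker: nonzero return now means "bad entry". */
static int __check_dir_entry(const char *func, unsigned int line, int rlen)
{
	const char *error_msg = NULL;

	if (rlen < 8)
		error_msg = "rec_len is smaller than minimal";
	else if (rlen % 4 != 0)
		error_msg = "rec_len % 4 != 0";
	else
		return 0;

	fprintf(stderr, "%s:%u: bad entry in directory: %s\n",
		func, line, error_msg);
	return 1;
}

/* As in the hunk above, the unlikely() hint lives in the wrapper macro. */
#define check_dir_entry(rlen) \
	unlikely(__check_dir_entry(__func__, __LINE__, (rlen)))

int main(void)
{
	if (check_dir_entry(6)) {	/* nonzero == problem, skip the entry */
		puts("skipping corrupted entry");
		return 1;
	}
	puts("entry ok");
	return 0;
}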
@ -62,8 +62,8 @@
|
||||
#define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \
|
||||
ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
|
||||
|
||||
#define EXT4_ERROR_FILE(file, fmt, a...) \
|
||||
ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
|
||||
#define EXT4_ERROR_FILE(file, block, fmt, a...) \
|
||||
ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
|
||||
|
||||
/* data type for block offset of block group */
|
||||
typedef int ext4_grpblk_t;
|
||||
@ -561,22 +561,6 @@ struct ext4_new_group_data {
|
||||
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Mount options
|
||||
*/
|
||||
struct ext4_mount_options {
|
||||
unsigned long s_mount_opt;
|
||||
uid_t s_resuid;
|
||||
gid_t s_resgid;
|
||||
unsigned long s_commit_interval;
|
||||
u32 s_min_batch_time, s_max_batch_time;
|
||||
#ifdef CONFIG_QUOTA
|
||||
int s_jquota_fmt;
|
||||
char *s_qf_names[MAXQUOTAS];
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Max physical block we can address w/o extents */
|
||||
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
|
||||
|
||||
@ -709,6 +693,8 @@ do { \
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \
|
||||
ext4_decode_extra_time(&(inode)->xtime, \
|
||||
raw_inode->xtime ## _extra); \
|
||||
else \
|
||||
(inode)->xtime.tv_nsec = 0; \
|
||||
} while (0)
|
||||
|
||||
#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
|
||||
@ -719,6 +705,8 @@ do { \
|
||||
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
|
||||
ext4_decode_extra_time(&(einode)->xtime, \
|
||||
raw_inode->xtime ## _extra); \
|
||||
else \
|
||||
(einode)->xtime.tv_nsec = 0; \
|
||||
} while (0)
|
||||
|
||||
#define i_disk_version osd1.linux1.l_i_version
|
||||
@ -750,12 +738,13 @@ do { \
|
||||
|
||||
/*
|
||||
* storage for cached extent
|
||||
* If ec_len == 0, then the cache is invalid.
|
||||
* If ec_start == 0, then the cache represents a gap (null mapping)
|
||||
*/
|
||||
struct ext4_ext_cache {
|
||||
ext4_fsblk_t ec_start;
|
||||
ext4_lblk_t ec_block;
|
||||
__u32 ec_len; /* must be 32bit to return holes */
|
||||
__u32 ec_type;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -774,10 +763,12 @@ struct ext4_inode_info {
|
||||
* near to their parent directory's inode.
|
||||
*/
|
||||
ext4_group_t i_block_group;
|
||||
ext4_lblk_t i_dir_start_lookup;
|
||||
#if (BITS_PER_LONG < 64)
|
||||
unsigned long i_state_flags; /* Dynamic state flags */
|
||||
#endif
|
||||
unsigned long i_flags;
|
||||
|
||||
ext4_lblk_t i_dir_start_lookup;
|
||||
#ifdef CONFIG_EXT4_FS_XATTR
|
||||
/*
|
||||
* Extended attributes can be read independently of the main file
|
||||
@ -820,7 +811,7 @@ struct ext4_inode_info {
|
||||
*/
|
||||
struct rw_semaphore i_data_sem;
|
||||
struct inode vfs_inode;
|
||||
struct jbd2_inode jinode;
|
||||
struct jbd2_inode *jinode;
|
||||
|
||||
struct ext4_ext_cache i_cached_extent;
|
||||
/*
|
||||
@ -840,14 +831,12 @@ struct ext4_inode_info {
|
||||
unsigned int i_reserved_data_blocks;
|
||||
unsigned int i_reserved_meta_blocks;
|
||||
unsigned int i_allocated_meta_blocks;
|
||||
unsigned short i_delalloc_reserved_flag;
|
||||
sector_t i_da_metadata_calc_last_lblock;
|
||||
ext4_lblk_t i_da_metadata_calc_last_lblock;
|
||||
int i_da_metadata_calc_len;
|
||||
|
||||
/* on-disk additional length */
|
||||
__u16 i_extra_isize;
|
||||
|
||||
spinlock_t i_block_reservation_lock;
|
||||
#ifdef CONFIG_QUOTA
|
||||
/* quota space reservation, managed internally by quota code */
|
||||
qsize_t i_reserved_quota;
|
||||
@ -856,9 +845,11 @@ struct ext4_inode_info {
|
||||
/* completed IOs that might need unwritten extents handling */
|
||||
struct list_head i_completed_io_list;
|
||||
spinlock_t i_completed_io_lock;
|
||||
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
|
||||
/* current io_end structure for async DIO write*/
|
||||
ext4_io_end_t *cur_aio_dio;
|
||||
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
|
||||
|
||||
spinlock_t i_block_reservation_lock;
|
||||
|
||||
/*
|
||||
* Transactions that contain inode's metadata needed to complete
|
||||
@ -917,11 +908,20 @@ struct ext4_inode_info {
|
||||
#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
|
||||
#define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */
|
||||
|
||||
#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
|
||||
#define set_opt(o, opt) o |= EXT4_MOUNT_##opt
|
||||
#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
|
||||
~EXT4_MOUNT_##opt
|
||||
#define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \
|
||||
EXT4_MOUNT_##opt
|
||||
#define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \
|
||||
EXT4_MOUNT_##opt)
|
||||
|
||||
#define clear_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 &= \
|
||||
~EXT4_MOUNT2_##opt
|
||||
#define set_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 |= \
|
||||
EXT4_MOUNT2_##opt
|
||||
#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
|
||||
EXT4_MOUNT2_##opt)
|
||||
|
||||
#define ext4_set_bit ext2_set_bit
|
||||
#define ext4_set_bit_atomic ext2_set_bit_atomic
|
||||
#define ext4_clear_bit ext2_clear_bit
|
||||
@ -1087,6 +1087,7 @@ struct ext4_sb_info {
|
||||
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
|
||||
struct buffer_head **s_group_desc;
|
||||
unsigned int s_mount_opt;
|
||||
unsigned int s_mount_opt2;
|
||||
unsigned int s_mount_flags;
|
||||
ext4_fsblk_t s_sb_block;
|
||||
uid_t s_resuid;
|
||||
@ -1237,24 +1238,39 @@ enum {
|
||||
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
|
||||
EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
|
||||
EXT4_STATE_NEWENTRY, /* File just added to dir */
|
||||
EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */
|
||||
};
|
||||
|
||||
#define EXT4_INODE_BIT_FNS(name, field) \
|
||||
#define EXT4_INODE_BIT_FNS(name, field, offset) \
|
||||
static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
|
||||
{ \
|
||||
return test_bit(bit, &EXT4_I(inode)->i_##field); \
|
||||
return test_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
||||
} \
|
||||
static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
|
||||
{ \
|
||||
set_bit(bit, &EXT4_I(inode)->i_##field); \
|
||||
set_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
||||
} \
|
||||
static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
|
||||
{ \
|
||||
clear_bit(bit, &EXT4_I(inode)->i_##field); \
|
||||
clear_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
||||
}
|
||||
|
||||
EXT4_INODE_BIT_FNS(flag, flags)
|
||||
EXT4_INODE_BIT_FNS(state, state_flags)
|
||||
EXT4_INODE_BIT_FNS(flag, flags, 0)
|
||||
#if (BITS_PER_LONG < 64)
|
||||
EXT4_INODE_BIT_FNS(state, state_flags, 0)
|
||||
|
||||
static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
|
||||
{
|
||||
(ei)->i_state_flags = 0;
|
||||
}
|
||||
#else
|
||||
EXT4_INODE_BIT_FNS(state, flags, 32)
|
||||
|
||||
static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
|
||||
{
|
||||
/* We depend on the fact that callers will set i_flags */
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
/* Assume that user mode programs are passing in an ext4fs superblock, not
|
||||
* a kernel struct super_block. This will allow us to call the feature-test
|
||||
@ -1642,10 +1658,12 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
|
||||
|
||||
/* dir.c */
|
||||
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
|
||||
struct file *,
|
||||
struct ext4_dir_entry_2 *,
|
||||
struct buffer_head *, unsigned int);
|
||||
#define ext4_check_dir_entry(dir, de, bh, offset) \
|
||||
__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
|
||||
#define ext4_check_dir_entry(dir, filp, de, bh, offset) \
|
||||
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
|
||||
(de), (bh), (offset)))
|
||||
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
|
||||
__u32 minor_hash,
|
||||
struct ext4_dir_entry_2 *dirent);
|
||||
@ -1653,6 +1671,7 @@ extern void ext4_htree_free_dir_info(struct dir_private_info *p);
|
||||
|
||||
/* fsync.c */
|
||||
extern int ext4_sync_file(struct file *, int);
|
||||
extern int ext4_flush_completed_IO(struct inode *);
|
||||
|
||||
/* hash.c */
|
||||
extern int ext4fs_dirhash(const char *name, int len, struct
|
||||
@ -1752,8 +1771,8 @@ extern void ext4_error_inode(struct inode *, const char *, unsigned int,
|
||||
ext4_fsblk_t, const char *, ...)
|
||||
__attribute__ ((format (printf, 5, 6)));
|
||||
extern void ext4_error_file(struct file *, const char *, unsigned int,
|
||||
const char *, ...)
|
||||
__attribute__ ((format (printf, 4, 5)));
|
||||
ext4_fsblk_t, const char *, ...)
|
||||
__attribute__ ((format (printf, 5, 6)));
|
||||
extern void __ext4_std_error(struct super_block *, const char *,
|
||||
unsigned int, int);
|
||||
extern void __ext4_abort(struct super_block *, const char *, unsigned int,
|
||||
|
@ -119,10 +119,6 @@ struct ext4_ext_path {
|
||||
* structure for external API
|
||||
*/
|
||||
|
||||
#define EXT4_EXT_CACHE_NO 0
|
||||
#define EXT4_EXT_CACHE_GAP 1
|
||||
#define EXT4_EXT_CACHE_EXTENT 2
|
||||
|
||||
/*
|
||||
* to be called by ext4_ext_walk_space()
|
||||
* negative retcode - error
|
||||
@ -197,7 +193,7 @@ static inline unsigned short ext_depth(struct inode *inode)
|
||||
static inline void
|
||||
ext4_ext_invalidate_cache(struct inode *inode)
|
||||
{
|
||||
EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
|
||||
EXT4_I(inode)->i_cached_extent.ec_len = 0;
|
||||
}
|
||||
|
||||
static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
|
||||
@ -278,7 +274,7 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
|
||||
}
|
||||
|
||||
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
|
||||
sector_t lblocks);
|
||||
ext4_lblk_t lblocks);
|
||||
extern int ext4_extent_tree_init(handle_t *, struct inode *);
|
||||
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
|
||||
int num,
|
||||
|
@ -253,7 +253,7 @@ static inline int ext4_journal_force_commit(journal_t *journal)
|
||||
static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
|
||||
{
|
||||
if (ext4_handle_valid(handle))
|
||||
return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
|
||||
return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -117,11 +117,33 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
|
||||
struct ext4_extent *ex;
|
||||
depth = path->p_depth;
|
||||
|
||||
/* try to predict block placement */
|
||||
/*
|
||||
* Try to predict block placement assuming that we are
|
||||
* filling in a file which will eventually be
|
||||
* non-sparse --- i.e., in the case of libbfd writing
|
||||
* an ELF object sections out-of-order but in a way
|
||||
* that eventually results in a contiguous object or
|
||||
* executable file, or some database extending a table
|
||||
* space file. However, this is actually somewhat
|
||||
* non-ideal if we are writing a sparse file such as
|
||||
* qemu or KVM writing a raw image file that is going
|
||||
* to stay fairly sparse, since it will end up
|
||||
* fragmenting the file system's free space. Maybe we
|
||||
* should have some heuristics or some way to allow
|
||||
* userspace to pass a hint to file system,
|
||||
* especially if the latter case turns out to be
|
||||
* common.
|
||||
*/
|
||||
ex = path[depth].p_ext;
|
||||
if (ex)
|
||||
return (ext4_ext_pblock(ex) +
|
||||
(block - le32_to_cpu(ex->ee_block)));
|
||||
if (ex) {
|
||||
ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
|
||||
ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
|
||||
|
||||
if (block > ext_block)
|
||||
return ext_pblk + (block - ext_block);
|
||||
else
|
||||
return ext_pblk - (ext_block - block);
|
||||
}
|
||||
|
||||
/* it looks like index is empty;
|
||||
* try to find starting block from index itself */
|
||||
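The ext4_ext_find_goal() hunk above is the "fix 32bit overflow" change from the series: logical block numbers (ext4_lblk_t) are 32-bit, so the old form ext_pblk + (block - ext_block) wraps when the requested block lies before the cached extent, producing a bogus allocation goal. A standalone sketch of the arithmetic, with made-up values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t lblk_t;	/* stands in for ext4_lblk_t  */
typedef uint64_t fsblk_t;	/* stands in for ext4_fsblk_t */

int main(void)
{
	fsblk_t ext_pblk  = 1000000;	/* physical start of the cached extent */
	lblk_t  ext_block = 200;	/* logical start of the cached extent  */
	lblk_t  block     = 100;	/* goal lies *before* the extent       */

	/* Old form: the 32-bit subtraction wraps before the 64-bit add. */
	fsblk_t bad = ext_pblk + (block - ext_block);

	/* New form: branch so the subtraction never goes "negative". */
	fsblk_t good = (block > ext_block)
		? ext_pblk + (block - ext_block)
		: ext_pblk - (ext_block - block);

	printf("old goal: %llu\n", (unsigned long long)bad);	/* ~4.29e9, nonsense */
	printf("new goal: %llu\n", (unsigned long long)good);	/* 999900, as intended */
	return 0;
}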
@ -244,7 +266,7 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
|
||||
* to allocate @blocks
|
||||
* Worst case is one block per extent
|
||||
*/
|
||||
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
|
||||
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
|
||||
{
|
||||
struct ext4_inode_info *ei = EXT4_I(inode);
|
||||
int idxs, num = 0;
|
||||
@ -1872,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
|
||||
cbex.ec_block = start;
|
||||
cbex.ec_len = end - start;
|
||||
cbex.ec_start = 0;
|
||||
cbex.ec_type = EXT4_EXT_CACHE_GAP;
|
||||
} else {
|
||||
cbex.ec_block = le32_to_cpu(ex->ee_block);
|
||||
cbex.ec_len = ext4_ext_get_actual_len(ex);
|
||||
cbex.ec_start = ext4_ext_pblock(ex);
|
||||
cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
|
||||
}
|
||||
|
||||
if (unlikely(cbex.ec_len == 0)) {
|
||||
@ -1917,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
|
||||
|
||||
static void
|
||||
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
|
||||
__u32 len, ext4_fsblk_t start, int type)
|
||||
__u32 len, ext4_fsblk_t start)
|
||||
{
|
||||
struct ext4_ext_cache *cex;
|
||||
BUG_ON(len == 0);
|
||||
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
|
||||
cex = &EXT4_I(inode)->i_cached_extent;
|
||||
cex->ec_type = type;
|
||||
cex->ec_block = block;
|
||||
cex->ec_len = len;
|
||||
cex->ec_start = start;
|
||||
@ -1976,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
|
||||
}
|
||||
|
||||
ext_debug(" -> %u:%lu\n", lblock, len);
|
||||
ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
|
||||
ext4_ext_put_in_cache(inode, lblock, len, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return 0 if cache is invalid; 1 if the cache is valid
|
||||
*/
|
||||
static int
|
||||
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
|
||||
struct ext4_extent *ex)
|
||||
{
|
||||
struct ext4_ext_cache *cex;
|
||||
int ret = EXT4_EXT_CACHE_NO;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* We borrow i_block_reservation_lock to protect i_cached_extent
|
||||
@ -1993,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
|
||||
cex = &EXT4_I(inode)->i_cached_extent;
|
||||
|
||||
/* has cache valid data? */
|
||||
if (cex->ec_type == EXT4_EXT_CACHE_NO)
|
||||
if (cex->ec_len == 0)
|
||||
goto errout;
|
||||
|
||||
BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
|
||||
cex->ec_type != EXT4_EXT_CACHE_EXTENT);
|
||||
if (in_range(block, cex->ec_block, cex->ec_len)) {
|
||||
ex->ee_block = cpu_to_le32(cex->ec_block);
|
||||
ext4_ext_store_pblock(ex, cex->ec_start);
|
||||
@ -2005,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
|
||||
ext_debug("%u cached by %u:%u:%llu\n",
|
||||
block,
|
||||
cex->ec_block, cex->ec_len, cex->ec_start);
|
||||
ret = cex->ec_type;
|
||||
ret = 1;
|
||||
}
|
||||
errout:
|
||||
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
|
||||
@ -3082,7 +3102,7 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
|
||||
* Handle EOFBLOCKS_FL flag, clearing it if necessary
|
||||
*/
|
||||
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
|
||||
struct ext4_map_blocks *map,
|
||||
ext4_lblk_t lblk,
|
||||
struct ext4_ext_path *path,
|
||||
unsigned int len)
|
||||
{
|
||||
@ -3112,7 +3132,7 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
|
||||
* this turns out to be false, we can bail out from this
|
||||
* function immediately.
|
||||
*/
|
||||
if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
|
||||
if (lblk + len < le32_to_cpu(last_ex->ee_block) +
|
||||
ext4_ext_get_actual_len(last_ex))
|
||||
return 0;
|
||||
/*
|
||||
@ -3168,8 +3188,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
|
||||
path);
|
||||
if (ret >= 0) {
|
||||
ext4_update_inode_fsync_trans(handle, inode, 1);
|
||||
err = check_eofblocks_fl(handle, inode, map, path,
|
||||
map->m_len);
|
||||
err = check_eofblocks_fl(handle, inode, map->m_lblk,
|
||||
path, map->m_len);
|
||||
} else
|
||||
err = ret;
|
||||
goto out2;
|
||||
@ -3199,7 +3219,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
|
||||
ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
|
||||
if (ret >= 0) {
|
||||
ext4_update_inode_fsync_trans(handle, inode, 1);
|
||||
err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
|
||||
err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
|
||||
map->m_len);
|
||||
if (err < 0)
|
||||
goto out2;
|
||||
}
|
||||
@ -3276,7 +3297,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
struct ext4_extent_header *eh;
|
||||
struct ext4_extent newex, *ex;
|
||||
ext4_fsblk_t newblock;
|
||||
int err = 0, depth, ret, cache_type;
|
||||
int err = 0, depth, ret;
|
||||
unsigned int allocated = 0;
|
||||
struct ext4_allocation_request ar;
|
||||
ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
|
||||
@ -3285,9 +3306,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
map->m_lblk, map->m_len, inode->i_ino);
|
||||
|
||||
/* check in cache */
|
||||
cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
|
||||
if (cache_type) {
|
||||
if (cache_type == EXT4_EXT_CACHE_GAP) {
|
||||
if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
|
||||
if (!newex.ee_start_lo && !newex.ee_start_hi) {
|
||||
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
|
||||
/*
|
||||
* block isn't allocated yet and
|
||||
@ -3296,7 +3316,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
goto out2;
|
||||
}
|
||||
/* we should allocate requested block */
|
||||
} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
|
||||
} else {
|
||||
/* block is already allocated */
|
||||
newblock = map->m_lblk
|
||||
- le32_to_cpu(newex.ee_block)
|
||||
@ -3305,8 +3325,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
allocated = ext4_ext_get_actual_len(&newex) -
|
||||
(map->m_lblk - le32_to_cpu(newex.ee_block));
|
||||
goto out;
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
@ -3357,8 +3375,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
/* Do not put uninitialized extent in the cache */
|
||||
if (!ext4_ext_is_uninitialized(ex)) {
|
||||
ext4_ext_put_in_cache(inode, ee_block,
|
||||
ee_len, ee_start,
|
||||
EXT4_EXT_CACHE_EXTENT);
|
||||
ee_len, ee_start);
|
||||
goto out;
|
||||
}
|
||||
ret = ext4_ext_handle_uninitialized_extents(handle,
|
||||
@ -3456,7 +3473,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
map->m_flags |= EXT4_MAP_UNINIT;
|
||||
}
|
||||
|
||||
err = check_eofblocks_fl(handle, inode, map, path, ar.len);
|
||||
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
|
||||
if (err)
|
||||
goto out2;
|
||||
|
||||
@ -3490,8 +3507,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
||||
* when it is _not_ an uninitialized extent.
|
||||
*/
|
||||
if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
|
||||
ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
|
||||
EXT4_EXT_CACHE_EXTENT);
|
||||
ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
|
||||
ext4_update_inode_fsync_trans(handle, inode, 1);
|
||||
} else
|
||||
ext4_update_inode_fsync_trans(handle, inode, 0);
|
||||
@ -3518,6 +3534,12 @@ void ext4_ext_truncate(struct inode *inode)
|
||||
handle_t *handle;
|
||||
int err = 0;
|
||||
|
||||
/*
|
||||
* finish any pending end_io work so we won't run the risk of
|
||||
* converting any truncated blocks to initialized later
|
||||
*/
|
||||
ext4_flush_completed_IO(inode);
|
||||
|
||||
/*
|
||||
* probably first extent we're gonna free will be last in block
|
||||
*/
|
||||
@ -3767,7 +3789,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
|
||||
|
||||
logical = (__u64)newex->ec_block << blksize_bits;
|
||||
|
||||
if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
|
||||
if (newex->ec_start == 0) {
|
||||
pgoff_t offset;
|
||||
struct page *page;
|
||||
struct buffer_head *bh = NULL;
|
||||
|
@ -104,6 +104,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
|
||||
{
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
|
||||
struct ext4_inode_info *ei = EXT4_I(inode);
|
||||
struct vfsmount *mnt = filp->f_path.mnt;
|
||||
struct path path;
|
||||
char buf[64], *cp;
|
||||
@ -127,6 +128,27 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
|
||||
ext4_mark_super_dirty(sb);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Set up the jbd2_inode if we are opening the inode for
|
||||
* writing and the journal is present
|
||||
*/
|
||||
if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
|
||||
struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
if (!ei->jinode) {
|
||||
if (!jinode) {
|
||||
spin_unlock(&inode->i_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
ei->jinode = jinode;
|
||||
jbd2_journal_init_jbd_inode(ei->jinode, inode);
|
||||
jinode = NULL;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
if (unlikely(jinode != NULL))
|
||||
jbd2_free_inode(jinode);
|
||||
}
|
||||
return dquot_file_open(inode, filp);
|
||||
}
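The ext4_file_open() hunk above implements "dynamically allocate the jbd2_inode in ext4_inode_info as necessary": the allocation happens before the lock is taken (it may sleep), the pointer is published under inode->i_lock only if nobody got there first, and the losing allocation is freed afterwards. A minimal user-space sketch of that pattern, using a pthread mutex and made-up types rather than the kernel's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct jinode { int dummy; };	/* stand-in for struct jbd2_inode */

static struct jinode *shared_jinode;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int open_for_write(void)
{
	if (!shared_jinode) {
		/* Allocation may sleep, so do it before taking the lock. */
		struct jinode *j = malloc(sizeof(*j));

		pthread_mutex_lock(&lock);
		if (!shared_jinode) {
			if (!j) {
				pthread_mutex_unlock(&lock);
				return -1;	/* -ENOMEM in the kernel */
			}
			shared_jinode = j;	/* we won the race: publish it */
			j = NULL;
		}
		pthread_mutex_unlock(&lock);
		free(j);	/* loser's spare allocation; free(NULL) is a no-op */
	}
	return 0;
}

int main(void)
{
	printf("open: %d, jinode=%p\n", open_for_write(), (void *)shared_jinode);
	return 0;
}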
|
||||
|
||||
|
@ -75,7 +75,7 @@ static void dump_completed_IO(struct inode * inode)
|
||||
* to written.
|
||||
* The function return the number of pending IOs on success.
|
||||
*/
|
||||
static int flush_completed_IO(struct inode *inode)
|
||||
extern int ext4_flush_completed_IO(struct inode *inode)
|
||||
{
|
||||
ext4_io_end_t *io;
|
||||
struct ext4_inode_info *ei = EXT4_I(inode);
|
||||
@ -169,7 +169,7 @@ int ext4_sync_file(struct file *file, int datasync)
|
||||
if (inode->i_sb->s_flags & MS_RDONLY)
|
||||
return 0;
|
||||
|
||||
ret = flush_completed_IO(inode);
|
||||
ret = ext4_flush_completed_IO(inode);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -1027,7 +1027,7 @@ got:
|
||||
inode->i_generation = sbi->s_next_generation++;
|
||||
spin_unlock(&sbi->s_next_gen_lock);
|
||||
|
||||
ei->i_state_flags = 0;
|
||||
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
|
||||
ext4_set_inode_state(inode, EXT4_STATE_NEW);
|
||||
|
||||
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ratelimit.h>
|
||||
|
||||
#include "ext4_jbd2.h"
|
||||
#include "xattr.h"
|
||||
@ -54,10 +55,17 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
|
||||
loff_t new_size)
|
||||
{
|
||||
trace_ext4_begin_ordered_truncate(inode, new_size);
|
||||
return jbd2_journal_begin_ordered_truncate(
|
||||
EXT4_SB(inode->i_sb)->s_journal,
|
||||
&EXT4_I(inode)->jinode,
|
||||
new_size);
|
||||
/*
|
||||
* If jinode is zero, then we never opened the file for
|
||||
* writing, so there's no need to call
|
||||
* jbd2_journal_begin_ordered_truncate() since there's no
|
||||
* outstanding writes we need to flush.
|
||||
*/
|
||||
if (!EXT4_I(inode)->jinode)
|
||||
return 0;
|
||||
return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
|
||||
EXT4_I(inode)->jinode,
|
||||
new_size);
|
||||
}
|
||||
|
||||
static void ext4_invalidatepage(struct page *page, unsigned long offset);
|
||||
@ -552,7 +560,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
|
||||
}
|
||||
|
||||
/**
|
||||
* ext4_blks_to_allocate: Look up the block map and count the number
|
||||
* ext4_blks_to_allocate - Look up the block map and count the number
|
||||
* of direct blocks need to be allocated for the given branch.
|
||||
*
|
||||
* @branch: chain of indirect blocks
|
||||
@ -591,13 +599,19 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
|
||||
|
||||
/**
|
||||
* ext4_alloc_blocks: multiple allocate blocks needed for a branch
|
||||
* @handle: handle for this transaction
|
||||
* @inode: inode which needs allocated blocks
|
||||
* @iblock: the logical block to start allocating at
|
||||
* @goal: preferred physical block of allocation
|
||||
* @indirect_blks: the number of blocks need to allocate for indirect
|
||||
* blocks
|
||||
*
|
||||
* @blks: number of desired blocks
|
||||
* @new_blocks: on return it will store the new block numbers for
|
||||
* the indirect blocks(if needed) and the first direct block,
|
||||
* @blks: on return it will store the total number of allocated
|
||||
* direct blocks
|
||||
* @err: on return it will store the error code
|
||||
*
|
||||
* This function will return the number of blocks allocated as
|
||||
* requested by the passed-in parameters.
|
||||
*/
|
||||
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
|
||||
ext4_lblk_t iblock, ext4_fsblk_t goal,
|
||||
@ -711,9 +725,11 @@ failed_out:
|
||||
|
||||
/**
|
||||
* ext4_alloc_branch - allocate and set up a chain of blocks.
|
||||
* @handle: handle for this transaction
|
||||
* @inode: owner
|
||||
* @indirect_blks: number of allocated indirect blocks
|
||||
* @blks: number of allocated direct blocks
|
||||
* @goal: preferred place for allocation
|
||||
* @offsets: offsets (in the blocks) to store the pointers to next.
|
||||
* @branch: place to store the chain in.
|
||||
*
|
||||
@ -826,6 +842,7 @@ failed:
|
||||
|
||||
/**
|
||||
* ext4_splice_branch - splice the allocated branch onto inode.
|
||||
* @handle: handle for this transaction
|
||||
* @inode: owner
|
||||
* @block: (logical) number of block we are adding
|
||||
* @chain: chain of indirect blocks (with a missing link - see
|
||||
@ -1081,7 +1098,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
|
||||
* Calculate the number of metadata blocks need to reserve
|
||||
* to allocate a block located at @lblock
|
||||
*/
|
||||
static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
|
||||
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
|
||||
{
|
||||
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
|
||||
return ext4_ext_calc_metadata_amount(inode, lblock);
|
||||
@ -1320,7 +1337,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
* avoid double accounting
|
||||
*/
|
||||
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
|
||||
EXT4_I(inode)->i_delalloc_reserved_flag = 1;
|
||||
ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
|
||||
/*
|
||||
* We need to check for EXT4 here because migrate
|
||||
* could have changed the inode type in between
|
||||
@ -1350,7 +1367,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
||||
ext4_da_update_reserve_space(inode, retval, 1);
|
||||
}
|
||||
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
|
||||
EXT4_I(inode)->i_delalloc_reserved_flag = 0;
|
||||
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
|
||||
|
||||
up_write((&EXT4_I(inode)->i_data_sem));
|
||||
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
|
||||
@ -1878,7 +1895,7 @@ static int ext4_journalled_write_end(struct file *file,
|
||||
/*
|
||||
* Reserve a single block located at lblock
|
||||
*/
|
||||
static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
|
||||
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
|
||||
{
|
||||
int retries = 0;
|
||||
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
|
||||
@ -2239,7 +2256,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
|
||||
* affects functions in many different parts of the allocation
|
||||
* call path. This flag exists primarily because we don't
|
||||
* want to change *many* call functions, so ext4_map_blocks()
|
||||
* will set the magic i_delalloc_reserved_flag once the
|
||||
* will set the EXT4_STATE_DELALLOC_RESERVED flag once the
|
||||
* inode's allocation semaphore is taken.
|
||||
*
|
||||
* If the blocks in questions were delalloc blocks, set
|
||||
@ -3720,8 +3737,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
|
||||
retry:
|
||||
io_end = ext4_init_io_end(inode, GFP_ATOMIC);
|
||||
if (!io_end) {
|
||||
if (printk_ratelimit())
|
||||
printk(KERN_WARNING "%s: allocation fail\n", __func__);
|
||||
pr_warning_ratelimited("%s: allocation fail\n", __func__);
|
||||
schedule();
|
||||
goto retry;
|
||||
}
|
||||
@ -4045,7 +4061,7 @@ int ext4_block_truncate_page(handle_t *handle,
|
||||
if (ext4_should_journal_data(inode)) {
|
||||
err = ext4_handle_dirty_metadata(handle, inode, bh);
|
||||
} else {
|
||||
if (ext4_should_order_data(inode))
|
||||
if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
|
||||
err = ext4_jbd2_file_inode(handle, inode);
|
||||
mark_buffer_dirty(bh);
|
||||
}
|
||||
@ -4169,6 +4185,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
|
||||
{
|
||||
__le32 *p;
|
||||
int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
|
||||
int err;
|
||||
|
||||
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
|
||||
flags |= EXT4_FREE_BLOCKS_METADATA;
|
||||
@ -4184,11 +4201,23 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
|
||||
if (try_to_extend_transaction(handle, inode)) {
|
||||
if (bh) {
|
||||
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
|
||||
ext4_handle_dirty_metadata(handle, inode, bh);
|
||||
err = ext4_handle_dirty_metadata(handle, inode, bh);
|
||||
if (unlikely(err)) {
|
||||
ext4_std_error(inode->i_sb, err);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
err = ext4_mark_inode_dirty(handle, inode);
|
||||
if (unlikely(err)) {
|
||||
ext4_std_error(inode->i_sb, err);
|
||||
return 1;
|
||||
}
|
||||
err = ext4_truncate_restart_trans(handle, inode,
|
||||
blocks_for_truncate(inode));
|
||||
if (unlikely(err)) {
|
||||
ext4_std_error(inode->i_sb, err);
|
||||
return 1;
|
||||
}
|
||||
ext4_mark_inode_dirty(handle, inode);
|
||||
ext4_truncate_restart_trans(handle, inode,
|
||||
blocks_for_truncate(inode));
|
||||
if (bh) {
|
||||
BUFFER_TRACE(bh, "retaking write access");
|
||||
ext4_journal_get_write_access(handle, bh);
|
||||
@ -4349,6 +4378,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
|
||||
(__le32 *) bh->b_data,
|
||||
(__le32 *) bh->b_data + addr_per_block,
|
||||
depth);
|
||||
brelse(bh);
|
||||
|
||||
/*
|
||||
* Everything below this pointer has been
|
||||
@ -4859,7 +4889,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
||||
}
|
||||
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
|
||||
|
||||
ei->i_state_flags = 0;
|
||||
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
|
||||
ei->i_dir_start_lookup = 0;
|
||||
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
|
||||
/* We now have enough fields to check if the inode was active or not.
|
||||
@ -5118,7 +5148,7 @@ static int ext4_do_update_inode(handle_t *handle,
|
||||
if (ext4_inode_blocks_set(handle, raw_inode, ei))
|
||||
goto out_brelse;
|
||||
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
|
||||
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
|
||||
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
|
||||
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
||||
cpu_to_le32(EXT4_OS_HURD))
|
||||
raw_inode->i_file_acl_high =
|
||||
|
@ -2608,18 +2608,12 @@ int ext4_mb_release(struct super_block *sb)
|
||||
static inline int ext4_issue_discard(struct super_block *sb,
|
||||
ext4_group_t block_group, ext4_grpblk_t block, int count)
|
||||
{
|
||||
int ret;
|
||||
ext4_fsblk_t discard_block;
|
||||
|
||||
discard_block = block + ext4_group_first_block_no(sb, block_group);
|
||||
trace_ext4_discard_blocks(sb,
|
||||
(unsigned long long) discard_block, count);
|
||||
ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
|
||||
if (ret == -EOPNOTSUPP) {
|
||||
ext4_warning(sb, "discard not supported, disabling");
|
||||
clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
|
||||
}
|
||||
return ret;
|
||||
return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2631,7 +2625,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
|
||||
struct super_block *sb = journal->j_private;
|
||||
struct ext4_buddy e4b;
|
||||
struct ext4_group_info *db;
|
||||
int err, count = 0, count2 = 0;
|
||||
int err, ret, count = 0, count2 = 0;
|
||||
struct ext4_free_data *entry;
|
||||
struct list_head *l, *ltmp;
|
||||
|
||||
@ -2641,9 +2635,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
|
||||
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
|
||||
entry->count, entry->group, entry);
|
||||
|
||||
if (test_opt(sb, DISCARD))
|
||||
ext4_issue_discard(sb, entry->group,
|
||||
if (test_opt(sb, DISCARD)) {
|
||||
ret = ext4_issue_discard(sb, entry->group,
|
||||
entry->start_blk, entry->count);
|
||||
if (unlikely(ret == -EOPNOTSUPP)) {
|
||||
ext4_warning(sb, "discard not supported, "
|
||||
"disabling");
|
||||
clear_opt(sb, DISCARD);
|
||||
}
|
||||
}
|
||||
|
||||
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
|
||||
/* we expect to find existing buddy because it's pinned */
|
||||
@ -3881,19 +3881,6 @@ repeat:
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* finds all preallocated spaces and return blocks being freed to them
|
||||
* if preallocated space becomes full (no block is used from the space)
|
||||
* then the function frees space in buddy
|
||||
* XXX: at the moment, truncate (which is the only way to free blocks)
|
||||
* discards all preallocations
|
||||
*/
|
||||
static void ext4_mb_return_to_preallocation(struct inode *inode,
|
||||
struct ext4_buddy *e4b,
|
||||
sector_t block, int count)
|
||||
{
|
||||
BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
|
||||
}
|
||||
#ifdef CONFIG_EXT4_DEBUG
|
||||
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
|
||||
{
|
||||
@ -4283,7 +4270,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
|
||||
* EDQUOT check, as blocks and quotas have been already
|
||||
* reserved when data being copied into pagecache.
|
||||
*/
|
||||
if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
|
||||
if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
|
||||
ar->flags |= EXT4_MB_DELALLOC_RESERVED;
|
||||
else {
|
||||
/* Without delayed allocation we need to verify
|
||||
@ -4380,7 +4367,8 @@ out:
|
||||
if (inquota && ar->len < inquota)
|
||||
dquot_free_block(ar->inode, inquota - ar->len);
|
||||
if (!ar->len) {
|
||||
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
|
||||
if (!ext4_test_inode_state(ar->inode,
|
||||
EXT4_STATE_DELALLOC_RESERVED))
|
||||
/* release all the reserved blocks if non delalloc */
|
||||
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
|
||||
reserv_blks);
|
||||
@ -4626,7 +4614,11 @@ do_more:
|
||||
* blocks being freed are metadata. these blocks shouldn't
|
||||
* be used until this transaction is committed
|
||||
*/
|
||||
new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
|
||||
new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
|
||||
if (!new_entry) {
|
||||
err = -ENOMEM;
|
||||
goto error_return;
|
||||
}
|
||||
new_entry->start_blk = bit;
|
||||
new_entry->group = block_group;
|
||||
new_entry->count = count;
|
||||
@ -4643,7 +4635,6 @@ do_more:
|
||||
ext4_lock_group(sb, block_group);
|
||||
mb_clear_bits(bitmap_bh->b_data, bit, count);
|
||||
mb_free_blocks(inode, &e4b, bit, count);
|
||||
ext4_mb_return_to_preallocation(inode, &e4b, block, count);
|
||||
}
|
||||
|
||||
ret = ext4_free_blks_count(sb, gdp) + count;
|
||||
@ -4718,8 +4709,6 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
|
||||
ext4_unlock_group(sb, group);
|
||||
|
||||
ret = ext4_issue_discard(sb, group, start, count);
|
||||
if (ret)
|
||||
ext4_std_error(sb, ret);
|
||||
|
||||
ext4_lock_group(sb, group);
|
||||
mb_free_blocks(NULL, e4b, start, ex.fe_len);
|
||||
@ -4819,6 +4808,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
|
||||
ext4_group_t group, ngroups = ext4_get_groups_count(sb);
|
||||
ext4_grpblk_t cnt = 0, first_block, last_block;
|
||||
uint64_t start, len, minlen, trimmed;
|
||||
ext4_fsblk_t first_data_blk =
|
||||
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
||||
int ret = 0;
|
||||
|
||||
start = range->start >> sb->s_blocksize_bits;
|
||||
@ -4828,6 +4819,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
|
||||
|
||||
if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
|
||||
return -EINVAL;
|
||||
if (start < first_data_blk) {
|
||||
len -= first_data_blk - start;
|
||||
start = first_data_blk;
|
||||
}
|
||||
|
||||
/* Determine first and last group to examine based on start and len */
|
||||
ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
|
||||
@ -4851,7 +4846,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
|
||||
if (len >= EXT4_BLOCKS_PER_GROUP(sb))
|
||||
len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
|
||||
else
|
||||
last_block = len;
|
||||
last_block = first_block + len;
|
||||
|
||||
if (e4b.bd_info->bb_free >= minlen) {
|
||||
cnt = ext4_trim_all_free(sb, &e4b, first_block,
|
||||
|
@ -496,7 +496,7 @@ int ext4_ext_migrate(struct inode *inode)
|
||||
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
|
||||
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
|
||||
tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
|
||||
S_IFREG, 0, goal);
|
||||
S_IFREG, NULL, goal);
|
||||
if (IS_ERR(tmp_inode)) {
|
||||
retval = -ENOMEM;
|
||||
ext4_journal_stop(handle);
|
||||
|
@ -581,9 +581,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
|
||||
dir->i_sb->s_blocksize -
|
||||
EXT4_DIR_REC_LEN(0));
|
||||
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
|
||||
if (!ext4_check_dir_entry(dir, de, bh,
|
||||
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
|
||||
+((char *)de - bh->b_data))) {
|
||||
if (ext4_check_dir_entry(dir, NULL, de, bh,
|
||||
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
|
||||
+ ((char *)de - bh->b_data))) {
|
||||
/* On error, skip the f_pos to the next block. */
|
||||
dir_file->f_pos = (dir_file->f_pos |
|
||||
(dir->i_sb->s_blocksize - 1)) + 1;
|
||||
@ -820,7 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh,
|
||||
if ((char *) de + namelen <= dlimit &&
|
||||
ext4_match (namelen, name, de)) {
|
||||
/* found a match - just to be sure, do a full check */
|
||||
if (!ext4_check_dir_entry(dir, de, bh, offset))
|
||||
if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
|
||||
return -1;
|
||||
*res_dir = de;
|
||||
return 1;
|
||||
@ -1036,7 +1036,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
|
||||
return ERR_PTR(-EIO);
|
||||
}
|
||||
inode = ext4_iget(dir->i_sb, ino);
|
||||
if (unlikely(IS_ERR(inode))) {
|
||||
if (IS_ERR(inode)) {
|
||||
if (PTR_ERR(inode) == -ESTALE) {
|
||||
EXT4_ERROR_INODE(dir,
|
||||
"deleted inode referenced: %u",
|
||||
@ -1269,7 +1269,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
|
||||
de = (struct ext4_dir_entry_2 *)bh->b_data;
|
||||
top = bh->b_data + blocksize - reclen;
|
||||
while ((char *) de <= top) {
|
||||
if (!ext4_check_dir_entry(dir, de, bh, offset))
|
||||
if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
|
||||
return -EIO;
|
||||
if (ext4_match(namelen, name, de))
|
||||
return -EEXIST;
|
||||
@ -1602,7 +1602,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
|
||||
if (err)
|
||||
goto journal_error;
|
||||
}
|
||||
ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
|
||||
err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
|
||||
if (err) {
|
||||
ext4_std_error(inode->i_sb, err);
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
de = do_split(handle, dir, &bh, frame, &hinfo, &err);
|
||||
if (!de)
|
||||
@ -1630,17 +1634,21 @@ static int ext4_delete_entry(handle_t *handle,
|
||||
{
|
||||
struct ext4_dir_entry_2 *de, *pde;
|
||||
unsigned int blocksize = dir->i_sb->s_blocksize;
|
||||
int i;
|
||||
int i, err;
|
||||
|
||||
i = 0;
|
||||
pde = NULL;
|
||||
de = (struct ext4_dir_entry_2 *) bh->b_data;
|
||||
while (i < bh->b_size) {
|
||||
if (!ext4_check_dir_entry(dir, de, bh, i))
|
||||
if (ext4_check_dir_entry(dir, NULL, de, bh, i))
|
||||
return -EIO;
|
||||
if (de == de_del) {
|
||||
BUFFER_TRACE(bh, "get_write_access");
|
||||
ext4_journal_get_write_access(handle, bh);
|
||||
err = ext4_journal_get_write_access(handle, bh);
|
||||
if (unlikely(err)) {
|
||||
ext4_std_error(dir->i_sb, err);
|
||||
return err;
|
||||
}
|
||||
if (pde)
|
||||
pde->rec_len = ext4_rec_len_to_disk(
|
||||
ext4_rec_len_from_disk(pde->rec_len,
|
||||
@ -1652,7 +1660,11 @@ static int ext4_delete_entry(handle_t *handle,
|
||||
de->inode = 0;
|
||||
dir->i_version++;
|
||||
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
|
||||
ext4_handle_dirty_metadata(handle, dir, bh);
|
||||
err = ext4_handle_dirty_metadata(handle, dir, bh);
|
||||
if (unlikely(err)) {
|
||||
ext4_std_error(dir->i_sb, err);
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
i += ext4_rec_len_from_disk(de->rec_len, blocksize);
|
||||
@ -1789,7 +1801,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
|
||||
{
|
||||
handle_t *handle;
|
||||
struct inode *inode;
|
||||
struct buffer_head *dir_block;
|
||||
struct buffer_head *dir_block = NULL;
|
||||
struct ext4_dir_entry_2 *de;
|
||||
unsigned int blocksize = dir->i_sb->s_blocksize;
|
||||
int err, retries = 0;
|
||||
@ -1822,7 +1834,9 @@ retry:
|
||||
if (!dir_block)
|
||||
goto out_clear_inode;
|
||||
BUFFER_TRACE(dir_block, "get_write_access");
|
||||
ext4_journal_get_write_access(handle, dir_block);
|
||||
err = ext4_journal_get_write_access(handle, dir_block);
|
||||
if (err)
|
||||
goto out_clear_inode;
|
||||
de = (struct ext4_dir_entry_2 *) dir_block->b_data;
|
||||
de->inode = cpu_to_le32(inode->i_ino);
|
||||
de->name_len = 1;
|
||||
@ -1839,10 +1853,12 @@ retry:
|
||||
ext4_set_de_type(dir->i_sb, de, S_IFDIR);
|
||||
inode->i_nlink = 2;
|
||||
BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
|
||||
ext4_handle_dirty_metadata(handle, dir, dir_block);
|
||||
brelse(dir_block);
|
||||
ext4_mark_inode_dirty(handle, inode);
|
||||
err = ext4_add_entry(handle, dentry, inode);
|
||||
err = ext4_handle_dirty_metadata(handle, dir, dir_block);
|
||||
if (err)
|
||||
goto out_clear_inode;
|
||||
err = ext4_mark_inode_dirty(handle, inode);
|
||||
if (!err)
|
||||
err = ext4_add_entry(handle, dentry, inode);
|
||||
if (err) {
|
||||
out_clear_inode:
|
||||
clear_nlink(inode);
|
||||
@ -1853,10 +1869,13 @@ out_clear_inode:
|
||||
}
|
||||
ext4_inc_count(handle, dir);
|
||||
ext4_update_dx_flag(dir);
|
||||
ext4_mark_inode_dirty(handle, dir);
|
||||
err = ext4_mark_inode_dirty(handle, dir);
|
||||
if (err)
|
||||
goto out_clear_inode;
|
||||
d_instantiate(dentry, inode);
|
||||
unlock_new_inode(inode);
|
||||
out_stop:
|
||||
brelse(dir_block);
|
||||
ext4_journal_stop(handle);
|
||||
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
|
||||
goto retry;
|
||||
@ -1919,7 +1938,7 @@ static int empty_dir(struct inode *inode)
|
||||
}
|
||||
de = (struct ext4_dir_entry_2 *) bh->b_data;
|
||||
}
|
||||
if (!ext4_check_dir_entry(inode, de, bh, offset)) {
|
||||
if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
|
||||
de = (struct ext4_dir_entry_2 *)(bh->b_data +
|
||||
sb->s_blocksize);
|
||||
offset = (offset | (sb->s_blocksize - 1)) + 1;
|
||||
@ -2407,7 +2426,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
|
||||
ext4_current_time(new_dir);
|
||||
ext4_mark_inode_dirty(handle, new_dir);
|
||||
BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
|
||||
ext4_handle_dirty_metadata(handle, new_dir, new_bh);
|
||||
retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
|
||||
if (unlikely(retval)) {
|
||||
ext4_std_error(new_dir->i_sb, retval);
|
||||
goto end_rename;
|
||||
}
|
||||
brelse(new_bh);
|
||||
new_bh = NULL;
|
||||
}
|
||||
@ -2459,7 +2482,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
|
||||
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
|
||||
cpu_to_le32(new_dir->i_ino);
|
||||
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
|
||||
ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
|
||||
retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
|
||||
if (retval) {
|
||||
ext4_std_error(old_dir->i_sb, retval);
|
||||
goto end_rename;
|
||||
}
|
||||
ext4_dec_count(handle, old_dir);
|
||||
if (new_inode) {
|
||||
/* checked empty_dir above, can't have another parent,
|
||||
|
@ -44,7 +44,7 @@ int __init ext4_init_pageio(void)
|
||||
if (io_page_cachep == NULL)
|
||||
return -ENOMEM;
|
||||
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
|
||||
if (io_page_cachep == NULL) {
|
||||
if (io_end_cachep == NULL) {
|
||||
kmem_cache_destroy(io_page_cachep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -158,11 +158,8 @@ static void ext4_end_io_work(struct work_struct *work)
|
||||
|
||||
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
|
||||
{
|
||||
ext4_io_end_t *io = NULL;
|
||||
|
||||
io = kmem_cache_alloc(io_end_cachep, flags);
|
||||
ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
|
||||
if (io) {
|
||||
memset(io, 0, sizeof(*io));
|
||||
atomic_inc(&EXT4_I(inode)->i_ioend_count);
|
||||
io->inode = inode;
|
||||
INIT_WORK(&io->work, ext4_end_io_work);
|
||||
|
@ -220,7 +220,11 @@ static int setup_new_group_blocks(struct super_block *sb,
|
||||
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
|
||||
set_buffer_uptodate(gdb);
|
||||
unlock_buffer(gdb);
|
||||
ext4_handle_dirty_metadata(handle, NULL, gdb);
|
||||
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
|
||||
if (unlikely(err)) {
|
||||
brelse(gdb);
|
||||
goto exit_bh;
|
}
ext4_set_bit(bit, bh->b_data);
brelse(gdb);
}
@ -258,7 +262,11 @@ static int setup_new_group_blocks(struct super_block *sb,

ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
bh->b_data);
ext4_handle_dirty_metadata(handle, NULL, bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_bh;
}
brelse(bh);
/* Mark unused entries in inode bitmap used */
ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@ -270,7 +278,9 @@ static int setup_new_group_blocks(struct super_block *sb,

ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
bh->b_data);
ext4_handle_dirty_metadata(handle, NULL, bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
ext4_std_error(sb, err);
exit_bh:
brelse(bh);

@ -422,17 +432,21 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
goto exit_dind;
}

if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (unlikely(err))
goto exit_dind;

if ((err = ext4_journal_get_write_access(handle, *primary)))
err = ext4_journal_get_write_access(handle, *primary);
if (unlikely(err))
goto exit_sbh;

if ((err = ext4_journal_get_write_access(handle, dind)))
goto exit_primary;
err = ext4_journal_get_write_access(handle, dind);
if (unlikely(err))
ext4_std_error(sb, err);

/* ext4_reserve_inode_write() gets a reference on the iloc */
if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (unlikely(err))
goto exit_dindj;

n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@ -454,12 +468,20 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
* reserved inode, and will become GDT blocks (primary and backup).
*/
data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
ext4_handle_dirty_metadata(handle, NULL, dind);
brelse(dind);
err = ext4_handle_dirty_metadata(handle, NULL, dind);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_inode;
}
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
ext4_mark_iloc_dirty(handle, inode, &iloc);
memset((*primary)->b_data, 0, sb->s_blocksize);
ext4_handle_dirty_metadata(handle, NULL, *primary);
err = ext4_handle_dirty_metadata(handle, NULL, *primary);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_inode;
}
brelse(dind);

o_group_desc = EXT4_SB(sb)->s_group_desc;
memcpy(n_group_desc, o_group_desc,
@ -470,19 +492,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
kfree(o_group_desc);

le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
if (err)
ext4_std_error(sb, err);

return 0;
return err;

exit_inode:
/* ext4_journal_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
/* ext4_journal_release_buffer(handle, dind); */
exit_primary:
/* ext4_journal_release_buffer(handle, *primary); */
exit_sbh:
/* ext4_journal_release_buffer(handle, *primary); */
/* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
brelse(dind);
exit_bh:
@ -665,7 +687,9 @@ static void update_backups(struct super_block *sb,
memset(bh->b_data + size, 0, rest);
set_buffer_uptodate(bh);
unlock_buffer(bh);
ext4_handle_dirty_metadata(handle, NULL, bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
ext4_std_error(sb, err);
brelse(bh);
}
if ((err2 = ext4_journal_stop(handle)) && !err)
@ -883,7 +907,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
/* Update the global fs size fields */
sbi->s_groups_count++;

ext4_handle_dirty_metadata(handle, NULL, primary);
err = ext4_handle_dirty_metadata(handle, NULL, primary);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_journal;
}

/* Update the reserved block counts only once the new group is
* active. */
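
The resize.c hunks above all apply the same conversion: the return value of ext4_handle_dirty_metadata() is now checked and reported through ext4_std_error() instead of being dropped. A minimal sketch of that pattern, using a hypothetical wrapper function that is not part of the commit:

/*
 * Sketch only: check and report a failed metadata-dirty call instead of
 * ignoring it.  The wrapper name is hypothetical.
 */
static int ext4_dirty_metadata_checked(handle_t *handle, struct super_block *sb,
				       struct buffer_head *bh)
{
	int err = ext4_handle_dirty_metadata(handle, NULL, bh);

	if (unlikely(err))
		ext4_std_error(sb, err);	/* record and report the error */
	return err;
}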
fs/ext4/super.c

@ -388,13 +388,14 @@ static void ext4_handle_error(struct super_block *sb)
void __ext4_error(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
struct va_format vaf;
va_list args;

va_start(args, fmt);
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
sb->s_id, function, line, current->comm);
vprintk(fmt, args);
printk("\n");
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
sb->s_id, function, line, current->comm, &vaf);
va_end(args);

ext4_handle_error(sb);
@ -405,28 +406,31 @@ void ext4_error_inode(struct inode *inode, const char *function,
const char *fmt, ...)
{
va_list args;
struct va_format vaf;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

es->s_last_error_ino = cpu_to_le32(inode->i_ino);
es->s_last_error_block = cpu_to_le64(block);
save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
inode->i_sb->s_id, function, line, inode->i_ino);
if (block)
printk("block %llu: ", block);
printk("comm %s: ", current->comm);
vprintk(fmt, args);
printk("\n");
printk(KERN_CONT "block %llu: ", block);
printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
va_end(args);

ext4_handle_error(inode->i_sb);
}

void ext4_error_file(struct file *file, const char *function,
unsigned int line, const char *fmt, ...)
unsigned int line, ext4_fsblk_t block,
const char *fmt, ...)
{
va_list args;
struct va_format vaf;
struct ext4_super_block *es;
struct inode *inode = file->f_dentry->d_inode;
char pathname[80], *path;
@ -434,17 +438,18 @@ void ext4_error_file(struct file *file, const char *function,
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
path = d_path(&(file->f_path), pathname, sizeof(pathname));
if (!path)
if (IS_ERR(path))
path = "(unknown)";
printk(KERN_CRIT
"EXT4-fs error (device %s): %s:%d: inode #%lu "
"(comm %s path %s): ",
inode->i_sb->s_id, function, line, inode->i_ino,
current->comm, path);
vprintk(fmt, args);
printk("\n");
"EXT4-fs error (device %s): %s:%d: inode #%lu: ",
inode->i_sb->s_id, function, line, inode->i_ino);
if (block)
printk(KERN_CONT "block %llu: ", block);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
va_end(args);

ext4_handle_error(inode->i_sb);
@ -543,28 +548,29 @@ void __ext4_abort(struct super_block *sb, const char *function,
panic("EXT4-fs panic from previous error\n");
}

void ext4_msg (struct super_block * sb, const char *prefix,
const char *fmt, ...)
void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
struct va_format vaf;
va_list args;

va_start(args, fmt);
printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
vprintk(fmt, args);
printk("\n");
vaf.fmt = fmt;
vaf.va = &args;
printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
va_end(args);
}

void __ext4_warning(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
struct va_format vaf;
va_list args;

va_start(args, fmt);
printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
sb->s_id, function, line);
vprintk(fmt, args);
printk("\n");
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
sb->s_id, function, line, &vaf);
va_end(args);
}

@ -575,21 +581,25 @@ void __ext4_grp_locked_error(const char *function, unsigned int line,
__releases(bitlock)
__acquires(bitlock)
{
struct va_format vaf;
va_list args;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;

es->s_last_error_ino = cpu_to_le32(ino);
es->s_last_error_block = cpu_to_le64(block);
__save_error_info(sb, function, line);

va_start(args, fmt);

vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
sb->s_id, function, line, grp);
if (ino)
printk("inode %lu: ", ino);
printk(KERN_CONT "inode %lu: ", ino);
if (block)
printk("block %llu:", (unsigned long long) block);
vprintk(fmt, args);
printk("\n");
printk(KERN_CONT "block %llu:", (unsigned long long) block);
printk(KERN_CONT "%pV\n", &vaf);
va_end(args);

if (test_opt(sb, ERRORS_CONT)) {
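
The super.c error-reporting hunks above replace each printk()/vprintk() pair with a single printk() that uses "%pV" and struct va_format, so the log level and the whole message go out in one call. A minimal sketch of the idiom, with a hypothetical helper name:

/*
 * Sketch of the %pV idiom: package the caller's format string and
 * va_list in a struct va_format and print it in one printk().
 */
static void example_warn(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs (%s): %pV\n", sb->s_id, &vaf);
	va_end(args);
}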
@ -808,21 +818,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
/*
* Note: We can be called before EXT4_SB(sb)->s_journal is set,
* therefore it can be null here. Don't check it, just initialize
* jinode.
*/
jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
ei->jinode = NULL;
INIT_LIST_HEAD(&ei->i_completed_io_list);
spin_lock_init(&ei->i_completed_io_lock);
ei->cur_aio_dio = NULL;
@ -898,9 +902,12 @@ void ext4_clear_inode(struct inode *inode)
end_writeback(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
&EXT4_I(inode)->jinode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
jbd2_free_inode(EXT4_I(inode)->jinode);
EXT4_I(inode)->jinode = NULL;
}
}

static inline void ext4_show_quota_options(struct seq_file *seq,
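
The two hunks above stop embedding a jbd2_inode in ext4_inode_info: ext4_alloc_inode() now just clears ei->jinode, and ext4_clear_inode() releases and frees it only if one was ever attached. A hypothetical sketch of the matching on-demand setup (the function name and error handling are illustrative, not taken from this commit):

/*
 * Illustrative only: attach a dynamically allocated jbd2_inode the
 * first time an inode needs one.  Real code would also serialize
 * against concurrent callers.
 */
static int example_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if (ei->jinode)
		return 0;	/* already set up */

	jinode = jbd2_alloc_inode(GFP_KERNEL);
	if (!jinode)
		return -ENOMEM;

	jbd2_journal_init_jbd_inode(jinode, inode);
	ei->jinode = jinode;
	return 0;
}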
@ -1393,7 +1400,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
sbi->s_qf_names[qtype] = NULL;
return 0;
}
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sb, QUOTA);
return 1;
}

@ -1448,21 +1455,21 @@ static int parse_options(char *options, struct super_block *sb,
switch (token) {
case Opt_bsd_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, MINIX_DF);
clear_opt(sb, MINIX_DF);
break;
case Opt_minix_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, MINIX_DF);
set_opt(sb, MINIX_DF);

break;
case Opt_grpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
set_opt(sb, GRPID);

break;
case Opt_nogrpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sbi->s_mount_opt, GRPID);
clear_opt(sb, GRPID);

break;
case Opt_resuid:
@ -1480,38 +1487,38 @@ static int parse_options(char *options, struct super_block *sb,
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_RO);
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
clear_opt(sb, ERRORS_CONT);
clear_opt(sb, ERRORS_RO);
set_opt(sb, ERRORS_PANIC);
break;
case Opt_err_ro:
clear_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_RO);
clear_opt(sb, ERRORS_CONT);
clear_opt(sb, ERRORS_PANIC);
set_opt(sb, ERRORS_RO);
break;
case Opt_err_cont:
clear_opt(sbi->s_mount_opt, ERRORS_RO);
clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sbi->s_mount_opt, ERRORS_CONT);
clear_opt(sb, ERRORS_RO);
clear_opt(sb, ERRORS_PANIC);
set_opt(sb, ERRORS_CONT);
break;
case Opt_nouid32:
set_opt(sbi->s_mount_opt, NO_UID32);
set_opt(sb, NO_UID32);
break;
case Opt_debug:
set_opt(sbi->s_mount_opt, DEBUG);
set_opt(sb, DEBUG);
break;
case Opt_oldalloc:
set_opt(sbi->s_mount_opt, OLDALLOC);
set_opt(sb, OLDALLOC);
break;
case Opt_orlov:
clear_opt(sbi->s_mount_opt, OLDALLOC);
clear_opt(sb, OLDALLOC);
break;
#ifdef CONFIG_EXT4_FS_XATTR
case Opt_user_xattr:
set_opt(sbi->s_mount_opt, XATTR_USER);
set_opt(sb, XATTR_USER);
break;
case Opt_nouser_xattr:
clear_opt(sbi->s_mount_opt, XATTR_USER);
clear_opt(sb, XATTR_USER);
break;
#else
case Opt_user_xattr:
@ -1521,10 +1528,10 @@ static int parse_options(char *options, struct super_block *sb,
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
case Opt_acl:
set_opt(sbi->s_mount_opt, POSIX_ACL);
set_opt(sb, POSIX_ACL);
break;
case Opt_noacl:
clear_opt(sbi->s_mount_opt, POSIX_ACL);
clear_opt(sb, POSIX_ACL);
break;
#else
case Opt_acl:
@ -1543,7 +1550,7 @@ static int parse_options(char *options, struct super_block *sb,
"Cannot specify journal on remount");
return 0;
}
set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
set_opt(sb, UPDATE_JOURNAL);
break;
case Opt_journal_dev:
if (is_remount) {
@ -1556,14 +1563,14 @@ static int parse_options(char *options, struct super_block *sb,
*journal_devnum = option;
break;
case Opt_journal_checksum:
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_journal_async_commit:
set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
set_opt(sb, JOURNAL_ASYNC_COMMIT);
set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_noload:
set_opt(sbi->s_mount_opt, NOLOAD);
set_opt(sb, NOLOAD);
break;
case Opt_commit:
if (match_int(&args[0], &option))
@ -1606,15 +1613,15 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
clear_opt(sb, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
case Opt_data_err_abort:
set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
set_opt(sb, DATA_ERR_ABORT);
break;
case Opt_data_err_ignore:
clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
clear_opt(sb, DATA_ERR_ABORT);
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
@ -1654,12 +1661,12 @@ set_qf_format:
break;
case Opt_quota:
case Opt_usrquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, USRQUOTA);
set_opt(sb, QUOTA);
set_opt(sb, USRQUOTA);
break;
case Opt_grpquota:
set_opt(sbi->s_mount_opt, QUOTA);
set_opt(sbi->s_mount_opt, GRPQUOTA);
set_opt(sb, QUOTA);
set_opt(sb, GRPQUOTA);
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
@ -1667,9 +1674,9 @@ set_qf_format:
"options when quota turned on");
return 0;
}
clear_opt(sbi->s_mount_opt, QUOTA);
clear_opt(sbi->s_mount_opt, USRQUOTA);
clear_opt(sbi->s_mount_opt, GRPQUOTA);
clear_opt(sb, QUOTA);
clear_opt(sb, USRQUOTA);
clear_opt(sb, GRPQUOTA);
break;
#else
case Opt_quota:
@ -1695,7 +1702,7 @@ set_qf_format:
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
break;
case Opt_nobarrier:
clear_opt(sbi->s_mount_opt, BARRIER);
clear_opt(sb, BARRIER);
break;
case Opt_barrier:
if (args[0].from) {
@ -1704,9 +1711,9 @@ set_qf_format:
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sbi->s_mount_opt, BARRIER);
set_opt(sb, BARRIER);
else
clear_opt(sbi->s_mount_opt, BARRIER);
clear_opt(sb, BARRIER);
break;
case Opt_ignore:
break;
@ -1730,17 +1737,17 @@ set_qf_format:
"Ignoring deprecated bh option");
break;
case Opt_i_version:
set_opt(sbi->s_mount_opt, I_VERSION);
set_opt(sb, I_VERSION);
sb->s_flags |= MS_I_VERSION;
break;
case Opt_nodelalloc:
clear_opt(sbi->s_mount_opt, DELALLOC);
clear_opt(sb, DELALLOC);
break;
case Opt_mblk_io_submit:
set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
set_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_nomblk_io_submit:
clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
clear_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_stripe:
if (match_int(&args[0], &option))
@ -1750,13 +1757,13 @@ set_qf_format:
sbi->s_stripe = option;
break;
case Opt_delalloc:
set_opt(sbi->s_mount_opt, DELALLOC);
set_opt(sb, DELALLOC);
break;
case Opt_block_validity:
set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
set_opt(sb, BLOCK_VALIDITY);
break;
case Opt_noblock_validity:
clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
clear_opt(sb, BLOCK_VALIDITY);
break;
case Opt_inode_readahead_blks:
if (match_int(&args[0], &option))
@ -1780,7 +1787,7 @@ set_qf_format:
option);
break;
case Opt_noauto_da_alloc:
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
set_opt(sb, NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
if (args[0].from) {
@ -1789,24 +1796,24 @@ set_qf_format:
} else
option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
clear_opt(sb, NO_AUTO_DA_ALLOC);
else
set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
set_opt(sb,NO_AUTO_DA_ALLOC);
break;
case Opt_discard:
set_opt(sbi->s_mount_opt, DISCARD);
set_opt(sb, DISCARD);
break;
case Opt_nodiscard:
clear_opt(sbi->s_mount_opt, DISCARD);
clear_opt(sb, DISCARD);
break;
case Opt_dioread_nolock:
set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
set_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_dioread_lock:
clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
clear_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_init_inode_table:
set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
@ -1817,7 +1824,7 @@ set_qf_format:
sbi->s_li_wait_mult = option;
break;
case Opt_noinit_inode_table:
clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
clear_opt(sb, INIT_INODE_TABLE);
break;
default:
ext4_msg(sb, KERN_ERR,
@ -1829,10 +1836,10 @@ set_qf_format:
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi->s_mount_opt, USRQUOTA);
clear_opt(sb, USRQUOTA);

if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi->s_mount_opt, GRPQUOTA);
clear_opt(sb, GRPQUOTA);

if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
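
The parse_options() hunks above switch every set_opt()/clear_opt() call from taking sbi->s_mount_opt to taking the superblock itself, which also makes room for a second flag word (s_mount_opt2). The macro definitions are not part of this excerpt; a plausible shape for them, stated purely as an assumption, is:

/* Assumed macro shape, not shown in this diff. */
#define set_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_##opt)
#define clear_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt &= ~EXT4_MOUNT_##opt)
#define set_opt2(sb, opt)	(EXT4_SB(sb)->s_mount_opt2 |= EXT4_MOUNT2_##opt)
#define clear_opt2(sb, opt)	(EXT4_SB(sb)->s_mount_opt2 &= ~EXT4_MOUNT2_##opt)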
@ -1902,12 +1909,12 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
ext4_commit_super(sb, 1);
if (test_opt(sb, DEBUG))
printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
"bpg=%lu, ipg=%lu, mo=%04x]\n",
"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
sb->s_blocksize,
sbi->s_groups_count,
EXT4_BLOCKS_PER_GROUP(sb),
EXT4_INODES_PER_GROUP(sb),
sbi->s_mount_opt);
sbi->s_mount_opt, sbi->s_mount_opt2);

return res;
}
@ -1937,14 +1944,13 @@ static int ext4_fill_flex_info(struct super_block *sb)
size = flex_group_count * sizeof(struct flex_groups);
sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
sbi->s_flex_groups = vmalloc(size);
if (sbi->s_flex_groups)
memset(sbi->s_flex_groups, 0, size);
}
if (sbi->s_flex_groups == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory for "
"%u flex groups", flex_group_count);
goto failed;
sbi->s_flex_groups = vzalloc(size);
if (sbi->s_flex_groups == NULL) {
ext4_msg(sb, KERN_ERR,
"not enough memory for %u flex groups",
flex_group_count);
goto failed;
}
}

for (i = 0; i < sbi->s_groups_count; i++) {
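
The ext4_fill_flex_info() hunk above replaces the open-coded vmalloc()-plus-memset() fallback with vzalloc(), which returns zeroed memory directly. The general pattern, sketched with a hypothetical helper (freeing such a buffer must check is_vmalloc_addr() to pick vfree() or kfree()):

/* Sketch only: try a physically contiguous allocation, fall back to vmalloc space. */
static void *example_alloc_table(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);	/* zeroed, virtually contiguous */
	return p;
}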
@ -2923,7 +2929,7 @@ static int ext4_register_li_request(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_li_request *elr;
ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
int ret;
int ret = 0;

if (sbi->s_li_request != NULL)
return 0;
@ -3078,41 +3084,41 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)

/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
set_opt(sb, INIT_INODE_TABLE);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
set_opt(sb, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
"2.6.38");
set_opt(sbi->s_mount_opt, GRPID);
set_opt(sb, GRPID);
}
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
set_opt(sb, NO_UID32);
#ifdef CONFIG_EXT4_FS_XATTR
if (def_mount_opts & EXT4_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (def_mount_opts & EXT4_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
set_opt(sb, POSIX_ACL);
#endif
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sbi->s_mount_opt, JOURNAL_DATA);
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
set_opt(sbi->s_mount_opt, ORDERED_DATA);
set_opt(sb, ORDERED_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
set_opt(sb, WRITEBACK_DATA);

if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
set_opt(sb, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
set_opt(sb, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
set_opt(sb, ERRORS_RO);
if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
set_opt(sb, BLOCK_VALIDITY);
if (def_mount_opts & EXT4_DEFM_DISCARD)
set_opt(sbi->s_mount_opt, DISCARD);
set_opt(sb, DISCARD);

sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@ -3121,7 +3127,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
set_opt(sbi->s_mount_opt, BARRIER);
set_opt(sb, BARRIER);

/*
* enable delayed allocation by default
@ -3129,7 +3135,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
if (!IS_EXT3_SB(sb) &&
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sbi->s_mount_opt, DELALLOC);
set_opt(sb, DELALLOC);

if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
&journal_devnum, &journal_ioprio, NULL, 0)) {
@ -3432,8 +3438,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"suppressed and not mounted read-only");
goto failed_mount_wq;
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
clear_opt(sb, DATA_FLAGS);
set_opt(sb, WRITEBACK_DATA);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
@ -3471,9 +3477,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
set_opt(sbi->s_mount_opt, ORDERED_DATA);
set_opt(sb, ORDERED_DATA);
else
set_opt(sbi->s_mount_opt, JOURNAL_DATA);
set_opt(sb, JOURNAL_DATA);
break;

case EXT4_MOUNT_ORDERED_DATA:
@ -3563,18 +3569,18 @@ no_journal:
(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
"requested data journaling mode");
clear_opt(sbi->s_mount_opt, DELALLOC);
clear_opt(sb, DELALLOC);
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
"option - requested data journaling mode");
clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
clear_opt(sb, DIOREAD_NOLOCK);
}
if (sb->s_blocksize < PAGE_SIZE) {
ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
"option - block size is too small");
clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
clear_opt(sb, DIOREAD_NOLOCK);
}
}

@ -4173,6 +4179,22 @@ static int ext4_unfreeze(struct super_block *sb)
return 0;
}

/*
* Structure to save mount options for ext4_remount's benefit
*/
struct ext4_mount_options {
unsigned long s_mount_opt;
unsigned long s_mount_opt2;
uid_t s_resuid;
gid_t s_resgid;
unsigned long s_commit_interval;
u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
int s_jquota_fmt;
char *s_qf_names[MAXQUOTAS];
#endif
};

static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
struct ext4_super_block *es;
@ -4193,6 +4215,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
lock_super(sb);
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
old_opts.s_mount_opt2 = sbi->s_mount_opt2;
old_opts.s_resuid = sbi->s_resuid;
old_opts.s_resgid = sbi->s_resgid;
old_opts.s_commit_interval = sbi->s_commit_interval;
@ -4346,6 +4369,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
restore_opts:
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sbi->s_commit_interval = old_opts.s_commit_interval;
@ -427,23 +427,23 @@ cleanup:
static int
ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int i_error, b_error;
int ret, ret2;

down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
if (i_error < 0) {
b_error = 0;
} else {
if (buffer) {
buffer += i_error;
buffer_size -= i_error;
}
b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
if (b_error < 0)
i_error = 0;
ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
if (ret < 0)
goto errout;
if (buffer) {
buffer += ret;
buffer_size -= ret;
}
ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
if (ret < 0)
goto errout;
ret += ret2;
errout:
up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
return i_error + b_error;
return ret;
}

/*
@ -947,7 +947,7 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
/*
* ext4_xattr_set_handle()
*
* Create, replace or remove an extended attribute for this inode. Buffer
* Create, replace or remove an extended attribute for this inode. Value
* is NULL to remove an existing extended attribute, and non-NULL to
* either replace an existing extended attribute, or create a new extended
* attribute. The flags XATTR_REPLACE and XATTR_CREATE

@ -43,6 +43,7 @@
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/bitops.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>
@ -93,6 +94,7 @@ EXPORT_SYMBOL(jbd2_journal_file_inode);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
EXPORT_SYMBOL(jbd2_inode_cache);

static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
@ -827,7 +829,7 @@ static journal_t * journal_init_common (void)

journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
goto fail;
return NULL;

init_waitqueue_head(&journal->j_wait_transaction_locked);
init_waitqueue_head(&journal->j_wait_logspace);
@ -852,14 +854,12 @@ static journal_t * journal_init_common (void)
err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
if (err) {
kfree(journal);
goto fail;
return NULL;
}

spin_lock_init(&journal->j_history_lock);

return journal;
fail:
return NULL;
}

/* jbd2_journal_init_dev and jbd2_journal_init_inode:
@ -1982,7 +1982,6 @@ static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
static struct journal_head *journal_alloc_journal_head(void)
{
struct journal_head *ret;
static unsigned long last_warning;

#ifdef CONFIG_JBD2_DEBUG
atomic_inc(&nr_journal_heads);
@ -1990,11 +1989,7 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
if (!ret) {
jbd_debug(1, "out of memory for journal_head\n");
if (time_after(jiffies, last_warning + 5*HZ)) {
printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
__func__);
last_warning = jiffies;
}
pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
while (!ret) {
yield();
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@ -2292,17 +2287,19 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)

#endif

struct kmem_cache *jbd2_handle_cache;
struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;

static int __init journal_init_handle_cache(void)
{
jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
sizeof(handle_t),
0, /* offset */
SLAB_TEMPORARY, /* flags */
NULL); /* ctor */
jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
if (jbd2_handle_cache == NULL) {
printk(KERN_EMERG "JBD: failed to create handle cache\n");
printk(KERN_EMERG "JBD2: failed to create handle cache\n");
return -ENOMEM;
}
jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
if (jbd2_inode_cache == NULL) {
printk(KERN_EMERG "JBD2: failed to create inode cache\n");
kmem_cache_destroy(jbd2_handle_cache);
return -ENOMEM;
}
return 0;
@ -2312,6 +2309,9 @@ static void jbd2_journal_destroy_handle_cache(void)
{
if (jbd2_handle_cache)
kmem_cache_destroy(jbd2_handle_cache);
if (jbd2_inode_cache)
kmem_cache_destroy(jbd2_inode_cache);

}

/*

@ -299,10 +299,10 @@ int jbd2_journal_skip_recovery(journal_t *journal)
#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction -
be32_to_cpu(journal->j_superblock->s_sequence);
#endif
jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
dropped, (dropped == 1) ? "" : "s");
#endif
journal->j_transaction_sequence = ++info.end_transaction;
}

@ -340,9 +340,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
jbd2_free_handle(handle);
current->journal_info = NULL;
handle = ERR_PTR(err);
goto out;
}
out:
return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);
@ -589,7 +587,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
transaction = handle->h_transaction;
journal = transaction->t_journal;

jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

JBUFFER_TRACE(jh, "entry");
repeat:
@ -774,7 +772,7 @@ done:
J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
"Possible IO failure.\n");
page = jh2bh(jh)->b_page;
offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
offset = offset_in_page(jh2bh(jh)->b_data);
source = kmap_atomic(page, KM_USER0);
/* Fire data frozen trigger just before we copy the data */
jbd2_buffer_frozen_trigger(jh, source + offset,

@ -94,7 +94,7 @@ extern void jbd2_free(void *ptr, size_t size);
*
* This is an opaque datatype.
**/
typedef struct handle_s handle_t; /* Atomic operation type */
typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */


/**
@ -416,7 +416,7 @@ struct jbd2_revoke_table_s;
* in so it can be fixed later.
*/

struct handle_s
struct jbd2_journal_handle
{
/* Which compound transaction is this update a part of? */
transaction_t *h_transaction;
@ -1158,6 +1158,22 @@ static inline void jbd2_free_handle(handle_t *handle)
kmem_cache_free(jbd2_handle_cache, handle);
}

/*
* jbd2_inode management (optional, for those file systems that want to use
* dynamically allocated jbd2_inode structures)
*/
extern struct kmem_cache *jbd2_inode_cache;

static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
{
return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
}

static inline void jbd2_free_inode(struct jbd2_inode *jinode)
{
kmem_cache_free(jbd2_inode_cache, jinode);
}

/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
extern int jbd2_journal_init_revoke(journal_t *, int);
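
The journal_init_handle_cache() hunk above also switches to the KMEM_CACHE() helper, which derives the slab cache's name, size, and alignment from the struct type itself. A minimal sketch with a hypothetical record type, assuming only that helper:

/* Sketch only: create a slab cache for a hypothetical record type. */
struct example_record {
	int id;
	char name[32];
};

static struct kmem_cache *example_record_cache;

static int __init example_record_cache_init(void)
{
	example_record_cache = KMEM_CACHE(example_record, 0);
	return example_record_cache ? 0 : -ENOMEM;
}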