hfsplus: lift the 2TB size limit
Replace the hardcoded 2TB limit with a dynamic limit based on the block size, now that we have fixed the few overflows preventing operation with large volumes.

Signed-off-by: Christoph Hellwig <hch@tuxera.com>
parent 4ba2d5fdcf
commit c6d5f5fa65
fs/hfsplus/super.c

@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbi->rsrc_clump_blocks)
 		sbi->rsrc_clump_blocks = 1;
 
+	err = generic_check_addressable(sbi->alloc_blksz_shift,
+					sbi->total_blocks);
+	if (err) {
+		printk(KERN_ERR "hfs: filesystem size too large.\n");
+		goto out_free_vhdr;
+	}
+
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_flags |= MS_RDONLY;
 	}
 
+	err = -EINVAL;
+
 	/* Load metadata objects (B*Trees) */
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
fs/hfsplus/wrapper.c

@@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
 
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
 		goto out;
-	if ((u64)part_start + part_size > 0x100000000ULL) {
-		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		goto out;
-	}
 
 	error = -ENOMEM;
 	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
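For reference, generic_check_addressable() rejects a filesystem whose last block cannot be indexed through the kernel's sector and page-cache index types, so the size ceiling now follows from the allocation block size rather than a hardcoded constant. The sketch below is a minimal user-space model of that check, not the kernel code itself: check_addressable(), PAGE_SHIFT, SECTOR_T_MAX and PGOFF_T_MAX are illustrative assumptions (4 KiB pages, a 64-bit sector_t, a 32-bit page index as on a 32-bit build).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12          /* assumed 4 KiB pages */
#define SECTOR_T_MAX UINT64_MAX  /* assumed 64-bit sector_t */
#define PGOFF_T_MAX  UINT32_MAX  /* assumed 32-bit page-cache index */

/*
 * Hypothetical user-space model of the addressability check performed by
 * generic_check_addressable(): the volume's last block must be reachable
 * both as a 512-byte sector index and as a page-cache index.
 */
static int check_addressable(unsigned int blocksize_bits, uint64_t num_blocks)
{
	uint64_t last_fs_block = num_blocks - 1;
	uint64_t last_fs_page  = last_fs_block >> (PAGE_SHIFT - blocksize_bits);

	if (num_blocks == 0)
		return 0;

	/* the block size must lie between one 512-byte sector and one page */
	if (blocksize_bits < 9 || blocksize_bits > PAGE_SHIFT)
		return -1;

	if (last_fs_block > (SECTOR_T_MAX >> (blocksize_bits - 9)) ||
	    last_fs_page > PGOFF_T_MAX)
		return -2;	/* filesystem size too large */

	return 0;
}

int main(void)
{
	/* a volume of 2^32 allocation blocks of 4 KiB each, i.e. 16 TiB */
	printf("16 TiB volume, 4 KiB blocks: %s\n",
	       check_addressable(12, 1ULL << 32) ? "too large" : "ok");
	return 0;
}

Under these assumptions a 16 TiB volume (the most an HFS+ 32-bit allocation-block count can describe with 4 KiB blocks) passes the check, well past the old 2 TB cutoff.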