Squashfs: move table allocation into squashfs_read_table()
This eliminates a lot of duplicate code.

Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk>
commit 82de647e1f
parent 117a91e0f2
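The patch below changes squashfs_read_table() from filling a caller-supplied buffer and returning an int error code to allocating the table itself and returning either the table or an ERR_PTR()-encoded error, so every table reader loses its own kmalloc()/kfree() error handling. A minimal sketch of the two calling conventions, for orientation only (placeholder variables, not part of the patch):

	/* before: caller allocates the buffer and checks an int return */
	table = kmalloc(length, GFP_KERNEL);
	if (table == NULL)
		return ERR_PTR(-ENOMEM);
	err = squashfs_read_table(sb, table, block, length);
	if (err < 0) {
		kfree(table);
		return ERR_PTR(err);
	}
	return table;

	/* after: squashfs_read_table() allocates and reports errors via ERR_PTR() */
	return squashfs_read_table(sb, block, length);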
fs/squashfs/cache.c:

@@ -393,19 +393,36 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
 /*
  * Read a filesystem table (uncompressed sequence of bytes) from disk
  */
-int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
-	int length)
+void *squashfs_read_table(struct super_block *sb, u64 block, int length)
 {
 	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	int i, res;
-	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
+	void *table, *buffer, **data;
+
+	table = buffer = kmalloc(length, GFP_KERNEL);
+	if (table == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+	if (data == NULL) {
+		res = -ENOMEM;
+		goto failed;
+	}
 
 	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
 		data[i] = buffer;
+
 	res = squashfs_read_data(sb, data, block, length |
 		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
+
 	kfree(data);
-	return res;
+
+	if (res < 0)
+		goto failed;
+
+	return table;
+
+failed:
+	kfree(table);
+	return ERR_PTR(res);
 }
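The new squashfs_read_table() reads the whole table into one contiguous kmalloc() buffer; the temporary data[] array only points at PAGE_CACHE_SIZE-sized strides inside that buffer for squashfs_read_data(). Error reporting uses the standard <linux/err.h> helpers; a self-contained sketch of that convention, with hypothetical function names (not part of the patch):

	#include <linux/err.h>
	#include <linux/slab.h>

	static void *example_read(int fail)
	{
		void *buf;

		if (fail)
			return ERR_PTR(-EIO);		/* encode the errno in the pointer */
		buf = kmalloc(64, GFP_KERNEL);
		return buf ? buf : ERR_PTR(-ENOMEM);	/* never return NULL */
	}

	static int example_caller(void)
	{
		void *p = example_read(0);

		if (IS_ERR(p))
			return PTR_ERR(p);		/* recover the original errno */
		kfree(p);
		return 0;
	}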
fs/squashfs/export.c:

@@ -124,27 +124,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 		u64 lookup_table_start, unsigned int inodes)
 {
 	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
-	__le64 *inode_lookup_table;
-	int err;
 
 	TRACE("In read_inode_lookup_table, length %d\n", length);
 
-	/* Allocate inode lookup table indexes */
-	inode_lookup_table = kmalloc(length, GFP_KERNEL);
-	if (inode_lookup_table == NULL) {
-		ERROR("Failed to allocate inode lookup table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start,
-		length);
-	if (err < 0) {
-		ERROR("unable to read inode lookup table\n");
-		kfree(inode_lookup_table);
-		return ERR_PTR(err);
-	}
-
-	return inode_lookup_table;
+	return squashfs_read_table(sb, lookup_table_start, length);
 }
fs/squashfs/fragment.c:

@@ -74,23 +74,6 @@ __le64 *squashfs_read_fragment_index_table(struct super_block *sb,
 		u64 fragment_table_start, unsigned int fragments)
 {
 	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
-	__le64 *fragment_index;
-	int err;
-
-	/* Allocate fragment lookup table indexes */
-	fragment_index = kmalloc(length, GFP_KERNEL);
-	if (fragment_index == NULL) {
-		ERROR("Failed to allocate fragment index table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	err = squashfs_read_table(sb, fragment_index, fragment_table_start,
-		length);
-	if (err < 0) {
-		ERROR("unable to read fragment index table\n");
-		kfree(fragment_index);
-		return ERR_PTR(err);
-	}
 
-	return fragment_index;
+	return squashfs_read_table(sb, fragment_table_start, length);
 }
fs/squashfs/id.c:

@@ -69,24 +69,8 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
 		u64 id_table_start, unsigned short no_ids)
 {
 	unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
-	__le64 *id_table;
-	int err;
 
 	TRACE("In read_id_index_table, length %d\n", length);
 
-	/* Allocate id lookup table indexes */
-	id_table = kmalloc(length, GFP_KERNEL);
-	if (id_table == NULL) {
-		ERROR("Failed to allocate id index table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	err = squashfs_read_table(sb, id_table, id_table_start, length);
-	if (err < 0) {
-		ERROR("unable to read id index table\n");
-		kfree(id_table);
-		return ERR_PTR(err);
-	}
-
-	return id_table;
+	return squashfs_read_table(sb, id_table_start, length);
 }
fs/squashfs/squashfs.h:

@@ -44,7 +44,7 @@ extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
 		u64, int);
 extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
 		u64, int);
-extern int squashfs_read_table(struct super_block *, void *, u64, int);
+extern void *squashfs_read_table(struct super_block *, u64, int);
 
 /* decompressor.c */
 extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
fs/squashfs/super.c:

@@ -95,12 +95,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	msblk = sb->s_fs_info;
 
-	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-	if (sblk == NULL) {
-		ERROR("Failed to allocate squashfs_super_block\n");
-		goto failure;
-	}
-
 	msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
@@ -114,10 +108,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	 * of bytes_used) we need to set it to an initial sensible dummy value
 	 */
 	msblk->bytes_used = sizeof(*sblk);
-	err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk));
+	sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
 
-	if (err < 0) {
+	if (IS_ERR(sblk)) {
 		ERROR("unable to read squashfs_super_block\n");
+		err = PTR_ERR(sblk);
+		sblk = NULL;
 		goto failed_mount;
 	}
@@ -222,6 +218,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	msblk->id_table = squashfs_read_id_index_table(sb,
 		le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
 	if (IS_ERR(msblk->id_table)) {
 		ERROR("unable to read id index table\n");
+		err = PTR_ERR(msblk->id_table);
 		msblk->id_table = NULL;
 		goto failed_mount;
@@ -242,6 +239,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	msblk->fragment_index = squashfs_read_fragment_index_table(sb,
 		le64_to_cpu(sblk->fragment_table_start), fragments);
 	if (IS_ERR(msblk->fragment_index)) {
 		ERROR("unable to read fragment index table\n");
+		err = PTR_ERR(msblk->fragment_index);
 		msblk->fragment_index = NULL;
 		goto failed_mount;
@@ -256,6 +254,7 @@ allocate_lookup_table:
 	msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
 		lookup_table_start, msblk->inodes);
 	if (IS_ERR(msblk->inode_lookup_table)) {
 		ERROR("unable to read inode lookup table\n");
+		err = PTR_ERR(msblk->inode_lookup_table);
 		msblk->inode_lookup_table = NULL;
 		goto failed_mount;
@@ -273,6 +272,7 @@ allocate_xattr_table:
 	msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
 		xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
 	if (IS_ERR(msblk->xattr_id_table)) {
 		ERROR("unable to read xattr id index table\n");
+		err = PTR_ERR(msblk->xattr_id_table);
 		msblk->xattr_id_table = NULL;
 		if (err != -ENOTSUPP)
@@ -318,11 +318,6 @@ failed_mount:
 	sb->s_fs_info = NULL;
 	kfree(sblk);
 	return err;
-
-failure:
-	kfree(sb->s_fs_info);
-	sb->s_fs_info = NULL;
-	return -ENOMEM;
 }
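With the superblock buffer now allocated inside squashfs_read_table(), squashfs_fill_super() no longer needs the separate failure: exit for a failed kzalloc(); every error funnels through failed_mount, where kfree(sblk) stays safe because sblk is reset to NULL when the read fails (and kfree(NULL) is a no-op). Each table read above follows the same PTR_ERR()-then-NULL pattern, roughly as in this sketch (shortened arguments, not the patch itself):

	msblk->id_table = squashfs_read_id_index_table(sb, table_start, no_ids);
	if (IS_ERR(msblk->id_table)) {
		err = PTR_ERR(msblk->id_table);	/* keep the errno for the return */
		msblk->id_table = NULL;		/* so the shared cleanup path stays harmless */
		goto failed_mount;
	}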
fs/squashfs/xattr_id.c:

@@ -67,34 +67,18 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
 		u64 *xattr_table_start, int *xattr_ids)
 {
 	unsigned int len;
-	__le64 *xid_table;
-	struct squashfs_xattr_id_table id_table;
-	int err;
+	struct squashfs_xattr_id_table *id_table;
 
-	err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
-	if (err < 0) {
-		ERROR("unable to read xattr id table\n");
-		return ERR_PTR(err);
-	}
-	*xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
-	*xattr_ids = le32_to_cpu(id_table.xattr_ids);
+	id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+	if (IS_ERR(id_table))
+		return (__le64 *) id_table;
+
+	*xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+	*xattr_ids = le32_to_cpu(id_table->xattr_ids);
+	kfree(id_table);
 	len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
 
 	TRACE("In read_xattr_index_table, length %d\n", len);
 
-	/* Allocate xattr id lookup table indexes */
-	xid_table = kmalloc(len, GFP_KERNEL);
-	if (xid_table == NULL) {
-		ERROR("Failed to allocate xattr id index table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
-	if (err < 0) {
-		ERROR("unable to read xattr id index table\n");
-		kfree(xid_table);
-		return ERR_PTR(err);
-	}
-
-	return xid_table;
+	return squashfs_read_table(sb, start + sizeof(*id_table), len);
 }