address hfs on-disk corruption robustness review comments
Address Roman's review comments for the previously sent on-disk corruption hfs robustness patch:

- use 0 as a failure value, rather than making a new macro HFS_BAD_KEYLEN, and use a switch statement instead of if's.
- add a new fail: target to __hfs_brec_find to skip assignments using bad values when exiting with a failure.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 55581d018e
parent 7c28cbaed6
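For reference, a minimal user-space sketch of the error-path shape this change settles on: a key length of 0 now signals on-disk corruption, and the new fail: target returns -EINVAL without running the done: assignments that would use the bad values. The demo_* names, the demo_keylen() helper, and all numeric values below are stand-ins for illustration, not the real HFS code; only the keylength/entryoffset/entrylength fields mirror the diff that follows.

/*
 * Simplified sketch (not kernel code): keylen == 0 means "bad key length",
 * and fail: skips the done: assignments entirely.
 */
#include <errno.h>
#include <stdio.h>

struct demo_find_data {
	unsigned keylength;
	unsigned entryoffset;
	unsigned entrylength;
};

/* Stand-in for hfs_brec_keylen(): returns 0 when the record looks corrupt. */
static unsigned demo_keylen(int rec)
{
	return rec < 0 ? 0 : 6;
}

static int demo_brec_find(struct demo_find_data *fd, int rec)
{
	unsigned keylen, off = 16, len = 32;
	int res;

	keylen = demo_keylen(rec);
	if (keylen == 0) {	/* 0 now means "bad key length" */
		res = -EINVAL;
		goto fail;	/* skip the done: assignments below */
	}
	res = 0;
	goto done;		/* the real function reaches done: after its search loop */
done:
	fd->keylength = keylen;
	fd->entryoffset = off + keylen;
	fd->entrylength = len - keylen;
fail:
	return res;
}

int main(void)
{
	struct demo_find_data fd = { 0, 0, 0 };

	printf("good record: %d\n", demo_brec_find(&fd, 1));	/* prints 0 */
	printf("bad record:  %d\n", demo_brec_find(&fd, -1));	/* prints -22 (-EINVAL) */
	return 0;
}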
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -52,9 +52,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
 		rec = (e + b) / 2;
 		len = hfs_brec_lenoff(bnode, rec, &off);
 		keylen = hfs_brec_keylen(bnode, rec);
-		if (keylen == HFS_BAD_KEYLEN) {
+		if (keylen == 0) {
 			res = -EINVAL;
-			goto done;
+			goto fail;
 		}
 		hfs_bnode_read(bnode, fd->key, off, keylen);
 		cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
@@ -71,9 +71,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
 	if (rec != e && e >= 0) {
 		len = hfs_brec_lenoff(bnode, e, &off);
 		keylen = hfs_brec_keylen(bnode, e);
-		if (keylen == HFS_BAD_KEYLEN) {
+		if (keylen == 0) {
 			res = -EINVAL;
-			goto done;
+			goto fail;
 		}
 		hfs_bnode_read(bnode, fd->key, off, keylen);
 	}
@@ -83,6 +83,7 @@ done:
 	fd->keylength = keylen;
 	fd->entryoffset = off + keylen;
 	fd->entrylength = len - keylen;
+fail:
 	return res;
 }
 
@@ -206,7 +207,7 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
 
 	len = hfs_brec_lenoff(bnode, fd->record, &off);
 	keylen = hfs_brec_keylen(bnode, fd->record);
-	if (keylen == HFS_BAD_KEYLEN) {
+	if (keylen == 0) {
 		res = -EINVAL;
 		goto out;
 	}

--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -49,14 +49,14 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
 		if (retval > node->tree->max_key_len + 2) {
 			printk(KERN_ERR "hfs: keylen %d too large\n",
 				retval);
-			retval = HFS_BAD_KEYLEN;
+			retval = 0;
 		}
 	} else {
 		retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
 		if (retval > node->tree->max_key_len + 1) {
 			printk(KERN_ERR "hfs: keylen %d too large\n",
 				retval);
-			retval = HFS_BAD_KEYLEN;
+			retval = 0;
 		}
 	}
 }

--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -81,15 +81,23 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
 		goto fail_page;
 	if (!tree->node_count)
 		goto fail_page;
-	if ((id == HFS_EXT_CNID) && (tree->max_key_len != HFS_MAX_EXT_KEYLEN)) {
-		printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
-			tree->max_key_len);
-		goto fail_page;
-	}
-	if ((id == HFS_CAT_CNID) && (tree->max_key_len != HFS_MAX_CAT_KEYLEN)) {
-		printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
-			tree->max_key_len);
-		goto fail_page;
+	switch (id) {
+	case HFS_EXT_CNID:
+		if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
+			printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
+				tree->max_key_len);
+			goto fail_page;
+		}
+		break;
+	case HFS_CAT_CNID:
+		if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
+			printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
+				tree->max_key_len);
+			goto fail_page;
+		}
+		break;
+	default:
+		BUG();
 	}
 
 	tree->node_size_shift = ffs(size) - 1;

--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -28,8 +28,6 @@
 #define HFS_MAX_NAMELEN		128
 #define HFS_MAX_VALENCE		32767U
 
-#define HFS_BAD_KEYLEN		0xFF
-
 /* Meanings of the drAtrb field of the MDB,
  * Reference: _Inside Macintosh: Files_ p. 2-61
  */
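As a further illustration of the switch-based validation that replaces the two if-checks in hfs_btree_open(), here is a small user-space sketch. The DEMO_* constants are placeholder values (not the definitions from fs/hfs/hfs.h), a -1 return stands in for the kernel's printk()/goto fail_page path, and abort() stands in for BUG() on an unexpected B-tree id.

/*
 * User-space sketch only: placeholder ids and limits, with abort()
 * standing in for the kernel's BUG() on an unknown tree id.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_EXT_CNID		3	/* placeholder ids for illustration */
#define DEMO_CAT_CNID		4
#define DEMO_MAX_EXT_KEYLEN	7	/* placeholder limits, not the hfs.h values */
#define DEMO_MAX_CAT_KEYLEN	37

/* Return 0 if max_key_len is acceptable for the given tree id, -1 if not. */
static int demo_check_max_key_len(unsigned id, unsigned max_key_len)
{
	switch (id) {
	case DEMO_EXT_CNID:
		if (max_key_len != DEMO_MAX_EXT_KEYLEN) {
			fprintf(stderr, "hfs: invalid extent max_key_len %u\n",
				max_key_len);
			return -1;	/* kernel: goto fail_page */
		}
		break;
	case DEMO_CAT_CNID:
		if (max_key_len != DEMO_MAX_CAT_KEYLEN) {
			fprintf(stderr, "hfs: invalid catalog max_key_len %u\n",
				max_key_len);
			return -1;	/* kernel: goto fail_page */
		}
		break;
	default:
		abort();	/* kernel: BUG() -- only known ids should reach here */
	}
	return 0;
}

int main(void)
{
	printf("extent, expected len:    %d\n", demo_check_max_key_len(DEMO_EXT_CNID, 7));
	printf("catalog, unexpected len: %d\n", demo_check_max_key_len(DEMO_CAT_CNID, 9));
	return 0;
}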