[JFFS2] Switch to using an array of jffs2_raw_node_refs instead of a list.
This allows us to drop another pointer from the struct jffs2_raw_node_ref, shrinking it to 8 bytes on 32-bit machines (if the TEST_TOTLEN paranoia check is turned off, which will be committed soon).

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
commit 9bfeb691e7
parent f75e5097ef
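For orientation, a rough sketch (not the literal kernel declaration) of what the ref looks like after this change: the per-node next_phys pointer is gone, and the physically-next ref is found by stepping through an array, with the last slot of each slab-allocated block reserved as a marker.

    /* Sketch, assuming a 32-bit build: one pointer plus one uint32_t is
       8 bytes once the TEST_TOTLEN field goes away. */
    struct jffs2_raw_node_ref {
        struct jffs2_raw_node_ref *next_in_ino; /* per-inode chain; in the last
                                                    slot of a block it links to the
                                                    next block (REF_LINK_NODE) or
                                                    is unused (REF_EMPTY_NODE) */
        uint32_t flash_offset;                   /* flash offset | 2 flag bits */
    #ifdef TEST_TOTLEN
        uint32_t __totlen;                       /* paranoia check only */
    #endif
    };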
@@ -285,20 +285,25 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
 void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
-	struct jffs2_raw_node_ref *ref;
+	struct jffs2_raw_node_ref *block, *ref;
 	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
-	while(jeb->first_node) {
-		ref = jeb->first_node;
-		jeb->first_node = ref->next_phys;
-
-		/* Remove from the inode-list */
-		if (ref->next_in_ino)
+
+	block = ref = jeb->first_node;
+
+	while (ref) {
+		if (ref->flash_offset == REF_LINK_NODE) {
+			ref = ref->next_in_ino;
+			jffs2_free_refblock(block);
+			block = ref;
+			continue;
+		}
+		if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
 			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
 		/* else it was a non-inode node or already removed, so don't bother */
 
-		__jffs2_free_raw_node_ref(ref);
+		ref++;
 	}
-	jeb->last_node = NULL;
+	jeb->first_node = jeb->last_node = NULL;
 }
 
 static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
@@ -26,9 +26,6 @@ struct jffs2_inodirty;
 struct jffs2_sb_info {
 	struct mtd_info *mtd;
 
-	struct jffs2_raw_node_ref *refs;
-	int reserved_refs;
-
 	uint32_t highest_ino;
 	uint32_t checked_ino;
 
@@ -57,8 +57,8 @@ int __init jffs2_create_slab_caches(void)
 	if (!tmp_dnode_info_slab)
 		goto err;
 
-	raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
-					      sizeof(struct jffs2_raw_node_ref),
+	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
+					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
 					      0, 0, NULL, NULL);
 	if (!raw_node_ref_slab)
 		goto err;
@@ -190,38 +190,65 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
 	kmem_cache_free(tmp_dnode_info_slab, x);
 }
 
-int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
-				 struct jffs2_eraseblock *jeb, int nr)
-{
-	struct jffs2_raw_node_ref *p = c->refs;
-
-	dbg_memalloc("%d\n", nr);
-
-	while (nr && p) {
-		p = p->next_in_ino;
-		nr--;
-	}
-	while (nr) {
-		p = __jffs2_alloc_raw_node_ref();
-		if (!p)
-			return -ENOMEM;
-		p->next_in_ino = c->refs;
-		c->refs = p;
-		nr--;
-	}
-	c->reserved_refs = nr;
-	return 0;
-}
-
-struct jffs2_raw_node_ref *__jffs2_alloc_raw_node_ref(void)
+struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
 {
 	struct jffs2_raw_node_ref *ret;
 
 	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
 	dbg_memalloc("%p\n", ret);
+	if (ret) {
+		int i = 0;
+		for (i=0; i < REFS_PER_BLOCK; i++) {
+			ret[i].flash_offset = REF_EMPTY_NODE;
+			ret[i].next_in_ino = NULL;
+		}
+		ret[i].flash_offset = REF_LINK_NODE;
+		ret[i].next_in_ino = NULL;
+	}
 	return ret;
 }
 
-void __jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
+int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
+				 struct jffs2_eraseblock *jeb, int nr)
+{
+	struct jffs2_raw_node_ref **p, *ref;
+	int i = nr;
+
+	dbg_memalloc("%d\n", nr);
+
+	p = &jeb->last_node;
+	ref = *p;
+
+	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);
+
+	/* If jeb->last_node is really a valid node then skip over it */
+	if (ref && ref->flash_offset != REF_EMPTY_NODE)
+		ref++;
+
+	while (i) {
+		if (!ref) {
+			dbg_memalloc("Allocating new refblock linked from %p\n", p);
+			ref = *p = jffs2_alloc_refblock();
+			if (!ref)
+				return -ENOMEM;
+		}
+		if (ref->flash_offset == REF_LINK_NODE) {
+			p = &ref->next_in_ino;
+			ref = *p;
+			continue;
+		}
+		i--;
+		ref++;
+	}
+	jeb->allocated_refs = nr;
+
+	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
+		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
+		     jeb->last_node->next_in_ino);
+
+	return 0;
+}
+
+void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
 {
 	dbg_memalloc("%p\n", x);
 	kmem_cache_free(raw_node_ref_slab, x);
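A back-of-the-envelope check of the "blocks of about 256 bytes" sizing, as a standalone userspace sketch (the struct layout here is an assumption for a 32-bit build with TEST_TOTLEN enabled, not the kernel's exact definition):

    #include <stdio.h>

    struct jffs2_raw_node_ref {
        void *next_in_ino;
        unsigned int flash_offset;
        unsigned int __totlen;      /* present only with TEST_TOTLEN */
    };
    #define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1)

    int main(void)
    {
        /* On an ILP32 build this prints 20 usable refs and a 252-byte slab
           object: 20 real slots plus one terminator slot for REF_LINK_NODE. */
        printf("refs per block: %zu, object size: %zu\n",
               (size_t)REFS_PER_BLOCK,
               sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1));
        return 0;
    }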
@@ -954,18 +954,16 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
 	for (i=0; i<c->nr_blocks; i++) {
 		this = c->blocks[i].first_node;
 		while (this) {
-			next = this->next_phys;
-			__jffs2_free_raw_node_ref(this);
+			if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
+				next = this[REFS_PER_BLOCK].next_in_ino;
+			else
+				next = NULL;
+
+			jffs2_free_refblock(this);
 			this = next;
 		}
 		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
 	}
-	this = c->refs;
-	while (this) {
-		next = this->next_in_ino;
-		__jffs2_free_raw_node_ref(this);
-		this = next;
-	}
 }
 
 struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
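The indexing trick above relies on first_node always pointing at slot 0 of a refblock, so the terminator sits at a fixed index within the same allocation. A hedged helper to make that explicit (illustrative only, assuming the kernel definitions from this commit):

    static struct jffs2_raw_node_ref *refblock_next(struct jffs2_raw_node_ref *block)
    {
        /* Slot REFS_PER_BLOCK is the terminator: either a link to the
           next refblock, or the end of the chain. */
        if (block[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
            return block[REFS_PER_BLOCK].next_in_ino;
        return NULL;
    }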
@@ -1060,32 +1058,37 @@ struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
 {
 	struct jffs2_raw_node_ref *ref;
 
-	/* These will be preallocated _very_ shortly. */
-	ref = c->refs;
-	if (!c->refs) {
-		JFFS2_WARNING("Using non-preallocated refs!\n");
-		ref = __jffs2_alloc_raw_node_ref();
-		BUG_ON(!ref);
-		WARN_ON(1);
-	} else {
-		c->refs = ref->next_in_ino;
-	}
+	BUG_ON(!jeb->allocated_refs);
+	jeb->allocated_refs--;
+
+	ref = jeb->last_node;
+
+	dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
+		    ref->next_in_ino);
+
+	while (ref->flash_offset != REF_EMPTY_NODE) {
+		if (ref->flash_offset == REF_LINK_NODE)
+			ref = ref->next_in_ino;
+		else
+			ref++;
+	}
 
-	ref->next_phys = NULL;
+	dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
+		    ref->flash_offset, ofs, ref->next_in_ino, len);
+
 	ref->flash_offset = ofs;
 
-	if (!jeb->first_node)
+	if (!jeb->first_node) {
 		jeb->first_node = ref;
-	if (jeb->last_node) {
-		jeb->last_node->next_phys = ref;
-#ifdef TEST_TOTLEN
-		if (ref_offset(jeb->last_node) + jeb->last_node->__totlen != ref_offset(ref)) {
-			printk(KERN_CRIT "Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
-			       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
-			       ref_offset(jeb->last_node), ref_offset(jeb->last_node)+jeb->last_node->__totlen);
-			WARN_ON(1);
-		}
-#endif
+		BUG_ON(ref_offset(ref) != jeb->offset);
+	} else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
+		uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);
+
+		JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
+			    ref, ref_offset(ref), ref_offset(ref)+len,
+			    ref_offset(jeb->last_node),
+			    ref_offset(jeb->last_node)+last_len);
+		BUG();
 	}
 	jeb->last_node = ref;
 
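The new contract implied by this function: callers must reserve slots with jffs2_prealloc_raw_node_refs() before taking the lock, because jffs2_link_node_ref() no longer allocates and simply consumes the next empty slot. A minimal calling sketch (illustrative; ofs, totlen and ic stand for the caller's own values — the same pattern appears in the wbuf and summary hunks below):

    if (jffs2_prealloc_raw_node_refs(c, jeb, 1))
        return -ENOMEM;

    spin_lock(&c->erase_completion_lock);
    jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, totlen, ic);
    spin_unlock(&c->erase_completion_lock);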
@@ -1130,12 +1133,13 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
 {
 	if (!size)
 		return 0;
-	if (size > c->sector_size - jeb->used_size) {
-		printk(KERN_CRIT "Dirty space 0x%x larger then used_size 0x%x (wasted 0x%x)\n",
-		       size, jeb->used_size, jeb->wasted_size);
+	if (unlikely(size > jeb->free_size)) {
+		printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
+		       size, jeb->free_size, jeb->wasted_size);
 		BUG();
 	}
-	if (jeb->last_node && ref_obsolete(jeb->last_node)) {
+	/* REF_EMPTY_NODE is !obsolete, so that works OK */
+	if (ref_obsolete(jeb->last_node)) {
 #ifdef TEST_TOTLEN
 		jeb->last_node->__totlen += size;
 #endif
@@ -1168,7 +1172,7 @@ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
 		jeb = &c->blocks[ref->flash_offset / c->sector_size];
 
 	/* Last node in block. Use free_space */
-	if (ref != jeb->last_node) {
+	if (unlikely(ref != jeb->last_node)) {
 		printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
 		       ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
 		BUG();
@@ -1183,17 +1187,13 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
 {
 	uint32_t ret;
 
-#if CONFIG_JFFS2_FS_DEBUG > 0
-	if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
-		printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
-		       jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
-		BUG();
-	}
-#endif
-
 	ret = __ref_totlen(c, jeb, ref);
 
 #ifdef TEST_TOTLEN
-	if (ret != ref->__totlen) {
+	if (unlikely(ret != ref->__totlen)) {
+		if (!jeb)
+			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
 		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
 		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
 		       ret, ref->__totlen);
@@ -1204,13 +1204,14 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
 			printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
 
 		printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
-		ret = ref->__totlen;
-		if (!jeb)
-			jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
 #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
 		__jffs2_dbg_dump_node_refs_nolock(c, jeb);
 #endif
+
 		WARN_ON(1);
+
+		ret = ref->__totlen;
 	}
 #endif /* TEST_TOTLEN */
 	return ret;
@@ -80,7 +80,6 @@ struct jffs2_raw_node_ref
 	   for this object. If this _is_ the last, it points to the inode_cache,
 	   xattr_ref or xattr_datum instead. The common part of those structures
 	   has NULL in the first word. See jffs2_raw_ref_to_ic() below */
-	struct jffs2_raw_node_ref *next_phys;
 	uint32_t flash_offset;
 #define TEST_TOTLEN
 #ifdef TEST_TOTLEN
@@ -88,7 +87,29 @@ struct jffs2_raw_node_ref
 #endif
 };
 
-#define ref_next(r) ((r)->next_phys)
+#define REF_LINK_NODE ((int32_t)-1)
+#define REF_EMPTY_NODE ((int32_t)-2)
+
+/* Use blocks of about 256 bytes */
+#define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1)
+
+static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref)
+{
+	ref++;
+
+	/* Link to another block of refs */
+	if (ref->flash_offset == REF_LINK_NODE) {
+		ref = ref->next_in_ino;
+		if (!ref)
+			return ref;
+	}
+
+	/* End of chain */
+	if (ref->flash_offset == REF_EMPTY_NODE)
+		return NULL;
+
+	return ref;
+}
 
 static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
 {
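With ref_next() now an inline function, iterating the physical node chain of an eraseblock stays a one-liner. A hedged usage sketch (assuming jeb was populated through jffs2_prealloc_raw_node_refs()/jffs2_link_node_ref()):

    struct jffs2_raw_node_ref *ref;

    /* ref_next() steps within a refblock, follows a REF_LINK_NODE slot into
       the next block, and returns NULL at the first REF_EMPTY_NODE slot. */
    for (ref = jeb->first_node; ref; ref = ref_next(ref))
        printk(KERN_DEBUG "node at 0x%08x (flags %d)\n",
               ref_offset(ref), ref_flags(ref));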
@@ -234,6 +255,7 @@ struct jffs2_eraseblock
 	uint32_t wasted_size;
 	uint32_t free_size;	/* Note that sector_size - free_size
 				   is the address of the first free space */
+	uint32_t allocated_refs;
 	struct jffs2_raw_node_ref *first_node;
 	struct jffs2_raw_node_ref *last_node;
 
@@ -378,10 +400,9 @@ struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
 void jffs2_free_raw_inode(struct jffs2_raw_inode *);
 struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
 void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
-int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
+int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
 				 struct jffs2_eraseblock *jeb, int nr);
-struct jffs2_raw_node_ref *__jffs2_alloc_raw_node_ref(void);
-void __jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *);
+void jffs2_free_refblock(struct jffs2_raw_node_ref *);
 struct jffs2_node_frag *jffs2_alloc_node_frag(void);
 void jffs2_free_node_frag(struct jffs2_node_frag *);
 struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
@@ -458,14 +458,13 @@ static inline int on_list(struct list_head *obj, struct list_head *head)
 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
 {
 	struct jffs2_eraseblock *jeb;
-	struct jffs2_raw_node_ref *next_ref;
 	int blocknr;
 	struct jffs2_unknown_node n;
 	int ret, addedsize;
 	size_t retlen;
 	uint32_t freed_len;
 
-	if(!ref) {
+	if(unlikely(!ref)) {
 		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
 		return;
 	}
@@ -683,54 +682,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 		spin_unlock(&c->erase_completion_lock);
 	}
 
-
-	/* Merge with the next node in the physical list, if there is one
-	   and if it's also obsolete and if it doesn't belong to any inode */
-	next_ref = ref_next(ref);
-
-	if (next_ref && ref_obsolete(next_ref) && !next_ref->next_in_ino) {
-		spin_lock(&c->erase_completion_lock);
-
-#ifdef TEST_TOTLEN
-		ref->__totlen += next_ref->__totlen;
-#endif
-		ref->next_phys = ref_next(next_ref);
-		if (jeb->last_node == next_ref) jeb->last_node = ref;
-		if (jeb->gc_node == next_ref) {
-			/* gc will be happy continuing gc on this node */
-			jeb->gc_node=ref;
-		}
-		spin_unlock(&c->erase_completion_lock);
-
-		__jffs2_free_raw_node_ref(next_ref);
-	}
-
-	/* Also merge with the previous node in the list, if there is one
-	   and that one is obsolete */
-	if (ref != jeb->first_node ) {
-		struct jffs2_raw_node_ref *p = jeb->first_node;
-
-		spin_lock(&c->erase_completion_lock);
-
-		while ((next_ref = ref_next(ref)) != ref)
-			p = next_ref;
-
-		if (ref_obsolete(p) && !ref->next_in_ino) {
-#ifdef TEST_TOTLEN
-			p->__totlen += ref->__totlen;
-#endif
-			if (jeb->last_node == ref) {
-				jeb->last_node = p;
-			}
-			if (jeb->gc_node == ref) {
-				/* gc will be happy continuing gc on this node */
-				jeb->gc_node=p;
-			}
-			p->next_phys = ref_next(ref);
-			__jffs2_free_raw_node_ref(ref);
-		}
-		spin_unlock(&c->erase_completion_lock);
-	}
 out_erase_sem:
 	up(&c->erase_free_sem);
 }
@@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 #define jffs2_dataflash(c) (0)
 #define jffs2_dataflash_setup(c) (0)
 #define jffs2_dataflash_cleanup(c) do {} while (0)
+#define jffs2_nor_wbuf_flash(c) (0)
 #define jffs2_nor_wbuf_flash_setup(c) (0)
 #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
 
@@ -511,7 +511,8 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
 			spr = (struct jffs2_sum_xref_flash *)sp;
 			dbg_summary("xref at %#08x-%#08x\n",
 				    jeb->offset + je32_to_cpu(spr->offset),
-				    jeb->offset + je32_to_cpu(spr->offset) + PAD(sizeof(struct jffs2_raw_xref)));
+				    jeb->offset + je32_to_cpu(spr->offset) +
+				    (uint32_t)PAD(sizeof(struct jffs2_raw_xref)));
 
 			ref = jffs2_alloc_xattr_ref();
 			if (!ref) {
@@ -787,10 +788,12 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
 			      infosize, sum_ofs, ret, retlen);
 
-		/* Waste remaining space */
-		spin_lock(&c->erase_completion_lock);
-		jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
-		spin_unlock(&c->erase_completion_lock);
+		if (retlen) {
+			/* Waste remaining space */
+			spin_lock(&c->erase_completion_lock);
+			jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
+			spin_unlock(&c->erase_completion_lock);
+		}
 
 		c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
 
@@ -836,6 +839,7 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
 		jffs2_sum_disable_collecting(c->summary);
 
 		JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize);
+		spin_lock(&c->erase_completion_lock);
 		return 0;
 	}
 
fs/jffs2/wbuf.c (287 lines changed)
@@ -156,72 +156,126 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
 		jffs2_erase_pending_trigger(c);
 	}
 
-	/* Adjust its size counts accordingly */
-	c->wasted_size += jeb->free_size;
-	c->free_size -= jeb->free_size;
-	jeb->wasted_size += jeb->free_size;
-	jeb->free_size = 0;
+	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
+		uint32_t oldfree = jeb->free_size;
+
+		jffs2_link_node_ref(c, jeb,
+				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
+				    oldfree, NULL);
+		/* convert to wasted */
+		c->wasted_size += oldfree;
+		jeb->wasted_size += oldfree;
+		c->dirty_size -= oldfree;
+		jeb->dirty_size -= oldfree;
+	}
 
 	jffs2_dbg_dump_block_lists_nolock(c);
 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 }
 
+static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
+							    struct jffs2_inode_info *f,
+							    struct jffs2_raw_node_ref *raw,
+							    union jffs2_node_union *node)
+{
+	struct jffs2_node_frag *frag;
+	struct jffs2_full_dirent *fd;
+
+	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
+		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
+
+	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
+	       je16_to_cpu(node->u.magic) != 0);
+
+	switch (je16_to_cpu(node->u.nodetype)) {
+	case JFFS2_NODETYPE_INODE:
+		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
+		BUG_ON(!frag);
+		/* Find a frag which refers to the full_dnode we want to modify */
+		while (!frag->node || frag->node->raw != raw) {
+			frag = frag_next(frag);
+			BUG_ON(!frag);
+		}
+		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
+		return &frag->node->raw;
+		break;
+
+	case JFFS2_NODETYPE_DIRENT:
+		for (fd = f->dents; fd; fd = fd->next) {
+			if (fd->raw == raw) {
+				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
+				return &fd->raw;
+			}
+		}
+		BUG();
+	default:
+		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
+			    je16_to_cpu(node->u.nodetype));
+		break;
+	}
+	return NULL;
+}
+
 /* Recover from failure to write wbuf. Recover the nodes up to the
  * wbuf, not the one which we were starting to try to write. */
 
 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 {
 	struct jffs2_eraseblock *jeb, *new_jeb;
-	struct jffs2_raw_node_ref **first_raw, **raw;
+	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
 	size_t retlen;
 	int ret;
+	int nr_refile = 0;
 	unsigned char *buf;
 	uint32_t start, end, ofs, len;
 
 	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
 
-	if (jffs2_prealloc_raw_node_refs(c, jeb, c->reserved_refs + 1))
-		return;
-
 	spin_lock(&c->erase_completion_lock);
-
 	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+	spin_unlock(&c->erase_completion_lock);
+
+	BUG_ON(!ref_obsolete(jeb->last_node));
 
 	/* Find the first node to be recovered, by skipping over every
 	   node which ends before the wbuf starts, or which is obsolete. */
-	first_raw = &jeb->first_node;
-	while (*first_raw &&
-	       (ref_obsolete(*first_raw) ||
-		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
-		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
-			  ref_offset(*first_raw), ref_flags(*first_raw),
-			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
-			  c->wbuf_ofs));
-		first_raw = &(*first_raw)->next_phys;
+	for (next = raw = jeb->first_node; next; raw = next) {
+		next = ref_next(raw);
+
+		if (ref_obsolete(raw) ||
+		    (next && ref_offset(next) <= c->wbuf_ofs)) {
+			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+				    ref_offset(raw), ref_flags(raw),
+				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
+				    c->wbuf_ofs);
+			continue;
+		}
+		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
+			    ref_offset(raw), ref_flags(raw),
+			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));
+
+		first_raw = raw;
+		break;
 	}
 
-	if (!*first_raw) {
+	if (!first_raw) {
 		/* All nodes were obsolete. Nothing to recover. */
 		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
-		spin_unlock(&c->erase_completion_lock);
+		c->wbuf_len = 0;
 		return;
 	}
 
-	start = ref_offset(*first_raw);
-	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+	start = ref_offset(first_raw);
+	end = ref_offset(jeb->last_node);
+	nr_refile = 1;
 
-	/* Find the last node to be recovered */
-	raw = first_raw;
-	while ((*raw)) {
-		if (!ref_obsolete(*raw))
-			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
-
-		raw = &(*raw)->next_phys;
-	}
-	spin_unlock(&c->erase_completion_lock);
-
-	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+	/* Count the number of refs which need to be copied */
+	while ((raw = ref_next(raw)) != jeb->last_node)
+		nr_refile++;
+
+	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
+		    start, end, end - start, nr_refile);
 
 	buf = NULL;
 	if (start < c->wbuf_ofs) {
@@ -248,13 +302,24 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 			kfree(buf);
 			buf = NULL;
 		read_failed:
-			first_raw = &(*first_raw)->next_phys;
+			first_raw = ref_next(first_raw);
+			nr_refile--;
+			while (first_raw && ref_obsolete(first_raw)) {
+				first_raw = ref_next(first_raw);
+				nr_refile--;
+			}
 
 			/* If this was the only node to be recovered, give up */
-			if (!(*first_raw))
+			if (!first_raw) {
+				c->wbuf_len = 0;
 				return;
+			}
 
 			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
-			start = ref_offset(*first_raw);
+			start = ref_offset(first_raw);
+			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
+				    start, end, end - start, nr_refile);
+
 		} else {
 			/* Read succeeded. Copy the remaining data from the wbuf */
 			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
@@ -263,7 +328,6 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
 	   Either 'buf' contains the data, or we find it in the wbuf */
 
-
 	/* ... and get an allocation of space from a shiny new block instead */
 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
 	if (ret) {
@@ -271,6 +335,14 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		kfree(buf);
 		return;
 	}
+
+	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
+	if (ret) {
+		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
+		kfree(buf);
+		return;
+	}
+
 	ofs = write_ofs(c);
 
 	if (end-start >= c->wbuf_pagesize) {
@@ -304,7 +376,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 			kfree(buf);
 
 		if (retlen)
-			jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, *first_raw), NULL);
+			jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
 
 		return;
 	}
@@ -314,12 +386,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 		c->wbuf_ofs = ofs + towrite;
 		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
 		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
-		kfree(buf);
 	} else {
 		/* OK, now we're left with the dregs in whichever buffer we're using */
 		if (buf) {
 			memcpy(c->wbuf, buf, end-start);
-			kfree(buf);
 		} else {
 			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
 		}
@@ -331,62 +401,111 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 	new_jeb = &c->blocks[ofs / c->sector_size];
 
 	spin_lock(&c->erase_completion_lock);
-	if (new_jeb->first_node) {
-		/* Odd, but possible with ST flash later maybe */
-		new_jeb->last_node->next_phys = *first_raw;
-	} else {
-		new_jeb->first_node = *first_raw;
-	}
-
-	raw = first_raw;
-	while (*raw) {
-		uint32_t rawlen = ref_totlen(c, jeb, *raw);
+	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
+		uint32_t rawlen = ref_totlen(c, jeb, raw);
+		struct jffs2_inode_cache *ic;
+		struct jffs2_raw_node_ref *new_ref;
+		struct jffs2_raw_node_ref **adjust_ref = NULL;
+		struct jffs2_inode_info *f = NULL;
 
 		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
-			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+			  rawlen, ref_offset(raw), ref_flags(raw), ofs));
 
-		if (ref_obsolete(*raw)) {
-			/* Shouldn't really happen much */
-			new_jeb->dirty_size += rawlen;
-			new_jeb->free_size -= rawlen;
-			c->dirty_size += rawlen;
-		} else {
-			new_jeb->used_size += rawlen;
-			new_jeb->free_size -= rawlen;
+		ic = jffs2_raw_ref_to_ic(raw);
+
+		/* Ick. This XATTR mess should be fixed shortly... */
+		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
+			struct jffs2_xattr_datum *xd = (void *)ic;
+			BUG_ON(xd->node != raw);
+			adjust_ref = &xd->node;
+			raw->next_in_ino = NULL;
+			ic = NULL;
+		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
+			struct jffs2_xattr_datum *xr = (void *)ic;
+			BUG_ON(xr->node != raw);
+			adjust_ref = &xr->node;
+			raw->next_in_ino = NULL;
+			ic = NULL;
+		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
+			struct jffs2_raw_node_ref **p = &ic->nodes;
+
+			/* Remove the old node from the per-inode list */
+			while (*p && *p != (void *)ic) {
+				if (*p == raw) {
+					(*p) = (raw->next_in_ino);
+					raw->next_in_ino = NULL;
+					break;
+				}
+				p = &((*p)->next_in_ino);
+			}
 
+			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
+				/* If it's an in-core inode, then we have to adjust any
+				   full_dirent or full_dnode structure to point to the
+				   new version instead of the old */
+				f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
+				if (IS_ERR(f)) {
+					/* Should never happen; it _must_ be present */
+					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
+						    ic->ino, PTR_ERR(f));
+					BUG();
+				}
+				/* We don't lock f->sem. There's a number of ways we could
+				   end up in here with it already being locked, and nobody's
+				   going to modify it on us anyway because we hold the
+				   alloc_sem. We're only changing one ->raw pointer too,
+				   which we can get away with without upsetting readers. */
+				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
+								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
+			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
+					    ic->state != INO_STATE_CHECKEDABSENT &&
+					    ic->state != INO_STATE_GC)) {
+				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
+				BUG();
+			}
+		}
+
+		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
+
+		if (adjust_ref) {
+			BUG_ON(*adjust_ref != raw);
+			*adjust_ref = new_ref;
+		}
+		if (f)
+			jffs2_gc_release_inode(c, f);
+
+		if (!ref_obsolete(raw)) {
+			jeb->dirty_size += rawlen;
+			jeb->used_size -= rawlen;
+			c->dirty_size += rawlen;
 			c->used_size -= rawlen;
+			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
+			BUG_ON(raw->next_in_ino);
 		}
-		c->free_size -= rawlen;
-		(*raw)->flash_offset = ofs | ref_flags(*raw);
 		ofs += rawlen;
-		new_jeb->last_node = *raw;
-
-		raw = &(*raw)->next_phys;
 	}
 
+	kfree(buf);
+
 	/* Fix up the original jeb now it's on the bad_list */
-	*first_raw = NULL;
-	if (first_raw == &jeb->first_node) {
-		jeb->last_node = NULL;
+	if (first_raw == jeb->first_node) {
 		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
 		list_del(&jeb->list);
 		list_add(&jeb->list, &c->erase_pending_list);
 		c->nr_erasing_blocks++;
 		jffs2_erase_pending_trigger(c);
 	}
-	else
-		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
 
 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 
 	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
 	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
 
 	spin_unlock(&c->erase_completion_lock);
 
-	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
 }
 
 /* Meaning of pad argument:
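Note the structural consequence visible in this hunk: a ref embedded in an array cannot be unlinked the way a list node could, so recovery now marks the stale copy obsolete in place and links a fresh ref into the new block instead of re-threading next_phys pointers. In sketch form (both lines come from the hunk above):

    new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
    raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;  /* old copy stays put */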
@@ -400,6 +519,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
 
 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 {
+	struct jffs2_eraseblock *wbuf_jeb;
 	int ret;
 	size_t retlen;
 
@@ -417,7 +537,8 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 	if (!c->wbuf_len)	/* already checked c->wbuf above */
 		return 0;
 
-	if (jffs2_prealloc_raw_node_refs(c, c->nextblock, c->reserved_refs + 1))
+	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
+	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
 		return -ENOMEM;
 
 	/* claim remaining space on the page
@@ -473,32 +594,29 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
 
 	/* Adjust free size of the block if we padded. */
 	if (pad) {
-		struct jffs2_eraseblock *jeb;
 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
 
-		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
-
 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
-			  (jeb==c->nextblock)?"next":"", jeb->offset));
+			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
 
 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
 		   padded. If there is less free space in the block than that,
 		   something screwed up */
-		if (jeb->free_size < waste) {
+		if (wbuf_jeb->free_size < waste) {
 			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
 			       c->wbuf_ofs, c->wbuf_len, waste);
 			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
-			       jeb->offset, jeb->free_size);
+			       wbuf_jeb->offset, wbuf_jeb->free_size);
 			BUG();
 		}
 
 		spin_lock(&c->erase_completion_lock);
 
-		jffs2_link_node_ref(c, jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
+		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
 		/* FIXME: that made it count as dirty. Convert to wasted */
-		jeb->dirty_size -= waste;
+		wbuf_jeb->dirty_size -= waste;
 		c->dirty_size -= waste;
-		jeb->wasted_size += waste;
+		wbuf_jeb->wasted_size += waste;
 		c->wasted_size += waste;
 	} else
 		spin_lock(&c->erase_completion_lock);
@@ -758,7 +876,8 @@ outerr:
  * This is the entry for flash write.
  * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
  */
-int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+		      size_t *retlen, const u_char *buf)
 {
 	struct kvec vecs[1];
 
@@ -953,7 +1072,7 @@ int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblo
 	}
 	D1(if (retval == 1) {
 		printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
-		printk(KERN_WARNING "OOB at %08x was ", offset);
+		printk(KERN_WARNING "OOB at %08zx was ", offset);
 		for (i=0; i < oob_size; i++) {
 			printk("%02x ", buf[i]);
 		}