diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..0ee452275578 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@ out:
 	return e;
 }
 
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling the wear leveling pool
+ *
+ * This helper function checks whether there are enough free pebs (after
+ * deducting the fastmap pebs) to fill fm_pool and fm_wl_pool; the rule only
+ * applies once at least one free peb has been filled into fm_wl_pool.
+ * For the wear leveling pool, UBI also reserves free pebs for bad peb
+ * handling, because otherwise there may not be enough free pebs for user
+ * volumes after new bad pebs are produced.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+	int fm_used = 0;	// fastmap non anchor pebs.
+	int beb_rsvd_pebs;
+
+	if (!ubi->free.rb_node)
+		return false;
+
+	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+		fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+	return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
 /**
  * ubi_refill_pools - refills all fastmap PEB pools.
  * @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
 		wl_tree_add(ubi->fm_anchor, &ubi->free);
 		ubi->free_count++;
 	}
-	if (ubi->fm_next_anchor) {
-		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-		ubi->free_count++;
-	}
 
-	/* All available PEBs are in ubi->free, now is the time to get
+	/*
+	 * All available PEBs are in ubi->free, now is the time to get
 	 * the best anchor PEBs.
 	 */
 	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
-	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
 
 	for (;;) {
 		enough = 0;
 		if (pool->size < pool->max_size) {
-			if (!ubi->free.rb_node)
+			if (!has_enough_free_count(ubi, false))
 				break;
 
 			e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
 			enough++;
 
 		if (wl_pool->size < wl_pool->max_size) {
-			if (!ubi->free.rb_node ||
-			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+			if (!has_enough_free_count(ubi, true))
 				break;
 
 			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -253,6 +275,58 @@ out:
 	return ret;
 }
 
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size)
+		return NULL;
+
+	pnum = pool->pebs[pool->used];
+	return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether wear leveling work needs scheduling.
+ * UBI fetches a free PEB from wl_pool, but we check free PEBs from both
+ * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
+ * may be moved into 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+	int ec;
+	struct ubi_wl_entry *e;
+
+	if (!ubi->used.rb_node)
+		return false;
+
+	e = next_peb_for_wl(ubi);
+	if (!e) {
+		if (!ubi->free.rb_node)
+			return false;
+		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+		ec = e->ec;
+	} else {
+		ec = e->ec;
+		if (ubi->free.rb_node) {
+			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+			ec = max(ec, e->ec);
+		}
+	}
+	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+	return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
  *
  * @ubi: UBI device description object
@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
 	struct ubi_work *wrk;
+	struct ubi_wl_entry *anchor;
 
 	spin_lock(&ubi->wl_lock);
 
-	/* Do we have a next anchor? */
-	if (!ubi->fm_next_anchor) {
-		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
-		if (!ubi->fm_next_anchor)
-			/* Tell wear leveling to produce a new anchor PEB */
-			ubi->fm_do_produce_anchor = 1;
+	/* Do we already have an anchor? */
+	if (ubi->fm_anchor) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
 	}
 
-	/* Do wear leveling to get a new anchor PEB or check the
-	 * existing next anchor candidate.
-	 */
+	/* See if we can find an anchor PEB on the list of free PEBs */
+	anchor = ubi_wl_get_fm_peb(ubi, 1);
+	if (anchor) {
+		ubi->fm_anchor = anchor;
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+
+	ubi->fm_do_produce_anchor = 1;
+	/* No luck, trigger wear leveling to produce a new anchor PEB. */
 	if (ubi->wl_scheduled) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;
@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
 		ubi->fm_anchor = NULL;
 	}
 
-	if (ubi->fm_next_anchor) {
-		return_unused_peb(ubi, ubi->fm_next_anchor);
-		ubi->fm_next_anchor = NULL;
-	}
-
 	if (ubi->fm) {
 		for (i = 0; i < ubi->fm->used_blocks; i++)
 			kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6b5f1ffd961b..6e95c4b1473e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		fm_pos += sizeof(*fec);
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
-	if (ubi->fm_next_anchor) {
-		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
-		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
-		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
-		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
-		free_peb_count++;
-		fm_pos += sizeof(*fec);
-		ubi_assert(fm_pos <= ubi->fm_size);
-	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..078112e23dfd 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -489,8 +489,7 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
 *
 * @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
 	int fm_work_scheduled;
 	int fast_attach;
 	struct ubi_wl_entry *fm_anchor;
-	struct ubi_wl_entry *fm_next_anchor;
 	int fm_do_produce_anchor;
 
 	/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1bc7b3a05604..6ea95ade4ca6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -309,7 +309,6 @@ out_mapping:
 	ubi->volumes[vol_id] = NULL;
 	ubi->vol_count -= 1;
 	spin_unlock(&ubi->volumes_lock);
-	ubi_eba_destroy_table(eba_tbl);
 out_acc:
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..55bae06cf408 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi_assert(!ubi->move_from && !ubi->move_to);
 	ubi_assert(!ubi->move_to_put);
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	if (!next_peb_for_wl(ubi) ||
+#else
 	if (!ubi->free.rb_node ||
+#endif
 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
 		 * No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	e1 = find_anchor_wl_entry(&ubi->used);
-	if (e1 && ubi->fm_next_anchor &&
-	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+	if (e1 && ubi->fm_anchor &&
+	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 		ubi->fm_do_produce_anchor = 1;
-		/* fm_next_anchor is no longer considered a good anchor
-		 * candidate.
+		/*
+		 * fm_anchor is no longer considered a good anchor.
 		 * NULL assignment also prevents multiple wear level checks
 		 * of this PEB.
 		 */
-		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-		ubi->fm_next_anchor = NULL;
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->fm_anchor = NULL;
 		ubi->free_count++;
 	}
 
@@ -1003,8 +1007,6 @@ out_cancel:
 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 {
 	int err = 0;
-	struct ubi_wl_entry *e1;
-	struct ubi_wl_entry *e2;
 	struct ubi_work *wrk;
 
 	spin_lock(&ubi->wl_lock);
@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 	 * the WL worker has to be scheduled anyway.
 	 */
 	if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+		if (!need_wear_leveling(ubi))
+			goto out_unlock;
+#else
+		struct ubi_wl_entry *e1;
+		struct ubi_wl_entry *e2;
+
 		if (!ubi->used.rb_node || !ubi->free.rb_node)
 			/* No physical eraseblocks - no deal */
 			goto out_unlock;
@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
 			goto out_unlock;
+#endif
 		dbg_wl("schedule wear-leveling");
 	} else
 		dbg_wl("schedule scrubbing");
@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+		if (!ubi->fm_disabled && !ubi->fm_anchor &&
 		    e->pnum < UBI_FM_MAX_START) {
-			/* Abort anchor production, if needed it will be
+			/*
+			 * Abort anchor production, if needed it will be
 			 * enabled again in the wear leveling started below.
 			 */
-			ubi->fm_next_anchor = e;
+			ubi->fm_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
 			wl_tree_add(e, &ubi->free);
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index c93a53293786..5ebe374a08ae 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,6 +5,8 @@
 static void update_fastmap_work_fn(struct work_struct *wrk);
 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
 static void ubi_fastmap_close(struct ubi_device *ubi);
 static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
 {
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 7e9abdb89712..acd32f05b519 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 	jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
 		  __func__,
 		  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
-	instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+	instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
 	if (!instr) {
-		pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
+		pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
 		mutex_lock(&c->erase_free_sem);
 		spin_lock(&c->erase_completion_lock);
 		list_move(&jeb->list, &c->erase_pending_list);
@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
 		return;
 	}
 
-	memset(instr, 0, sizeof(*instr));
-
 	instr->addr = jeb->offset;
 	instr->len = c->sector_size;
 
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 00a110f40e10..39cec28096a7 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -604,6 +604,7 @@ out_root:
 	jffs2_free_raw_node_refs(c);
 	kvfree(c->blocks);
 	jffs2_clear_xattr_subsystem(c);
+	jffs2_sum_exit(c);
 out_inohash:
 	kfree(c->inocache_list);
 out_wbuf:
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index c0b84e960b20..e8b9b756f0ac 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -65,7 +65,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write)
  */
 static int run_gc(struct ubifs_info *c)
 {
-	int err, lnum;
+	int lnum;
 
 	/* Make some free space by garbage-collecting dirty space */
 	down_read(&c->commit_sem);
@@ -76,10 +76,7 @@ static int run_gc(struct ubifs_info *c)
 
 	/* GC freed one LEB, return it to lprops */
 	dbg_budg("GC freed LEB %d", lnum);
-	err = ubifs_return_leb(c, lnum);
-	if (err)
-		return err;
-	return 0;
+	return ubifs_return_leb(c, lnum);
 }
 
 /**
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index e4f193eae4b2..e4c4761aff7f 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -677,7 +677,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
 	int err;
 
 	err = security_inode_init_security(inode, dentry, qstr,
-					   &init_xattrs, 0);
+					   &init_xattrs, NULL);
 	if (err) {
 		struct ubifs_info *c = dentry->i_sb->s_fs_info;
 		ubifs_err(c, "cannot initialize security for inode %lu, error %d",
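
Note (illustration only, not part of the patch): the refill logic above comes down to simple accounting — a pool may keep taking free PEBs only while the free count, minus the bad-PEB reserve in the wear-leveling case, still exceeds the number of non-anchor fastmap PEBs. The standalone userspace sketch below mirrors just that arithmetic from has_enough_free_count(); the struct fake_ubi type and the sample numbers are invented for the example, and none of the kernel's locking or rb-tree bookkeeping is modelled.

/* Userspace illustration of the has_enough_free_count() accounting rule. */
#include <stdbool.h>
#include <stdio.h>

struct fake_ubi {
	int free_count;		/* PEBs currently in the free tree */
	int beb_rsvd_pebs;	/* PEBs reserved for bad-block handling */
	int fm_size;		/* fastmap size in bytes */
	int leb_size;		/* LEB size in bytes */
	bool wl_pool_filled;	/* at least one PEB already went to fm_wl_pool */
};

/* Keep refilling a pool only while the free PEBs left after the bad-block
 * reserve (applied to the wear-leveling pool only) still cover the
 * fastmap's non-anchor PEBs. */
static bool has_enough_free_count(const struct fake_ubi *ubi, bool is_wl_pool)
{
	int fm_used = 0;
	int rsvd = is_wl_pool ? ubi->beb_rsvd_pebs : 0;

	if (!ubi->free_count)
		return false;

	if (ubi->wl_pool_filled)
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - rsvd > fm_used;
}

int main(void)
{
	struct fake_ubi ubi = {
		.free_count = 6,
		.beb_rsvd_pebs = 5,
		.fm_size = 2 * 126976,	/* a two-PEB fastmap */
		.leb_size = 126976,
		.wl_pool_filled = true,
	};

	/* fm_pool ignores the bad-block reserve, fm_wl_pool does not:
	 * with these made-up numbers the first prints "yes", the second "no". */
	printf("refill fm_pool:    %s\n",
	       has_enough_free_count(&ubi, false) ? "yes" : "no");
	printf("refill fm_wl_pool: %s\n",
	       has_enough_free_count(&ubi, true) ? "yes" : "no");
	return 0;
}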