Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git
commit 7c37720922
@@ -1447,7 +1447,7 @@ out_ech:
 	return err;
 }
 
-static struct ubi_attach_info *alloc_ai(void)
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
 {
 	struct ubi_attach_info *ai;
 
@@ -1461,7 +1461,7 @@ static struct ubi_attach_info *alloc_ai(void)
 	INIT_LIST_HEAD(&ai->alien);
 	INIT_LIST_HEAD(&ai->fastmap);
 	ai->volumes = RB_ROOT;
-	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+	ai->aeb_slab_cache = kmem_cache_create(slab_name,
 					       sizeof(struct ubi_ainf_peb),
 					       0, 0, NULL);
 	if (!ai->aeb_slab_cache) {
@@ -1491,7 +1491,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
 
 	err = -ENOMEM;
 
-	scan_ai = alloc_ai();
+	scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
 	if (!scan_ai)
 		goto out;
 
@@ -1557,7 +1557,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 	int err;
 	struct ubi_attach_info *ai;
 
-	ai = alloc_ai();
+	ai = alloc_ai("ubi_aeb_slab_cache");
 	if (!ai)
 		return -ENOMEM;
 
@@ -1575,7 +1575,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 		if (err > 0 || mtd_is_eccerr(err)) {
 			if (err != UBI_NO_FASTMAP) {
 				destroy_ai(ai);
-				ai = alloc_ai();
+				ai = alloc_ai("ubi_aeb_slab_cache");
 				if (!ai)
 					return -ENOMEM;
 
@@ -1614,7 +1614,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 	if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
 		struct ubi_attach_info *scan_ai;
 
-		scan_ai = alloc_ai();
+		scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
 		if (!scan_ai) {
 			err = -ENOMEM;
 			goto out_wl;
@@ -346,14 +346,27 @@ out:
  * WL sub-system.
  *
  * @ubi: UBI device description object
+ * @need_fill: whether to fill wear-leveling pool when no PEBs are found
  */
-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+					    bool need_fill)
 {
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
-	if (pool->used == pool->size)
+	if (pool->used == pool->size) {
+		if (need_fill && !ubi->fm_work_scheduled) {
+			/*
+			 * We cannot update the fastmap here because this
+			 * function is called in atomic context.
+			 * Let's fail here and refill/update it as soon as
+			 * possible.
+			 */
+			ubi->fm_work_scheduled = 1;
+			schedule_work(&ubi->fm_work);
+		}
 		return NULL;
+	}
 
 	pnum = pool->pebs[pool->used];
 	return ubi->lookuptbl[pnum];
@@ -375,7 +388,7 @@ static bool need_wear_leveling(struct ubi_device *ubi)
 	if (!ubi->used.rb_node)
 		return false;
 
-	e = next_peb_for_wl(ubi);
+	e = next_peb_for_wl(ubi, false);
 	if (!e) {
 		if (!ubi->free.rb_node)
 			return false;
@@ -55,7 +55,7 @@ static int ubi_nvmem_reg_read(void *priv, unsigned int from,
 	if (err)
 		return err;
 
-	return bytes_left == 0 ? 0 : -EIO;
+	return 0;
 }
 
 static int ubi_nvmem_add(struct ubi_volume_info *vi)
@@ -549,6 +549,7 @@ struct ubi_debug_info {
  * @peb_buf: a buffer of PEB size used for different purposes
  * @buf_mutex: protects @peb_buf
  * @ckvol_mutex: serializes static volume checking when opening
+ * @wl_reboot_notifier: close all wear-leveling work before reboot
  *
  * @dbg: debugging information for this UBI device
  */
@@ -651,6 +652,7 @@ struct ubi_device {
 	void *peb_buf;
 	struct mutex buf_mutex;
 	struct mutex ckvol_mutex;
+	struct notifier_block wl_reboot_notifier;
 
 	struct ubi_debug_info dbg;
 };
@@ -831,7 +833,6 @@ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
 struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
 				       struct ubi_attach_info *ai);
 int ubi_attach(struct ubi_device *ubi, int force_scan);
-void ubi_destroy_ai(struct ubi_attach_info *ai);
 
 /* vtbl.c */
 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -143,8 +143,10 @@ static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
 		    vol->vol_id != volid)
 			continue;
 
+		fwnode_handle_put(fw_vols);
 		return fw_vol;
 	}
+	fwnode_handle_put(fw_vols);
 
 	return NULL;
 }
@@ -89,6 +89,7 @@
 #include <linux/crc32.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/reboot.h>
 #include "ubi.h"
 #include "wl.h"
 
@@ -127,6 +128,8 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
 				 struct ubi_wl_entry *e, struct rb_root *root);
 static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e);
+static int ubi_wl_reboot_notifier(struct notifier_block *n,
+				  unsigned long state, void *cmd);
 
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -683,7 +686,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi_assert(!ubi->move_to_put);
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-	if (!next_peb_for_wl(ubi) ||
+	if (!next_peb_for_wl(ubi, true) ||
 #else
 	if (!ubi->free.rb_node ||
 #endif
@@ -846,7 +849,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			goto out_not_moved;
 		}
 		if (err == MOVE_RETRY) {
-			scrubbing = 1;
+			/*
+			 * For source PEB:
+			 * 1. The scrubbing is set for scrub type PEB, it will
+			 *    be put back into ubi->scrub list.
+			 * 2. Non-scrub type PEB will be put back into ubi->used
+			 *    list.
+			 */
+			keep = 1;
 			dst_leb_clean = 1;
 			goto out_not_moved;
 		}
@@ -1943,6 +1953,13 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 	if (!ubi->ro_mode && !ubi->fm_disabled)
 		ubi_ensure_anchor_pebs(ubi);
 #endif
 
+	if (!ubi->wl_reboot_notifier.notifier_call) {
+		ubi->wl_reboot_notifier.notifier_call = ubi_wl_reboot_notifier;
+		ubi->wl_reboot_notifier.priority = 1; /* Higher than MTD */
+		register_reboot_notifier(&ubi->wl_reboot_notifier);
+	}
+
 	return 0;
 
 out_free:
@@ -1988,6 +2005,17 @@ void ubi_wl_close(struct ubi_device *ubi)
 	kfree(ubi->lookuptbl);
 }
 
+static int ubi_wl_reboot_notifier(struct notifier_block *n,
+				  unsigned long state, void *cmd)
+{
+	struct ubi_device *ubi;
+
+	ubi = container_of(n, struct ubi_device, wl_reboot_notifier);
+	ubi_wl_close(ubi);
+
+	return NOTIFY_DONE;
+}
+
 /**
  * self_check_ec - make sure that the erase counter of a PEB is correct.
  * @ubi: UBI device description object
@@ -5,7 +5,8 @@
 static void update_fastmap_work_fn(struct work_struct *wrk);
 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
-static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi,
+					    bool need_fill);
 static bool need_wear_leveling(struct ubi_device *ubi);
 static void ubi_fastmap_close(struct ubi_device *ubi);
 static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
@@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
 
 		positions[value]=outpos;
 		if (repeat) {
+			if ((outpos + repeat) >= destlen) {
+				return 1;
+			}
 			if (backoffs + repeat >= outpos) {
 				while(repeat) {
 					cpage_out[outpos++] = cpage_out[backoffs++];
@@ -276,11 +276,6 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
 
 	end_rubin(&rs);
 
-	if (outpos > pos) {
-		/* We failed */
-		return -1;
-	}
-
 	/* Tell the caller how much we managed to compress,
 	 * and how much space it took */
 
@@ -338,10 +338,9 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
 	} while(--retlen);
 	mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
 	if (retlen) {
-		pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
-			*wordebuf,
-			jeb->offset +
-			c->sector_size-retlen * sizeof(*wordebuf));
+		*bad_offset = jeb->offset + c->sector_size - retlen * sizeof(*wordebuf);
+		pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
+			*wordebuf, *bad_offset);
 		return -EIO;
 	}
 	return 0;
@@ -82,7 +82,7 @@ again:
 
 		nextlist = &c->erasable_list;
 	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
-		/* There are blocks are wating for the wbuf sync */
+		/* There are blocks are waiting for the wbuf sync */
 		jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
 		spin_unlock(&c->erase_completion_lock);
 		jffs2_flush_wbuf_pad(c);
@@ -15,6 +15,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/compiler.h>
 #include <linux/sched/signal.h>
+#include <linux/string_choices.h>
 #include "nodelist.h"
 #include "debug.h"
 
@@ -317,9 +318,9 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
 			   And there's no space left. At all. */
 			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
 				c->nr_erasing_blocks, c->nr_free_blocks,
-				list_empty(&c->erasable_list) ? "yes" : "no",
-				list_empty(&c->erasing_list) ? "yes" : "no",
-				list_empty(&c->erase_pending_list) ? "yes" : "no");
+				str_yes_no(list_empty(&c->erasable_list)),
+				str_yes_no(list_empty(&c->erasing_list)),
+				str_yes_no(list_empty(&c->erase_pending_list)));
 			return -ENOSPC;
 		}
 
@@ -630,8 +631,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 				ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-			jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
-				  ref_offset(ref), freed_len);
+		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
+			  ref_offset(ref), freed_len);
 		jeb->unchecked_size -= freed_len;
 		c->unchecked_size -= freed_len;
 	} else {
@@ -641,8 +642,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
 				ref->flash_offset, jeb->used_size);
 			BUG();
 		})
-			jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
-				  ref_offset(ref), freed_len);
+		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
+			  ref_offset(ref), freed_len);
 		jeb->used_size -= freed_len;
 		c->used_size -= freed_len;
 	}
@@ -883,7 +884,7 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
 
 	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
 		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
-		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
+		  c->dirty_size, nr_very_dirty, str_yes_no(ret));
 
 	return ret;
 }
@@ -72,7 +72,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
 		if (err != -EOPNOTSUPP)
 			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
 	} else
-		pointed = 1; /* succefully pointed to device */
+		pointed = 1; /* successfully pointed to device */
 #endif
 
 	if (!pointed) {
@@ -213,12 +213,6 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
-	case FS_IOC32_GETFLAGS:
-		cmd = FS_IOC_GETFLAGS;
-		break;
-	case FS_IOC32_SETFLAGS:
-		cmd = FS_IOC_SETFLAGS;
-		break;
 	case FS_IOC_SET_ENCRYPTION_POLICY:
 	case FS_IOC_GET_ENCRYPTION_POLICY:
 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
@@ -981,6 +981,13 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
 
 	dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
 
+	if (kill_xattrs && ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
+		ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
+		err = -EPERM;
+		ubifs_ro_mode(c, err);
+		return err;
+	}
+
 	/*
 	 * If the inode is being deleted, do not write the attached data. No
 	 * need to synchronize the write-buffer either.
@@ -1012,12 +1019,6 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
 		struct inode *xino;
 		struct ubifs_dent_node *xent, *pxent = NULL;
 
-		if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
-			err = -EPERM;
-			ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
-			goto out_release;
-		}
-
 		lowest_xent_key(c, &key, inode->i_ino);
 		while (1) {
 			xent = ubifs_tnc_next_ent(c, &key, &nm);
@@ -577,7 +577,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
 	/* Go right */
 	nnode = ubifs_get_nnode(c, nnode, iip);
 	if (IS_ERR(nnode))
-		return (void *)nnode;
+		return ERR_CAST(nnode);
 
 	/* Go down to level 1 */
 	while (nnode->level > 1) {
@@ -594,7 +594,7 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
 		}
 		nnode = ubifs_get_nnode(c, nnode, iip);
 		if (IS_ERR(nnode))
-			return (void *)nnode;
+			return ERR_CAST(nnode);
 	}
 
 	for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++)
@@ -76,7 +76,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
 		else if (inum > o->inum)
 			p = &(*p)->rb_right;
 		else {
-			ubifs_err(c, "orphaned twice");
+			ubifs_err(c, "ino %lu orphaned twice", (unsigned long)inum);
 			spin_unlock(&c->orphan_lock);
 			kfree(orphan);
 			return -EINVAL;
@@ -773,10 +773,10 @@ static void init_constants_master(struct ubifs_info *c)
 	 * necessary to report something for the 'statfs()' call.
 	 *
 	 * Subtract the LEB reserved for GC, the LEB which is reserved for
-	 * deletions, minimum LEBs for the index, and assume only one journal
-	 * head is available.
+	 * deletions, minimum LEBs for the index, the LEBs which are reserved
+	 * for each journal head.
 	 */
-	tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1;
+	tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt;
 	tmp64 *= (long long)c->leb_size - c->leb_overhead;
 	tmp64 = ubifs_reported_space(c, tmp64);
 	c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT;
@@ -2206,6 +2206,8 @@ static int ubifs_fill_super(struct super_block *sb, struct fs_context *fc)
 	}
 
 	super_set_uuid(sb, c->uuid, sizeof(c->uuid));
+	super_set_sysfs_name_generic(sb, UBIFS_DFS_DIR_NAME,
+				     c->vi.ubi_num, c->vi.vol_id);
 
 	mutex_unlock(&c->umount_mutex);
 	return 0;
@@ -2930,8 +2930,6 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 		dbg_tnc("xent '%s', ino %lu", xent->name,
 			(unsigned long)xattr_inum);
 
-		ubifs_evict_xattr_inode(c, xattr_inum);
-
 		fname_name(&nm) = xent->name;
 		fname_len(&nm) = le16_to_cpu(xent->nlen);
 		err = ubifs_tnc_remove_nm(c, &key1, &nm);
@@ -657,6 +657,8 @@ static int get_znodes_to_commit(struct ubifs_info *c)
 		znode->alt = 0;
 		cnext = find_next_dirty(znode);
 		if (!cnext) {
+			ubifs_assert(c, !znode->parent);
+			znode->cparent = NULL;
 			znode->cnext = c->cnext;
 			break;
 		}
@@ -2040,13 +2040,10 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
 #ifdef CONFIG_UBIFS_FS_XATTR
 extern const struct xattr_handler * const ubifs_xattr_handlers[];
 ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum);
 int ubifs_purge_xattrs(struct inode *host);
 #else
 #define ubifs_listxattr NULL
 #define ubifs_xattr_handlers NULL
-static inline void ubifs_evict_xattr_inode(struct ubifs_info *c,
-					   ino_t xattr_inum) { }
 static inline int ubifs_purge_xattrs(struct inode *host)
 {
 	return 0;
@@ -48,19 +48,6 @@
 #include <linux/slab.h>
 #include <linux/xattr.h>
 
-/*
- * Extended attribute type constants.
- *
- * USER_XATTR: user extended attribute ("user.*")
- * TRUSTED_XATTR: trusted extended attribute ("trusted.*)
- * SECURITY_XATTR: security extended attribute ("security.*")
- */
-enum {
-	USER_XATTR,
-	TRUSTED_XATTR,
-	SECURITY_XATTR,
-};
-
 static const struct inode_operations empty_iops;
 static const struct file_operations empty_fops;
 
@@ -532,8 +519,6 @@ int ubifs_purge_xattrs(struct inode *host)
 			ubifs_err(c, "dead directory entry '%s', error %d",
 				  xent->name, err);
 			ubifs_ro_mode(c, err);
-			kfree(pxent);
-			kfree(xent);
 			goto out_err;
 		}
 
@@ -541,16 +526,12 @@ int ubifs_purge_xattrs(struct inode *host)
 
 		clear_nlink(xino);
 		err = remove_xattr(c, host, xino, &nm);
+		iput(xino);
 		if (err) {
-			kfree(pxent);
-			kfree(xent);
-			iput(xino);
 			ubifs_err(c, "cannot remove xattr, error %d", err);
 			goto out_err;
 		}
 
-		iput(xino);
-
 		kfree(pxent);
 		pxent = xent;
 		key_read(c, &xent->key, &key);
@@ -566,32 +547,12 @@ int ubifs_purge_xattrs(struct inode *host)
 	return 0;
 
 out_err:
+	kfree(pxent);
+	kfree(xent);
 	up_write(&ubifs_inode(host)->xattr_sem);
 	return err;
 }
 
-/**
- * ubifs_evict_xattr_inode - Evict an xattr inode.
- * @c: UBIFS file-system description object
- * @xattr_inum: xattr inode number
- *
- * When an inode that hosts xattrs is being removed we have to make sure
- * that cached inodes of the xattrs also get removed from the inode cache
- * otherwise we'd waste memory. This function looks up an inode from the
- * inode cache and clears the link counter such that iput() will evict
- * the inode.
- */
-void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum)
-{
-	struct inode *inode;
-
-	inode = ilookup(c->vfs_sb, xattr_inum);
-	if (inode) {
-		clear_nlink(inode);
-		iput(inode);
-	}
-}
-
 static int ubifs_xattr_remove(struct inode *host, const char *name)
 {
 	struct inode *inode;