mirror of https://github.com/edk2-porting/linux-next.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

Pull GFS2 updates from Steven Whitehouse:
 "The major feature this time is the "rbm" conversion in the resource
  group code. The new struct gfs2_rbm specifies the location of an
  allocatable block in (resource group, bitmap, offset) form. There are
  a number of added helper functions, and later patches then rewrite
  some of the resource group code in terms of this new structure. Not
  only does this give us a nice code clean up, but it also removes some
  of the previous restrictions where extents could not cross bitmap
  boundaries, for example.

  In addition to that, there are a few bug fixes and clean ups, but the
  rbm work is by far the majority of this patch set in terms of number
  of changed lines."

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw: (27 commits)
  GFS2: Write out dirty inode metadata in delayed deletes
  GFS2: fix s_writers.counter imbalance in gfs2_ail_empty_gl
  GFS2: Fix infinite loop in rbm_find
  GFS2: Consolidate free block searching functions
  GFS2: Get rid of I_MUTEX_QUOTA usage
  GFS2: Stop block extents at the end of bitmaps
  GFS2: Fix unclaimed_blocks() wrapping bug and clean up
  GFS2: Improve block reservation tracing
  GFS2: Fall back to ignoring reservations, if there are no other blocks left
  GFS2: Fix ->show_options() for statfs slow
  GFS2: Use rbm for gfs2_setbit()
  GFS2: Use rbm for gfs2_testbit()
  GFS2: Eliminate unnecessary check for state > 3 in bitfit
  GFS2: Eliminate redundant calls to may_grant
  GFS2: Combine functions gfs2_glock_dq_wait and wait_on_demote
  GFS2: Combine functions gfs2_glock_wait and wait_on_holder
  GFS2: inline __gfs2_glock_schedule_for_reclaim
  GFS2: change function gfs2_direct_IO to use a normal gfs2_glock_dq
  GFS2: rbm code cleanup
  GFS2: Fix case where reservation finished at end of rgrp
  ...
This commit is contained in:
commit e151960a23
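To make the (resource group, bitmap, offset) idea concrete, here is a small user-space C sketch, not kernel code, of how such a triple maps back to an absolute disk block, modelled on the gfs2_rbm_to_block() helper added to fs/gfs2/incore.h in this merge. GFS2 packs four blocks into each bitmap byte (two bits per block), hence the GFS2_NBBY multiplier; the struct fields mirror the kernel's, everything else is a simplified stand-in:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GFS2_NBBY 4	/* blocks per bitmap byte (2 bits per block) */

/* Simplified stand-ins for the kernel structures */
struct gfs2_rgrpd {
	uint64_t rd_data0;	/* first data block in the resource group */
};

struct gfs2_bitmap {
	uint32_t bi_start;	/* byte offset of this bitmap in the rgrp */
};

struct gfs2_rbm {
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi;	/* bitmap must belong to rgd */
	uint32_t offset;	/* block offset relative to this bitmap */
};

/* Mirrors gfs2_rbm_to_block() from the incore.h hunk below */
static uint64_t rbm_to_block(const struct gfs2_rbm *rbm)
{
	return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) +
	       rbm->offset;
}

int main(void)
{
	struct gfs2_rgrpd rgd = { .rd_data0 = 100000 };
	struct gfs2_bitmap bi = { .bi_start = 25 };	/* 25 bytes = 100 blocks */
	struct gfs2_rbm rbm = { .rgd = &rgd, .bi = &bi, .offset = 7 };

	/* 100000 + 25 * 4 + 7 = 100107 */
	assert(rbm_to_block(&rbm) == 100107);
	printf("absolute block: %llu\n",
	       (unsigned long long)rbm_to_block(&rbm));
	return 0;
}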
fs/gfs2/aops.c

@@ -612,6 +612,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+	unsigned requested = 0;
 	int alloc_required;
 	int error = 0;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
@@ -641,7 +642,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 		if (error)
 			goto out_unlock;
 
-		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
+		requested = data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip, requested);
 		if (error)
 			goto out_qunlock;
 	}
@@ -654,7 +656,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 	if (&ip->i_inode == sdp->sd_rindex)
 		rblocks += 2 * RES_STATFS;
 	if (alloc_required)
-		rblocks += gfs2_rg_blocks(ip);
+		rblocks += gfs2_rg_blocks(ip, requested);
 
 	error = gfs2_trans_begin(sdp, rblocks,
 				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -868,8 +870,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 	brelse(dibh);
 failed:
 	gfs2_trans_end(sdp);
-	if (gfs2_mb_reserved(ip))
-		gfs2_inplace_release(ip);
+	gfs2_inplace_release(ip);
 	if (ip->i_res->rs_qa_qd_num)
 		gfs2_quota_unlock(ip);
 	if (inode == sdp->sd_rindex) {
@@ -1023,7 +1024,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 			      offset, nr_segs, gfs2_get_block_direct,
 			      NULL, NULL, 0);
 out:
-	gfs2_glock_dq_m(1, &gh);
+	gfs2_glock_dq(&gh);
 	gfs2_holder_uninit(&gh);
 	return rv;
 }
fs/gfs2/bmap.c

@@ -786,7 +786,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 		goto out_rlist;
 
 	if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
-		gfs2_rs_deltree(ip->i_res);
+		gfs2_rs_deltree(ip, ip->i_res);
 
 	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
 				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
fs/gfs2/file.c

@@ -441,7 +441,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	rblocks += data_blocks ? data_blocks : 1;
 	if (ind_blocks || data_blocks) {
 		rblocks += RES_STATFS + RES_QUOTA;
-		rblocks += gfs2_rg_blocks(ip);
+		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
 	}
 	ret = gfs2_trans_begin(sdp, rblocks, 0);
 	if (ret)
@@ -845,7 +845,7 @@ retry:
 			&max_bytes, &data_blocks, &ind_blocks);
 
 	rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
-		  RES_RG_HDR + gfs2_rg_blocks(ip);
+		  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
 	if (gfs2_is_jdata(ip))
 		rblocks += data_blocks ? data_blocks : 1;
 
fs/gfs2/glock.c

@@ -185,20 +185,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 	spin_unlock(&lru_lock);
 }
 
-/**
- * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
- * @gl: the glock
- *
- * If the glock is demotable, then we add it (or move it) to the end
- * of the glock LRU list.
- */
-
-static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
-{
-	if (demote_ok(gl))
-		gfs2_glock_add_to_lru(gl);
-}
-
 /**
  * gfs2_glock_put_nolock() - Decrement reference count on glock
  * @gl: The glock to put
@@ -883,7 +869,14 @@ static int gfs2_glock_demote_wait(void *word)
 	return 0;
 }
 
-static void wait_on_holder(struct gfs2_holder *gh)
+/**
+ * gfs2_glock_wait - wait on a glock acquisition
+ * @gh: the glock holder
+ *
+ * Returns: 0 on success
+ */
+
+int gfs2_glock_wait(struct gfs2_holder *gh)
 {
 	unsigned long time1 = jiffies;
 
@@ -894,12 +887,7 @@ static void wait_on_holder(struct gfs2_holder *gh)
 		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
 					      GL_GLOCK_HOLD_INCR,
 					      GL_GLOCK_MAX_HOLD);
-}
-
-static void wait_on_demote(struct gfs2_glock *gl)
-{
-	might_sleep();
-	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
+	return gh->gh_error;
 }
 
 /**
@@ -929,19 +917,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 	trace_gfs2_demote_rq(gl);
 }
 
-/**
- * gfs2_glock_wait - wait on a glock acquisition
- * @gh: the glock holder
- *
- * Returns: 0 on success
- */
-
-int gfs2_glock_wait(struct gfs2_holder *gh)
-{
-	wait_on_holder(gh);
-	return gh->gh_error;
-}
-
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 {
 	struct va_format vaf;
@@ -979,7 +954,7 @@ __acquires(&gl->gl_spin)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct list_head *insert_pt = NULL;
 	struct gfs2_holder *gh2;
-	int try_lock = 0;
+	int try_futile = 0;
 
 	BUG_ON(gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
@@ -987,7 +962,7 @@ __acquires(&gl->gl_spin)
 
 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
 		if (test_bit(GLF_LOCK, &gl->gl_flags))
-			try_lock = 1;
+			try_futile = !may_grant(gl, gh);
 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 			goto fail;
 	}
@@ -996,9 +971,8 @@ __acquires(&gl->gl_spin)
 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
 			goto trap_recursive;
-		if (try_lock &&
-		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
-		    !may_grant(gl, gh)) {
+		if (try_futile &&
+		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
 fail:
 			gh->gh_error = GLR_TRYFAILED;
 			gfs2_holder_wake(gh);
@@ -1121,8 +1095,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
-	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
-		__gfs2_glock_schedule_for_reclaim(gl);
+	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
+		gfs2_glock_add_to_lru(gl);
+
 	trace_gfs2_glock_queue(gh, 0);
 	spin_unlock(&gl->gl_spin);
 	if (likely(fast_path))
@@ -1141,7 +1116,8 @@ void gfs2_glock_dq_wait(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	gfs2_glock_dq(gh);
-	wait_on_demote(gl);
+	might_sleep();
+	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
 }
 
 /**
fs/gfs2/glops.c

@@ -94,6 +94,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 	/* A shortened, inline version of gfs2_trans_begin() */
 	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
 	tr.tr_ip = (unsigned long)__builtin_return_address(0);
+	sb_start_intwrite(sdp->sd_vfs);
 	gfs2_log_reserve(sdp, tr.tr_reserved);
 	BUG_ON(current->journal_info);
 	current->journal_info = &tr;
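This one-line fix is about keeping the freeze-protection counter balanced: gfs2_ail_empty_gl() open-codes a shortened gfs2_trans_begin(), but the shared gfs2_trans_end() path drops the counter via sb_end_intwrite(), so the open-coded begin must take it via sb_start_intwrite() as well. A toy user-space model of that invariant (the names echo the kernel's; the plain integer is a stand-in for the superblock's s_writers counter):

#include <assert.h>

static int s_writers_counter;	/* stand-in for sb->s_writers */

static void sb_start_intwrite_model(void) { s_writers_counter++; }
static void sb_end_intwrite_model(void)   { s_writers_counter--; }

/* Every transaction begin, including an open-coded "shortened" one,
 * must bump the counter that the common end path drops. */
static void trans_begin(void) { sb_start_intwrite_model(); /* + log reserve */ }
static void trans_end(void)   { /* log release + */ sb_end_intwrite_model(); }

int main(void)
{
	trans_begin();
	trans_end();
	/* Balanced only because begin bumped the counter; skipping the
	 * bump (the bug being fixed) would leave it at -1 here. */
	assert(s_writers_counter == 0);
	return 0;
}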
fs/gfs2/incore.h

@@ -99,9 +99,26 @@ struct gfs2_rgrpd {
 #define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
 	spinlock_t rd_rsspin;           /* protects reservation related vars */
 	struct rb_root rd_rstree;       /* multi-block reservation tree */
 	u32 rd_rs_cnt;                  /* count of current reservations */
 };
 
+struct gfs2_rbm {
+	struct gfs2_rgrpd *rgd;
+	struct gfs2_bitmap *bi;	/* Bitmap must belong to the rgd */
+	u32 offset;		/* The offset is bitmap relative */
+};
+
+static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
+{
+	return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+}
+
+static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
+			       const struct gfs2_rbm *rbm2)
+{
+	return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
+	       (rbm1->offset == rbm2->offset);
+}
+
 enum gfs2_state_bits {
 	BH_Pinned = BH_PrivateStart,
 	BH_Escaped = BH_PrivateStart + 1,
@@ -250,18 +267,11 @@ struct gfs2_blkreserv {
 	/* components used during write (step 1): */
 	atomic_t rs_sizehint;         /* hint of the write size */
 
-	/* components used during inplace_reserve (step 2): */
-	u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
-
-	/* components used during get_local_rgrp (step 3): */
-	struct gfs2_rgrpd *rs_rgd;    /* pointer to the gfs2_rgrpd */
 	struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
 	struct rb_node rs_node;       /* link to other block reservations */
-
-	/* components used during block searches and assignments (step 4): */
-	struct gfs2_bitmap *rs_bi;    /* bitmap for the current allocation */
-	u32 rs_biblk;                 /* start block relative to the bi */
+	struct gfs2_rbm rs_rbm;       /* Start of reservation */
 	u32 rs_free;                  /* how many blocks are still free */
+	u64 rs_inum;                  /* Inode number for reservation */
 
 	/* ancillary quota stuff */
 	struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
fs/gfs2/inode.c

@@ -712,14 +712,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
-	/* The newly created inode needs a reservation so it can allocate
-	   xattrs. At the same time, we want new blocks allocated to the new
-	   dinode to be as contiguous as possible. Since we allocated the
-	   dinode block under the directory's reservation, we transfer
-	   ownership of that reservation to the new inode. The directory
-	   doesn't need a reservation unless it needs a new allocation. */
-	ip->i_res = dip->i_res;
-	dip->i_res = NULL;
+	error = gfs2_rs_alloc(ip);
+	if (error)
+		goto fail_gunlock2;
 
 	error = gfs2_acl_create(dip, inode);
 	if (error)
@@ -737,10 +732,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	brelse(bh);
 
 	gfs2_trans_end(sdp);
-	/* Check if we reserved space in the rgrp. Function link_dinode may
-	   not, depending on whether alloc is required. */
-	if (gfs2_mb_reserved(dip))
-		gfs2_inplace_release(dip);
+	gfs2_inplace_release(dip);
 	gfs2_quota_unlock(dip);
 	mark_inode_dirty(inode);
 	gfs2_glock_dq_uninit_m(2, ghs);
@@ -897,7 +889,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-				 gfs2_rg_blocks(dip) +
+				 gfs2_rg_blocks(dip, sdp->sd_max_dirres) +
 				 2 * RES_DINODE + RES_STATFS +
 				 RES_QUOTA, 0);
 	if (error)
@@ -1378,7 +1370,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
-				 gfs2_rg_blocks(ndip) +
+				 gfs2_rg_blocks(ndip, sdp->sd_max_dirres) +
 				 4 * RES_DINODE + 4 * RES_LEAF +
 				 RES_STATFS + RES_QUOTA + 4, 0);
 	if (error)
fs/gfs2/ops_fstype.c

@@ -19,6 +19,7 @@
 #include <linux/mount.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/quotaops.h>
+#include <linux/lockdep.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -766,6 +767,7 @@ fail:
 	return error;
 }
 
+static struct lock_class_key gfs2_quota_imutex_key;
+
 static int init_inodes(struct gfs2_sbd *sdp, int undo)
 {
@@ -803,6 +805,12 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
 		fs_err(sdp, "can't get quota file inode: %d\n", error);
 		goto fail_rindex;
 	}
+	/*
+	 * i_mutex on quota files is special. Since this inode is hidden system
+	 * file, we are safe to define locking ourselves.
+	 */
+	lockdep_set_class(&sdp->sd_quota_inode->i_mutex,
+			  &gfs2_quota_imutex_key);
 
 	error = gfs2_rindex_update(sdp);
 	if (error)
fs/gfs2/quota.c

@@ -765,6 +765,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	struct gfs2_holder *ghs, i_gh;
 	unsigned int qx, x;
 	struct gfs2_quota_data *qd;
+	unsigned reserved;
 	loff_t offset;
 	unsigned int nalloc = 0, blocks;
 	int error;
@@ -781,7 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		return -ENOMEM;
 
 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
-	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
+	mutex_lock(&ip->i_inode.i_mutex);
 	for (qx = 0; qx < num_qd; qx++) {
 		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 					   GL_NOCACHE, &ghs[qx]);
@@ -811,13 +812,13 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	 * two blocks need to be updated instead of 1 */
 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
-	error = gfs2_inplace_reserve(ip, 1 +
-				     (nalloc * (data_blocks + ind_blocks)));
+	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
+	error = gfs2_inplace_reserve(ip, reserved);
 	if (error)
 		goto out_alloc;
 
 	if (nalloc)
-		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
+		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
 
 	error = gfs2_trans_begin(sdp, blocks, 0);
 	if (error)
@@ -1598,7 +1599,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
 		error = gfs2_inplace_reserve(ip, blocks);
 		if (error)
 			goto out_i;
-		blocks += gfs2_rg_blocks(ip);
+		blocks += gfs2_rg_blocks(ip, blocks);
 	}
 
 	/* Some quotas span block boundaries and can update two blocks,
fs/gfs2/rgrp.c: 1215 lines changed (diff suppressed because it is too large)
fs/gfs2/rgrp.h

@@ -46,7 +46,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
 			     bool dinode, u64 *generation);
 
 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
-extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+extern void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs);
 extern void gfs2_rs_delete(struct gfs2_inode *ip);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
@@ -73,30 +73,10 @@ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 				   const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
 extern int gfs2_fitrim(struct file *filp, void __user *argp);
 
-/* This is how to tell if a multi-block reservation is "inplace" reserved: */
-static inline int gfs2_mb_reserved(struct gfs2_inode *ip)
-{
-	if (ip->i_res && ip->i_res->rs_requested)
-		return 1;
-	return 0;
-}
-
-/* This is how to tell if a multi-block reservation is in the rgrp tree: */
-static inline int gfs2_rs_active(struct gfs2_blkreserv *rs)
-{
-	if (rs && rs->rs_bi)
-		return 1;
-	return 0;
-}
-
-static inline u32 gfs2_bi2rgd_blk(const struct gfs2_bitmap *bi, u32 blk)
-{
-	return (bi->bi_start * GFS2_NBBY) + blk;
-}
-
-static inline u64 gfs2_rs_startblk(const struct gfs2_blkreserv *rs)
+/* This is how to tell if a reservation is in the rgrp tree: */
+static inline bool gfs2_rs_active(struct gfs2_blkreserv *rs)
 {
-	return gfs2_bi2rgd_blk(rs->rs_bi, rs->rs_biblk) + rs->rs_rgd->rd_data0;
+	return rs && !RB_EMPTY_NODE(&rs->rs_node);
 }
 
 #endif /* __RGRP_DOT_H__ */
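The new gfs2_rs_active() relies on the kernel rbtree convention that an unlinked node is marked "empty" by pointing its parent pointer back at itself (the RB_CLEAR_NODE()/RB_EMPTY_NODE() pair), so tree membership needs no separate flag. A minimal user-space model of that sentinel idiom, with a simplified rb_node stand-in (the real kernel struct packs colour bits into the parent word):

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for struct rb_node */
struct rb_node {
	struct rb_node *parent;	/* kernel packs colour bits in here too */
};

/* An unlinked node points at itself; models RB_CLEAR_NODE()/RB_EMPTY_NODE() */
static void rb_clear_node(struct rb_node *node)
{
	node->parent = node;
}

static int rb_empty_node(const struct rb_node *node)
{
	return node->parent == node;
}

struct blkreserv {
	struct rb_node rs_node;	/* link into the reservation tree */
};

/* Models gfs2_rs_active(): active iff linked into the rgrp tree */
static int rs_active(const struct blkreserv *rs)
{
	return rs && !rb_empty_node(&rs->rs_node);
}

int main(void)
{
	struct blkreserv rs;
	struct rb_node fake_parent = { .parent = NULL };

	rb_clear_node(&rs.rs_node);	/* freshly allocated: not active */
	assert(!rs_active(&rs));

	rs.rs_node.parent = &fake_parent;	/* pretend it was inserted */
	assert(rs_active(&rs));

	rb_clear_node(&rs.rs_node);	/* deltree marks it empty again */
	assert(!rs_active(&rs));
	return 0;
}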
fs/gfs2/super.c

@@ -1366,6 +1366,8 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
 	val = sdp->sd_tune.gt_statfs_quantum;
 	if (val != 30)
 		seq_printf(s, ",statfs_quantum=%d", val);
+	else if (sdp->sd_tune.gt_statfs_slow)
+		seq_puts(s, ",statfs_quantum=0");
 	val = sdp->sd_tune.gt_quota_quantum;
 	if (val != 60)
 		seq_printf(s, ",quota_quantum=%d", val);
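This ->show_options() fix covers the corner case the diff implies: mounting with statfs_quantum=0 is represented internally by gt_statfs_slow while the stored quantum keeps its default of 30, so without the extra branch the option silently vanished from the mount options listing. A small user-space sketch of the resulting output logic (30 is the default the diff tests against; the function itself is a stand-in):

#include <stdio.h>

/* Models the gfs2_show_options() handling of statfs_quantum */
static void show_statfs_option(int statfs_quantum, int statfs_slow)
{
	if (statfs_quantum != 30)	/* non-default value: print it */
		printf(",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)		/* 0 was mapped to "slow" mode */
		printf(",statfs_quantum=0");
	/* default (30, not slow): print nothing */
}

int main(void)
{
	show_statfs_option(30, 0);	/* default: prints nothing */
	show_statfs_option(10, 0);	/* prints ,statfs_quantum=10 */
	show_statfs_option(30, 1);	/* prints ,statfs_quantum=0 */
	putchar('\n');
	return 0;
}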
@@ -1543,6 +1545,11 @@ static void gfs2_evict_inode(struct inode *inode)
 
 out_truncate:
 	gfs2_log_flush(sdp, ip->i_gl);
+	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
+		struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
+		filemap_fdatawrite(metamapping);
+		filemap_fdatawait(metamapping);
+	}
 	write_inode_now(inode, 1);
 	gfs2_ail_flush(ip->i_gl, 0);
 
@@ -1557,7 +1564,7 @@ out_truncate:
 out_unlock:
 	/* Error path for case 1 */
 	if (gfs2_rs_active(ip->i_res))
-		gfs2_rs_deltree(ip->i_res);
+		gfs2_rs_deltree(ip, ip->i_res);
 
 	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
 		gfs2_glock_dq(&ip->i_iopen_gh);
fs/gfs2/trace_gfs2.h

@@ -509,10 +509,9 @@ TRACE_EVENT(gfs2_block_alloc,
 /* Keep track of multi-block reservations as they are allocated/freed */
 TRACE_EVENT(gfs2_rs,
 
-	TP_PROTO(const struct gfs2_inode *ip, const struct gfs2_blkreserv *rs,
-		 u8 func),
+	TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),
 
-	TP_ARGS(ip, rs, func),
+	TP_ARGS(rs, func),
 
 	TP_STRUCT__entry(
 		__field(        dev_t,  dev                     )
@@ -526,18 +525,17 @@ TRACE_EVENT(gfs2_rs,
 	),
 
 	TP_fast_assign(
-		__entry->dev		= rs->rs_rgd ? rs->rs_rgd->rd_sbd->sd_vfs->s_dev : 0;
-		__entry->rd_addr	= rs->rs_rgd ? rs->rs_rgd->rd_addr : 0;
-		__entry->rd_free_clone	= rs->rs_rgd ? rs->rs_rgd->rd_free_clone : 0;
-		__entry->rd_reserved	= rs->rs_rgd ? rs->rs_rgd->rd_reserved : 0;
-		__entry->inum		= ip ? ip->i_no_addr : 0;
-		__entry->start		= gfs2_rs_startblk(rs);
+		__entry->dev		= rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
+		__entry->rd_addr	= rs->rs_rbm.rgd->rd_addr;
+		__entry->rd_free_clone	= rs->rs_rbm.rgd->rd_free_clone;
+		__entry->rd_reserved	= rs->rs_rbm.rgd->rd_reserved;
+		__entry->inum		= rs->rs_inum;
+		__entry->start		= gfs2_rbm_to_block(&rs->rs_rbm);
 		__entry->free		= rs->rs_free;
 		__entry->func		= func;
 	),
 
-	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s "
-		  "f:%lu",
+	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long long)__entry->inum,
 		  (unsigned long long)__entry->start,
fs/gfs2/trans.h

@@ -28,11 +28,10 @@ struct gfs2_glock;
 
 /* reserve either the number of blocks to be allocated plus the rg header
  * block, or all of the blocks in the rg, whichever is smaller */
-static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
 {
-	const struct gfs2_blkreserv *rs = ip->i_res;
-	if (rs && rs->rs_requested < ip->i_rgd->rd_length)
-		return rs->rs_requested + 1;
+	if (requested < ip->i_rgd->rd_length)
+		return requested + 1;
 	return ip->i_rgd->rd_length;
 }
 
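This signature change explains the gfs2_rg_blocks() call-site churn in the hunks above: rather than reading the request size back out of ip->i_res (a field the rbm rework deletes), callers now pass the block count they handed to gfs2_inplace_reserve(). A user-space sketch of the capping arithmetic, with rd_length standing in for the number of blocks in the resource group (the value is made up):

#include <assert.h>

/* Models gfs2_rg_blocks(): journal blocks to reserve for bitmap updates.
 * Reserve one block per requested allocation plus the rg header block,
 * capped at every block the resource group has. */
static unsigned int rg_blocks(unsigned int requested, unsigned int rd_length)
{
	if (requested < rd_length)
		return requested + 1;	/* requested blocks + rg header */
	return rd_length;		/* can't touch more than the rg has */
}

int main(void)
{
	const unsigned int rd_length = 20;	/* hypothetical rgrp size */

	assert(rg_blocks(4, rd_length) == 5);	/* small request: 4 + header */
	assert(rg_blocks(19, rd_length) == 20);	/* 19 + header == the cap */
	assert(rg_blocks(500, rd_length) == 20);	/* huge request: capped */
	return 0;
}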
fs/gfs2/xattr.c

@@ -448,17 +448,18 @@ ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 
 /**
- * ea_get_unstuffed - actually copies the unstuffed data into the
- *                    request buffer
+ * ea_iter_unstuffed - copies the unstuffed xattr data to/from the
+ *                     request buffer
  * @ip: The GFS2 inode
  * @ea: The extended attribute header structure
- * @data: The data to be copied
+ * @din: The data to be copied in
+ * @dout: The data to be copied out (one of din,dout will be NULL)
  *
  * Returns: errno
  */
 
-static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
-			    char *data)
+static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
+			       const char *din, char *dout)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head **bh;
@@ -467,6 +468,8 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
 	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
 	unsigned int x;
 	int error = 0;
+	unsigned char *pos;
+	unsigned cp_size;
 
 	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
 	if (!bh)
@@ -497,12 +500,21 @@ static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
 			goto out;
 		}
 
-		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
-		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
+		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
+		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;
+
+		if (dout) {
+			memcpy(dout, pos, cp_size);
+			dout += sdp->sd_jbsize;
+		}
+
+		if (din) {
+			gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
+			memcpy(pos, din, cp_size);
+			din += sdp->sd_jbsize;
+		}
 
 		amount -= sdp->sd_jbsize;
-		data += sdp->sd_jbsize;
 
 		brelse(bh[x]);
 	}
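The rewritten copy loop above turns a read-only helper into a bidirectional iterator: passing only dout copies xattr data out of the journaled blocks, while passing only din copies into them (adding each buffer to the transaction first). A self-contained user-space sketch of the same NULL-selects-direction pattern, using a flat buffer as a stand-in for the block chain (all names here are illustrative):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Copy 'amount' bytes in 'chunk'-sized pieces between a backing store and
 * a caller buffer. Exactly one of din/dout is non-NULL:
 *   dout != NULL: read from the store into dout
 *   din  != NULL: write from din into the store
 * Mirrors the din/dout convention of gfs2_iter_unstuffed(). */
static void iter_chunks(unsigned char *store, size_t amount, size_t chunk,
			const unsigned char *din, unsigned char *dout)
{
	while (amount > 0) {
		size_t cp_size = (chunk > amount) ? amount : chunk;

		if (dout) {
			memcpy(dout, store, cp_size);
			dout += chunk;
		}
		if (din) {
			memcpy(store, din, cp_size);
			din += chunk;
		}
		store += chunk;
		amount -= cp_size;
	}
}

int main(void)
{
	unsigned char store[16] = "hello, world!";
	unsigned char out[16] = { 0 };

	iter_chunks(store, 13, 4, NULL, out);		/* read direction */
	assert(memcmp(out, "hello, world!", 13) == 0);

	iter_chunks(store, 5, 4, (const unsigned char *)"HELLO", NULL);
	assert(memcmp(store, "HELLO, world!", 13) == 0);/* write direction */
	return 0;
}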
@@ -523,7 +535,7 @@ static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
 		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
 		return len;
 	}
-	ret = ea_get_unstuffed(ip, el->el_ea, data);
+	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
 	if (ret < 0)
 		return ret;
 	return len;
@@ -727,7 +739,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 		goto out_gunlock_q;
 
 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
-				 blks + gfs2_rg_blocks(ip) +
+				 blks + gfs2_rg_blocks(ip, blks) +
 				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
 	if (error)
 		goto out_ipres;
@@ -1220,69 +1232,23 @@ static int gfs2_xattr_set(struct dentry *dentry, const char *name,
 			  size, flags, type);
 }
 
-
 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
 				  struct gfs2_ea_header *ea, char *data)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct buffer_head **bh;
 	unsigned int amount = GFS2_EA_DATA_LEN(ea);
 	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
-	unsigned int x;
-	int error;
+	int ret;
 
-	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
-	if (!bh)
-		return -ENOMEM;
+	ret = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
+	if (ret)
+		return ret;
 
-	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
-	if (error)
-		goto out;
-
-	for (x = 0; x < nptrs; x++) {
-		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
-				       bh + x);
-		if (error) {
-			while (x--)
-				brelse(bh[x]);
-			goto fail;
-		}
-		dataptrs++;
-	}
-
-	for (x = 0; x < nptrs; x++) {
-		error = gfs2_meta_wait(sdp, bh[x]);
-		if (error) {
-			for (; x < nptrs; x++)
-				brelse(bh[x]);
-			goto fail;
-		}
-		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
-			for (; x < nptrs; x++)
-				brelse(bh[x]);
-			error = -EIO;
-			goto fail;
-		}
-
-		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
-
-		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
-		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
-
-		amount -= sdp->sd_jbsize;
-		data += sdp->sd_jbsize;
-
-		brelse(bh[x]);
-	}
-
-out:
-	kfree(bh);
-	return error;
-
-fail:
+	ret = gfs2_iter_unstuffed(ip, ea, data, NULL);
 	gfs2_trans_end(sdp);
-	kfree(bh);
-	return error;
+
+	return ret;
 }
 
 int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)