Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes

Pull GFS2 fixes from Steven Whitehouse:
 "There are two patches which fix up a couple of minor issues in the DLM
  interface code, a missing error path in gfs2_rs_alloc(), one patch
  which fixes a problem during "withdraw" and a fix for discards/FITRIM
  when using 4k sector sized devices."

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes:
  GFS2: Issue discards in 512b sectors
  GFS2: Fix unlock of fcntl locks during withdrawn state
  GFS2: return error if malloc failed in gfs2_rs_alloc()
  GFS2: use memchr_inv
  GFS2: use kmalloc for lvb bitmap
commit 00fa6fe963
@@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 		cmd = F_SETLK;
 		fl->fl_type = F_UNLCK;
 	}
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
+		if (fl->fl_type == F_UNLCK)
+			posix_lock_file_wait(file, fl);
 		return -EIO;
+	}
 	if (IS_GETLK(cmd))
 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
 	else if (fl->fl_type == F_UNLCK)
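A note on the hunk above: on a withdrawn filesystem SDF_SHUTDOWN stays set, and the old code answered every fcntl request, unlocks included, with -EIO, which presumably left the VFS still believing the lock was held. With this change an unlock is first passed to posix_lock_file_wait() so the local lock state is cleared, and only then is -EIO returned. A minimal sketch of the resulting branch, reconstructed from the hunk rather than from the full file:

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);	/* drop the local lock record */
		return -EIO;				/* fs is withdrawn */
	}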
@@ -588,6 +588,7 @@ struct lm_lockstruct {
 	struct dlm_lksb ls_control_lksb; /* control_lock */
 	char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */
 	struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */
+	char *ls_lvb_bits;
 
 	spinlock_t ls_recover_spin; /* protects following fields */
 	unsigned long ls_recover_flags;	/* DFL_ */
@@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
 
 static int all_jid_bits_clear(char *lvb)
 {
-	int i;
-	for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) {
-		if (lvb[i])
-			return 0;
-	}
-	return 1;
+	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
+			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
 }
 
 static void sync_wait_cb(void *arg)
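The open-coded byte scan removed above is replaced by memchr_inv(), which returns the address of the first byte that differs from the given value, or NULL when every byte matches; !memchr_inv(p, 0, n) therefore means "all n bytes are zero". A small illustrative sketch of the equivalence (the _open_coded helper is a hypothetical name, not part of the patch):

	#include <linux/string.h>

	static int jid_bits_clear_open_coded(const char *lvb)
	{
		int i;

		/* what the removed loop did: fail on the first non-zero byte */
		for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++)
			if (lvb[i])
				return 0;
		return 1;
	}

	static int jid_bits_clear_memchr(const char *lvb)
	{
		/* same result, one library call */
		return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
				   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
	}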
@@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work)
 {
 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t block_gen, start_gen, lvb_gen, flags;
 	int recover_set = 0;
 	int write_lvb = 0;
@@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work)
 		return;
 	}
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	spin_lock(&ls->ls_recover_spin);
 	if (block_gen != ls->ls_recover_block ||
@@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work)
 
 			ls->ls_recover_result[i] = 0;
 
-			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
+			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
 				continue;
 
-			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 			write_lvb = 1;
 		}
 	}
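The jid bitmap in the lock value block is driven with the kernel's little-endian bitmap helpers (test_bit_le(), __set_bit_le(), __clear_bit_le() from linux/bitops.h), so the stored bit layout does not depend on the host's native bit ordering; that appears to be the reason the plain test_bit()/set_bit() family is not used on this buffer, since the same bytes are shared between nodes through the DLM. A tiny usage sketch with an illustrative local buffer (not the GFS2 code itself):

	char bits[GDLM_LVB_SIZE] = {};			/* illustrative buffer */

	__set_bit_le(3, bits + JID_BITMAP_OFFSET);	/* mark jid 3 for recovery */
	if (test_bit_le(3, bits + JID_BITMAP_OFFSET))
		__clear_bit_le(3, bits + JID_BITMAP_OFFSET);	/* jid 3 recovered */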
@@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work)
 				continue;
 			if (ls->ls_recover_submit[i] < start_gen) {
 				ls->ls_recover_submit[i] = 0;
-				__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
+				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
 			}
 		}
 		/* even if there are no bits to set, we need to write the
@@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work)
 	spin_unlock(&ls->ls_recover_spin);
 
 	if (write_lvb) {
-		control_lvb_write(ls, start_gen, lvb_bits);
+		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
 	} else {
 		flags = DLM_LKF_CONVERT;
@@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work)
 	 */
 
 	for (i = 0; i < recover_size; i++) {
-		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
+		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
 			fs_info(sdp, "recover generation %u jid %d\n",
 				start_gen, i);
 			gfs2_recover_set(sdp, i);
@@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work)
 static int control_mount(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
 	int mounted_mode;
 	int retries = 0;
@@ -857,7 +851,7 @@ locks_done:
 	 * lvb_gen will be non-zero.
 	 */
 
-	control_lvb_read(ls, &lvb_gen, lvb_bits);
+	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
 
 	if (lvb_gen == 0xFFFFFFFF) {
 		/* special value to force mount attempts to fail */
@@ -887,7 +881,7 @@ locks_done:
 	 * and all lvb bits to be clear (no pending journal recoveries.)
 	 */
 
-	if (!all_jid_bits_clear(lvb_bits)) {
+	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
 		/* journals need recovery, wait until all are clear */
 		fs_info(sdp, "control_mount wait for journal recovery\n");
 		goto restart;
@@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word)
 static int control_first_done(struct gfs2_sbd *sdp)
 {
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	char lvb_bits[GDLM_LVB_SIZE];
 	uint32_t start_gen, block_gen;
 	int error;
 
@@ -991,8 +984,8 @@ restart:
 	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
 	spin_unlock(&ls->ls_recover_spin);
 
-	memset(lvb_bits, 0, sizeof(lvb_bits));
-	control_lvb_write(ls, start_gen, lvb_bits);
+	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
+	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
 
 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
 	if (error)
@@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 	uint32_t old_size, new_size;
 	int i, max_jid;
 
+	if (!ls->ls_lvb_bits) {
+		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
+		if (!ls->ls_lvb_bits)
+			return -ENOMEM;
+	}
+
 	max_jid = 0;
 	for (i = 0; i < num_slots; i++) {
 		if (max_jid < slots[i].slot - 1)
@@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
 static void free_recover_size(struct lm_lockstruct *ls)
 {
+	kfree(ls->ls_lvb_bits);
 	kfree(ls->ls_recover_submit);
 	kfree(ls->ls_recover_result);
 	ls->ls_recover_submit = NULL;
@@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
 	ls->ls_recover_size = 0;
 	ls->ls_recover_submit = NULL;
 	ls->ls_recover_result = NULL;
+	ls->ls_lvb_bits = NULL;
 
 	error = set_recover_size(sdp, NULL, 0);
 	if (error)
@@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
 	RB_CLEAR_NODE(&ip->i_res->rs_node);
 out:
 	up_write(&ip->i_rw_mutex);
-	return 0;
+	return error;
 }
 
 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
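The single changed line above is the whole fix for the missing error path: gfs2_rs_alloc() presumably already records -ENOMEM in error when the reservation cannot be allocated, but the old exit path threw that away and returned a hard-coded 0, so callers carried on without a reservation. A hypothetical sketch of the pattern being restored (names are illustrative, not the actual GFS2 code):

	static int rs_alloc_sketch(struct example_inode *ip)
	{
		int error = 0;

		down_write(&ip->rw_mutex);
		if (ip->res)
			goto out;
		ip->res = kmem_cache_zalloc(example_cachep, GFP_NOFS);
		if (!ip->res)
			error = -ENOMEM;	/* must be visible to the caller */
	out:
		up_write(&ip->rw_mutex);
		return error;			/* previously a hard-coded 0 */
	}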
@@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 			   const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
 {
 	struct super_block *sb = sdp->sd_vfs;
-	struct block_device *bdev = sb->s_bdev;
-	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
-					   bdev_logical_block_size(sb->s_bdev);
 	u64 blk;
 	sector_t start = 0;
-	sector_t nr_sects = 0;
+	sector_t nr_blks = 0;
 	int rv;
 	unsigned int x;
 	u32 trimmed = 0;
@@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 		if (diff == 0)
 			continue;
 		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
-		blk *= sects_per_blk; /* convert to sectors */
 		while(diff) {
 			if (diff & 1) {
-				if (nr_sects == 0)
+				if (nr_blks == 0)
 					goto start_new_extent;
-				if ((start + nr_sects) != blk) {
-					if (nr_sects >= minlen) {
-						rv = blkdev_issue_discard(bdev,
-							start, nr_sects,
+				if ((start + nr_blks) != blk) {
+					if (nr_blks >= minlen) {
+						rv = sb_issue_discard(sb,
+							start, nr_blks,
 							GFP_NOFS, 0);
 						if (rv)
 							goto fail;
-						trimmed += nr_sects;
+						trimmed += nr_blks;
 					}
-					nr_sects = 0;
+					nr_blks = 0;
 start_new_extent:
 					start = blk;
 				}
-				nr_sects += sects_per_blk;
+				nr_blks++;
 			}
 			diff >>= 2;
-			blk += sects_per_blk;
+			blk++;
 		}
 	}
-	if (nr_sects >= minlen) {
-		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
+	if (nr_blks >= minlen) {
+		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
 		if (rv)
 			goto fail;
-		trimmed += nr_sects;
+		trimmed += nr_blks;
 	}
 	if (ptrimmed)
 		*ptrimmed = trimmed;
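Why the block/sector switch above matters: blkdev_issue_discard() wants its start and length in 512-byte sectors, but the removed sects_per_blk computation divided the filesystem block size by bdev_logical_block_size(), which is 4096 rather than 512 on 4k-sector devices, so the discard ranges came out wrong there. sb_issue_discard() lets the caller stay in filesystem-block units and performs the conversion itself; roughly (a simplified rendering of the generic helper in linux/blkdev.h, not copied verbatim):

	static inline int sb_issue_discard(struct super_block *sb, sector_t block,
					   sector_t nr_blocks, gfp_t gfp_mask,
					   unsigned long flags)
	{
		/* scale fs blocks to 512-byte sectors via the superblock block size */
		return blkdev_issue_discard(sb->s_bdev,
					    block << (sb->s_blocksize_bits - 9),
					    nr_blocks << (sb->s_blocksize_bits - 9),
					    gfp_mask, flags);
	}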