Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
commit e0f3e6a7cc

Merge tag 'dm-4.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a couple DM raid and DM mirror fixes

 - a couple .request_fn request-based DM NULL pointer fixes

 - a fix for a DM target reference count leak, on target load error,
   that prevented associated DM target kernel module(s) from being
   removed

* tag 'dm-4.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm table: fix missing dm_put_target_type() in dm_table_add_target()
  dm rq: clear kworker_task if kthread_run() returned an error
  dm: free io_barrier after blk_cleanup_queue call
  dm raid: fix activation of existing raid4/10 devices
  dm mirror: use all available legs on multiple failures
  dm mirror: fix read error on recovery after default leg failure
  dm raid: fix compat_features validation
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
 	with a reshape in progress.
 1.9.0	Add support for RAID level takeover/reshape/region size
 	and set size reduction.
+1.9.1	Fix activation of existing RAID 4/10 mapped devices
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
 	{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
 	{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
 	{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
-	{"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+	{"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
 	{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
 	{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
 	{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 		/*
 		 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
 		 */
-		if (le32_to_cpu(sb->level) != mddev->level) {
+		if (le32_to_cpu(sb->level) != mddev->new_level) {
 			DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
 			return -EINVAL;
 		}
-		if (le32_to_cpu(sb->layout) != mddev->layout) {
+		if (le32_to_cpu(sb->layout) != mddev->new_layout) {
 			DMERR("Reshaping raid sets not yet supported. (raid layout change)");
 			DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
 			DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 			      raid10_md_layout_to_copies(mddev->layout));
 			return -EINVAL;
 		}
-		if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+		if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
 			DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
 			return -EINVAL;
 		}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 			return -EINVAL;
 		}
 
+		DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
 		/* Table line is checked vs. authoritative superblock */
 		rs_set_new(rs);
 	}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
 	if (!mddev->events && super_init_validation(rs, rdev))
 		return -EINVAL;
 
-	if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+	if (le32_to_cpu(sb->compat_features) &&
+	    le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
 		rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
 		return -EINVAL;
 	}
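The compat_features change above is easier to see in isolation: the old check rejected any superblock whose compat_features field was not exactly FEATURE_FLAG_SUPPORTS_V190, which also rejected pre-1.9.0 superblocks that carry a zeroed field, while the new check only rejects genuinely unknown flags. A minimal userspace sketch of the two predicates, with a stand-in flag value and a plain uint32_t in place of the little-endian on-disk field (illustration only, not the driver code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in value; the real FEATURE_FLAG_SUPPORTS_V190 is defined in dm-raid.c. */
#define FEATURE_FLAG_SUPPORTS_V190 0x1u

/* Old predicate: rejects anything that is not exactly the v1.9.0 flag,
 * including legacy superblocks whose compat_features field is 0. */
static int old_check_rejects(uint32_t compat_features)
{
	return compat_features != FEATURE_FLAG_SUPPORTS_V190;
}

/* New predicate: only rejects superblocks carrying unknown flags. */
static int new_check_rejects(uint32_t compat_features)
{
	return compat_features &&
	       compat_features != FEATURE_FLAG_SUPPORTS_V190;
}

int main(void)
{
	uint32_t samples[] = { 0x0, FEATURE_FLAG_SUPPORTS_V190, 0x2 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("compat_features=0x%x  old=%s  new=%s\n",
		       samples[i],
		       old_check_rejects(samples[i]) ? "reject" : "accept",
		       new_check_rejects(samples[i]) ? "reject" : "accept");
	return 0;
}

Run against 0x0, 0x1 and 0x2, the only behavioural difference is that a zeroed compat_features (an old-format superblock) is now accepted.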
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 9, 0},
+	.version = {1, 9, 1},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
 	struct mirror *m;
-	/* if details->bi_bdev == NULL, details were not saved */
 	struct dm_bio_details details;
 	region_t write_region;
 };
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	struct dm_raid1_bio_record *bio_record =
 		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
-	bio_record->details.bi_bdev = NULL;
-
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
 		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	}
 
 	if (error == -EOPNOTSUPP)
-		goto out;
+		return error;
 
 	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-		goto out;
+		return error;
 
 	if (unlikely(error)) {
-		if (!bio_record->details.bi_bdev) {
-			/*
-			 * There wasn't enough memory to record necessary
-			 * information for a retry or there was no other
-			 * mirror in-sync.
-			 */
-			DMERR_LIMIT("Mirror read failed.");
-			return -EIO;
-		}
-
 		m = bio_record->m;
 
 		DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		bd = &bio_record->details;
 
 		dm_bio_restore(bd, bio);
-		bio_record->details.bi_bdev = NULL;
+		bio->bi_error = 0;
 
 		queue_bio(ms, bio, rw);
 		return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		DMERR("All replicated volumes dead, failing I/O");
 	}
 
-out:
-	bio_record->details.bi_bdev = NULL;
-
 	return error;
 }
 
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
 	kthread_init_worker(&md->kworker);
 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
 				       "kdmwork-%s", dm_device_name(md));
-	if (IS_ERR(md->kworker_task))
-		return PTR_ERR(md->kworker_task);
+	if (IS_ERR(md->kworker_task)) {
+		int error = PTR_ERR(md->kworker_task);
+		md->kworker_task = NULL;
+		return error;
+	}
 
 	elv_register_queue(md->queue);
 
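The hunk above matters because kthread_run() reports failure through an error-encoded pointer rather than NULL, so a failed dm_old_init_request_queue() used to leave md->kworker_task holding an ERR_PTR() value that later teardown could mistake for a live task. A small userspace sketch of that idiom, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers and a hypothetical spawn_worker() and toy_mapped_device in place of kthread_run() and struct mapped_device (illustration only):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the helpers from <linux/err.h>. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct toy_mapped_device {
	void *kworker_task;	/* live worker pointer or an ERR_PTR() value */
};

/* Hypothetical fallible spawn standing in for kthread_run(). */
static void *spawn_worker(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)(uintptr_t)0x1000;
}

static int toy_init_request_queue(struct toy_mapped_device *md, int fail)
{
	md->kworker_task = spawn_worker(fail);
	if (IS_ERR(md->kworker_task)) {
		int error = (int)PTR_ERR(md->kworker_task);

		/* Without this reset, a later teardown doing
		 * "if (md->kworker_task) stop(md->kworker_task);" would treat
		 * the stale error value as a live task and dereference it. */
		md->kworker_task = NULL;
		return error;
	}
	return 0;
}

int main(void)
{
	struct toy_mapped_device md = { NULL };
	int r;

	r = toy_init_request_queue(&md, 0);
	printf("ok path:    r=%d task=%p\n", r, md.kworker_task);

	r = toy_init_request_queue(&md, 1);
	printf("error path: r=%d task=%p\n", r, md.kworker_task);
	return 0;
}

The point of the fix is the md->kworker_task = NULL line: after it, cleanup code that checks the field sees "no task" instead of a stale error value, which is one of the .request_fn NULL/invalid pointer fixes from the pull summary.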
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
 	tgt->type = dm_get_target_type(type);
 	if (!tgt->type) {
-		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
-		      type);
+		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
 		return -EINVAL;
 	}
 
 	if (dm_target_needs_singleton(tgt->type)) {
 		if (t->num_targets) {
-			DMERR("%s: target type %s must appear alone in table",
-			      dm_device_name(t->md), type);
-			return -EINVAL;
+			tgt->error = "singleton target type must appear alone in table";
+			goto bad;
 		}
 		t->singleton = true;
 	}
 
 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
-		DMERR("%s: target type %s may not be included in read-only tables",
-		      dm_device_name(t->md), type);
-		return -EINVAL;
+		tgt->error = "target type may not be included in a read-only table";
+		goto bad;
 	}
 
 	if (t->immutable_target_type) {
 		if (t->immutable_target_type != tgt->type) {
-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
-			      dm_device_name(t->md), t->immutable_target_type->name);
-			return -EINVAL;
+			tgt->error = "immutable target type cannot be mixed with other target types";
+			goto bad;
 		}
 	} else if (dm_target_is_immutable(tgt->type)) {
 		if (t->num_targets) {
-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
-			      dm_device_name(t->md), tgt->type->name);
-			return -EINVAL;
+			tgt->error = "immutable target type cannot be mixed with other target types";
+			goto bad;
 		}
 		t->immutable_target_type = tgt->type;
 	}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	 */
 	if (!adjoin(t, tgt)) {
 		tgt->error = "Gap in table";
-		r = -EINVAL;
 		goto bad;
 	}
 
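The dm-table.c hunks above are the reference-count-leak fix from the pull summary: dm_get_target_type() takes a reference on the target type (pinning its module), and the old early "return -EINVAL" paths skipped the function's "bad:" label, which is where dm_put_target_type() drops that reference. A toy C sketch of the two shapes, with hypothetical toy_get_target_type()/toy_put_target_type() helpers standing in for the DM calls (illustration only):

#include <stdio.h>

/* Toy module-style reference count on a looked-up target type. */
struct toy_target_type {
	int refcount;
};

static struct toy_target_type registry = { 0 };

static struct toy_target_type *toy_get_target_type(void)
{
	registry.refcount++;		/* pins the module in the real code */
	return &registry;
}

static void toy_put_target_type(struct toy_target_type *type)
{
	type->refcount--;
}

/* Early-return shape: the reference taken above the failing check is never
 * dropped, which is the leak the patch removes. */
static int add_target_leaky(int should_fail)
{
	toy_get_target_type();

	if (should_fail)
		return -22;		/* -EINVAL, reference leaked */
	return 0;			/* success: the table keeps the reference */
}

/* "goto bad" shape: every error path funnels through one label that drops
 * the reference, as in the fixed dm_table_add_target(). */
static int add_target_fixed(int should_fail)
{
	struct toy_target_type *type = toy_get_target_type();

	if (should_fail)
		goto bad;
	return 0;			/* success: the table keeps the reference */

bad:
	toy_put_target_type(type);
	return -22;			/* -EINVAL */
}

int main(void)
{
	add_target_leaky(1);
	printf("leaky error path leaves refcount=%d\n", registry.refcount);

	registry.refcount = 0;
	add_target_fixed(1);
	printf("fixed error path leaves refcount=%d\n", registry.refcount);
	return 0;
}

With the goto-based shape every failure funnels through one cleanup point that drops the reference, so the module behind a rejected table entry can be unloaded again.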
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	if (md->bs)
 		bioset_free(md->bs);
 
-	cleanup_srcu_struct(&md->io_barrier);
-
 	if (md->disk) {
 		spin_lock(&_minor_lock);
 		md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	if (md->queue)
 		blk_cleanup_queue(md->queue);
 
+	cleanup_srcu_struct(&md->io_barrier);
+
 	if (md->bdev) {
 		bdput(md->bdev);
 		md->bdev = NULL;