Merge tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:
 "Smaller set of DM changes for this merge.  I've based these changes on
  Jens' for-4.4/reservations branch because the associated DM changes
  required it.

   - Revert a dm-multipath change that caused a regression for
     unprivileged users (e.g. kvm guests) that issued ioctls when a
     multipath device had no available paths.

   - Include Christoph's refactoring of DM's ioctl handling and add
     support for passing through persistent reservations with DM
     multipath.

   - All other changes are very simple cleanups"

* tag 'dm-4.4-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm switch: simplify conditional in alloc_region_table()
  dm delay: document that offsets are specified in sectors
  dm delay: capitalize the start of a delay_ctr() error message
  dm delay: Use DM_MAPIO macros instead of open-coded equivalents
  dm linear: remove redundant target name from error messages
  dm persistent data: eliminate unnecessary return values
  dm: eliminate unused "bioset" process for each bio-based DM device
  dm: convert ffs to __ffs
  dm: drop NULL test before kmem_cache_destroy() and mempool_destroy()
  dm: add support for passing through persistent reservations
  dm: refactor ioctl handling
  Revert "dm mpath: fix stalls when handling invalid ioctls"
  dm: initialize non-blk-mq queue data before queue is used
commit e0700ce709
@@ -8,6 +8,7 @@ Parameters:
     <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]

 With separate write parameters, the first set is only used for reads.
+Offsets are specified in sectors.
 Delays are specified in milliseconds.

 Example scripts
@@ -1598,11 +1598,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign

 	c->bdev = bdev;
 	c->block_size = block_size;
-	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
-	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
-				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
-	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
-				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);
+	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
+	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
+				  __ffs(block_size) - PAGE_SHIFT : 0;
+	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
+				  PAGE_SHIFT - __ffs(block_size) : 0);

 	c->aux_size = aux_size;
 	c->alloc_callback = alloc_callback;
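The ffs to __ffs conversion above is behavior-preserving because block_size is always a power of two here: ffs() numbers the lowest set bit starting from 1, while __ffs() numbers it from 0, so ffs(x) - 1 == __ffs(x) for any non-zero x. A minimal userspace sketch of the equivalence; my_ffs0() is a hypothetical stand-in for the kernel's __ffs(), which is not available outside the kernel:

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    /* Stand-in for the kernel's __ffs(): index of the lowest set bit,
     * counting from 0.  Like __ffs(), undefined for x == 0. */
    static unsigned long my_ffs0(unsigned long x)
    {
    	return (unsigned long)__builtin_ctzl(x);
    }

    int main(void)
    {
    	unsigned int block_size = 4096;	/* always a power of two here */

    	printf("ffs(x) - 1 = %d\n", ffs(block_size) - 1);	/* prints 12 */
    	printf("__ffs(x)   = %lu\n", my_ffs0(block_size));	/* prints 12 */
    	return 0;
    }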
@@ -1861,12 +1861,8 @@ static void __exit dm_bufio_exit(void)
 	cancel_delayed_work_sync(&dm_bufio_work);
 	destroy_workqueue(dm_bufio_wq);

-	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
-		struct kmem_cache *kc = dm_bufio_caches[i];
-
-		if (kc)
-			kmem_cache_destroy(kc);
-	}
+	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
+		kmem_cache_destroy(dm_bufio_caches[i]);

 	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
 		kfree(dm_bufio_cache_names[i]);
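This simplification works because kmem_cache_destroy() is a no-op when passed NULL, exactly like the kfree() in the following loop; the same holds for mempool_destroy() in the later hunks, so all the caller-side "if (ptr)" guards in this series are redundant. A userspace sketch of the idiom, with free()'s defined NULL tolerance playing the role of the kernel destructors (all names hypothetical):

    #include <stdlib.h>

    struct cache { int unused; };

    /* NULL-safe destructor, mirroring kmem_cache_destroy() semantics:
     * callers never need a guard before calling it. */
    static void cache_destroy(struct cache *c)
    {
    	free(c);	/* free(NULL) is defined to do nothing */
    }

    int main(void)
    {
    	struct cache *caches[4] = { NULL };	/* some slots stay empty */

    	caches[0] = calloc(1, sizeof(*caches[0]));
    	for (int i = 0; i < 4; i++)
    		cache_destroy(caches[i]);	/* no guard at the call site */
    	return 0;
    }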
@@ -260,7 +260,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
 		}
 	}

-	return dm_bm_unlock(b);
+	dm_bm_unlock(b);
+
+	return 0;
 }

 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
@@ -465,7 +467,9 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
 	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
 	sb_flags = le32_to_cpu(disk_super->flags);
 	cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
-	return dm_bm_unlock(sblock);
+	dm_bm_unlock(sblock);
+
+	return 0;

 bad:
 	dm_bm_unlock(sblock);
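These two hunks are the template for the "eliminate unnecessary return values" cleanup that runs through the rest of the series: dm_bm_unlock() (and dm_tm_unlock() built on it) can never fail, so "return dm_bm_unlock(b);" conflated the unlock's meaningless status with the function's real result. Once the unlock returns void, each caller states its own success explicitly. A compact sketch of the transformation, with simplified types:

    struct dm_block { int dummy; };

    /* After the cleanup: unlock cannot fail, so it returns void. */
    static void dm_bm_unlock_v2(struct dm_block *b)
    {
    	(void)b;	/* drop the lock and the buffer reference */
    }

    static int open_metadata(struct dm_block *sblock)
    {
    	/* ... validate superblock fields ... */
    	dm_bm_unlock_v2(sblock);
    	return 0;	/* success is now explicit, not inherited */
    }

    int main(void)
    {
    	struct dm_block b = { 0 };
    	return open_metadata(&b);
    }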
@@ -83,7 +83,7 @@ static struct list_head *list_pop(struct list_head *q)
 static int alloc_hash(struct hash *hash, unsigned elts)
 {
 	hash->nr_buckets = next_power(elts >> 4, 16);
-	hash->hash_bits = ffs(hash->nr_buckets) - 1;
+	hash->hash_bits = __ffs(hash->nr_buckets);
 	hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

 	return hash->table ? 0 : -ENOMEM;
@@ -1410,7 +1410,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
 	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

 	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
-	mq->hash_bits = ffs(mq->nr_buckets) - 1;
+	mq->hash_bits = __ffs(mq->nr_buckets);
 	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
 	if (!mq->table)
 		goto bad_alloc_table;
@@ -566,7 +566,7 @@ static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_ent

 	ht->es = es;
 	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
-	ht->hash_bits = ffs(nr_buckets) - 1;
+	ht->hash_bits = __ffs(nr_buckets);

 	ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
 	if (!ht->buckets)
@@ -2309,8 +2309,7 @@ static void destroy(struct cache *cache)
 {
 	unsigned i;

-	if (cache->migration_pool)
-		mempool_destroy(cache->migration_pool);
+	mempool_destroy(cache->migration_pool);

 	if (cache->all_io_ds)
 		dm_deferred_set_destroy(cache->all_io_ds);
@@ -1544,10 +1544,8 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->bs)
 		bioset_free(cc->bs);

-	if (cc->page_pool)
-		mempool_destroy(cc->page_pool);
-	if (cc->req_pool)
-		mempool_destroy(cc->req_pool);
+	mempool_destroy(cc->page_pool);
+	mempool_destroy(cc->req_pool);

 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
@@ -122,6 +122,7 @@ static void flush_expired_bios(struct work_struct *work)
  * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
  *
  * With separate write parameters, the first set is only used for reads.
+ * Offsets are specified in sectors.
  * Delays are specified in milliseconds.
  */
 static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
@@ -132,7 +133,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	int ret;

 	if (argc != 3 && argc != 6) {
-		ti->error = "requires exactly 3 or 6 arguments";
+		ti->error = "Requires exactly 3 or 6 arguments";
 		return -EINVAL;
 	}

@@ -237,7 +238,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
 	unsigned long expires = 0;

 	if (!delay || !atomic_read(&dc->may_delay))
-		return 1;
+		return DM_MAPIO_REMAPPED;

 	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

@@ -257,7 +258,7 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)

 	queue_timeout(dc, expires);

-	return 0;
+	return DM_MAPIO_SUBMITTED;
 }

 static void delay_presuspend(struct dm_target *ti)
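The values are unchanged: DM_MAPIO_SUBMITTED is 0 (the target has taken ownership of the bio and will submit it later) and DM_MAPIO_REMAPPED is 1 (the core should dispatch the bio to wherever the target just pointed it), so the switch is purely about readability. A toy sketch of the convention these macros name, assuming only the constants from include/linux/device-mapper.h:

    #define DM_MAPIO_SUBMITTED	0	/* target queued or consumed the bio */
    #define DM_MAPIO_REMAPPED	1	/* caller submits the remapped bio */

    struct bio { int delayed; };

    /* Toy map function following the dm-delay logic above. */
    static int delay_map(struct bio *bio, int delay_ms)
    {
    	if (!delay_ms)
    		return DM_MAPIO_REMAPPED;	/* no delay: pass straight through */
    	bio->delayed = 1;			/* queue for later submission */
    	return DM_MAPIO_SUBMITTED;
    }

    /* Caller side: only DM_MAPIO_REMAPPED triggers an immediate submit. */
    static void dispatch(struct bio *bio, int delay_ms)
    {
    	if (delay_map(bio, delay_ms) == DM_MAPIO_REMAPPED)
    		;	/* generic_make_request(bio) in the real core */
    }

    int main(void)
    {
    	struct bio b = { 0 };
    	dispatch(&b, 0);	/* remapped path */
    	dispatch(&b, 100);	/* submitted path */
    	return b.delayed ? 0 : 1;
    }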
@@ -343,7 +343,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
 		}
 	}

-	return dm_bm_unlock(b);
+	dm_bm_unlock(b);
+
+	return 0;
 }

 /*----------------------------------------------------------------*/
@@ -582,7 +584,9 @@ static int open_metadata(struct era_metadata *md)
 	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
 	md->archived_writesets = true;

-	return dm_bm_unlock(sblock);
+	dm_bm_unlock(sblock);
+
+	return 0;

 bad:
 	dm_bm_unlock(sblock);
@@ -1046,12 +1050,7 @@ static int metadata_take_snap(struct era_metadata *md)

 	md->metadata_snap = dm_block_location(clone);

-	r = dm_tm_unlock(md->tm, clone);
-	if (r) {
-		DMERR("%s: couldn't unlock clone", __func__);
-		md->metadata_snap = SUPERBLOCK_LOCATION;
-		return r;
-	}
+	dm_tm_unlock(md->tm, clone);

 	return 0;
 }
@@ -183,7 +183,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,

 	store->chunk_size = chunk_size;
 	store->chunk_mask = chunk_size - 1;
-	store->chunk_shift = ffs(chunk_size) - 1;
+	store->chunk_shift = __ffs(chunk_size);

 	return 0;
 }
@@ -373,20 +373,20 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
 	}
 }

-static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+static int flakey_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct flakey_c *fc = ti->private;
-	struct dm_dev *dev = fc->dev;
-	int r = 0;
+
+	*bdev = fc->dev->bdev;

 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
 	if (fc->start ||
-	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-
-	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
 }

 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -405,7 +405,7 @@ static struct target_type flakey_target = {
 	.map    = flakey_map,
 	.end_io = flakey_end_io,
 	.status = flakey_status,
-	.ioctl	= flakey_ioctl,
+	.prepare_ioctl = flakey_prepare_ioctl,
 	.iterate_devices = flakey_iterate_devices,
 };

@@ -65,8 +65,7 @@ struct dm_io_client *dm_io_client_create(void)
 	return client;

 bad:
-	if (client->pool)
-		mempool_destroy(client->pool);
+	mempool_destroy(client->pool);
 	kfree(client);
 	return ERR_PTR(-ENOMEM);
 }
@@ -39,20 +39,20 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)

 	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
 	if (lc == NULL) {
-		ti->error = "dm-linear: Cannot allocate linear context";
+		ti->error = "Cannot allocate linear context";
 		return -ENOMEM;
 	}

 	ret = -EINVAL;
 	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
-		ti->error = "dm-linear: Invalid device sector";
+		ti->error = "Invalid device sector";
 		goto bad;
 	}
 	lc->start = tmp;

 	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
 	if (ret) {
-		ti->error = "dm-linear: Device lookup failed";
+		ti->error = "Device lookup failed";
 		goto bad;
 	}

@@ -116,21 +116,21 @@ static void linear_status(struct dm_target *ti, status_type_t type,
 	}
 }

-static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
-			unsigned long arg)
+static int linear_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
 	struct dm_dev *dev = lc->dev;
-	int r = 0;
+
+	*bdev = dev->bdev;

 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
 	if (lc->start ||
-	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-
-	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
 }

 static int linear_iterate_devices(struct dm_target *ti,
@@ -149,7 +149,7 @@ static struct target_type linear_target = {
 	.dtr    = linear_dtr,
 	.map    = linear_map,
 	.status = linear_status,
-	.ioctl  = linear_ioctl,
+	.prepare_ioctl = linear_prepare_ioctl,
 	.iterate_devices = linear_iterate_devices,
 };

@@ -313,8 +313,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 out:
 	kfree(devices_rdata);
 	if (r) {
-		if (lc->flush_entry_pool)
-			mempool_destroy(lc->flush_entry_pool);
+		mempool_destroy(lc->flush_entry_pool);
 		kfree(lc);
 		kfree(ctr_str);
 	} else {
@@ -714,20 +714,19 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
 	}
 }

-static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
-			    unsigned long arg)
+static int log_writes_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct log_writes_c *lc = ti->private;
 	struct dm_dev *dev = lc->dev;
-	int r = 0;

+	*bdev = dev->bdev;
 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
 	if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-
-	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+		return 1;
+	return 0;
 }

 static int log_writes_iterate_devices(struct dm_target *ti,
@@ -782,7 +781,7 @@ static struct target_type log_writes_target = {
 	.map    = log_writes_map,
 	.end_io = normal_end_io,
 	.status = log_writes_status,
-	.ioctl	= log_writes_ioctl,
+	.prepare_ioctl = log_writes_prepare_ioctl,
 	.message = log_writes_message,
 	.iterate_devices = log_writes_iterate_devices,
 	.io_hints = log_writes_io_hints,
@@ -1533,18 +1533,14 @@ out:
 	return r;
 }

-static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
-			   unsigned long arg)
+static int multipath_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct multipath *m = ti->private;
 	struct pgpath *pgpath;
-	struct block_device *bdev;
-	fmode_t mode;
 	unsigned long flags;
 	int r;

-	bdev = NULL;
-	mode = 0;
 	r = 0;

 	spin_lock_irqsave(&m->lock, flags);
@@ -1555,26 +1551,17 @@
 	pgpath = m->current_pgpath;

 	if (pgpath) {
-		bdev = pgpath->path.dev->bdev;
-		mode = pgpath->path.dev->mode;
+		*bdev = pgpath->path.dev->bdev;
+		*mode = pgpath->path.dev->mode;
 	}

 	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
 		r = -ENOTCONN;
-	else if (!bdev)
+	else if (!*bdev)
 		r = -EIO;

 	spin_unlock_irqrestore(&m->lock, flags);

-	/*
-	 * Only pass ioctls through if the device sizes match exactly.
-	 */
-	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
-		int err = scsi_verify_blk_ioctl(NULL, cmd);
-		if (err)
-			r = err;
-	}
-
 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 		spin_lock_irqsave(&m->lock, flags);
 		if (!m->current_pg) {
@@ -1587,7 +1574,12 @@
 		dm_table_run_md_queue_async(m->ti->table);
 	}

-	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+	/*
+	 * Only pass ioctls through if the device sizes match exactly.
+	 */
+	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return r;
 }

 static int multipath_iterate_devices(struct dm_target *ti,
@@ -1690,7 +1682,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
|
||||
.resume = multipath_resume,
|
||||
.status = multipath_status,
|
||||
.message = multipath_message,
|
||||
.ioctl = multipath_ioctl,
|
||||
.prepare_ioctl = multipath_prepare_ioctl,
|
||||
.iterate_devices = multipath_iterate_devices,
|
||||
.busy = multipath_busy,
|
||||
};
|
||||
|
@@ -193,7 +193,7 @@ struct dm_region_hash *dm_region_hash_create(
 	rh->max_recovery = max_recovery;
 	rh->log = log;
 	rh->region_size = region_size;
-	rh->region_shift = ffs(region_size) - 1;
+	rh->region_shift = __ffs(region_size);
 	rwlock_init(&rh->hash_lock);
 	rh->mask = nr_buckets - 1;
 	rh->nr_buckets = nr_buckets;
@@ -249,9 +249,7 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
 	if (rh->log)
 		dm_dirty_log_destroy(rh->log);

-	if (rh->region_pool)
-		mempool_destroy(rh->region_pool);
-
+	mempool_destroy(rh->region_pool);
 	vfree(rh->buckets);
 	kfree(rh);
 }
@@ -322,7 +322,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 			bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
 						bdev) >> 9);
 		ps->store->chunk_mask = ps->store->chunk_size - 1;
-		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
+		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
 		chunk_size_supplied = 0;
 	}

@@ -99,11 +99,11 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
 	if (sector_div(nr_regions, sctx->region_size))
 		nr_regions++;

-	sctx->nr_regions = nr_regions;
-	if (sctx->nr_regions != nr_regions || sctx->nr_regions >= ULONG_MAX) {
+	if (nr_regions >= ULONG_MAX) {
 		ti->error = "Region table too large";
 		return -EINVAL;
 	}
+	sctx->nr_regions = nr_regions;

 	nr_slots = nr_regions;
 	if (sector_div(nr_slots, sctx->region_entries_per_slot))
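The simplified conditional is safe because nr_regions is a sector_t (up to 64 bits) while sctx->nr_regions is an unsigned long: if the value is below ULONG_MAX, the narrowing assignment cannot truncate, so the old assign-then-compare dance was redundant. A small sketch of the general checked-narrowing idiom, with hypothetical names:

    #include <limits.h>
    #include <stdint.h>

    /* Test the wide value before storing it in the narrow field,
     * instead of storing first and comparing afterwards. */
    static int set_nr_regions(unsigned long *dst, uint64_t nr_regions)
    {
    	if (nr_regions >= ULONG_MAX)
    		return -1;	/* "Region table too large" */
    	*dst = (unsigned long)nr_regions;
    	return 0;
    }

    int main(void)
    {
    	unsigned long n;
    	return set_nr_regions(&n, 42) == 0 ? 0 : 1;
    }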
@@ -511,27 +511,24 @@ static void switch_status(struct dm_target *ti, status_type_t type,
  *
  * Passthrough all ioctls to the path for sector 0
  */
-static int switch_ioctl(struct dm_target *ti, unsigned cmd,
-			unsigned long arg)
+static int switch_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct switch_ctx *sctx = ti->private;
-	struct block_device *bdev;
-	fmode_t mode;
 	unsigned path_nr;
-	int r = 0;

 	path_nr = switch_get_path_nr(sctx, 0);

-	bdev = sctx->path_list[path_nr].dmdev->bdev;
-	mode = sctx->path_list[path_nr].dmdev->mode;
+	*bdev = sctx->path_list[path_nr].dmdev->bdev;
+	*mode = sctx->path_list[path_nr].dmdev->mode;

 	/*
 	 * Only pass ioctls through if the device sizes match exactly.
 	 */
-	if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-
-	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+	if (ti->len + sctx->path_list[path_nr].start !=
+	    i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
 }

 static int switch_iterate_devices(struct dm_target *ti,
@@ -560,7 +557,7 @@ static struct target_type switch_target = {
 	.map = switch_map,
 	.message = switch_message,
 	.status = switch_status,
-	.ioctl = switch_ioctl,
+	.prepare_ioctl = switch_prepare_ioctl,
 	.iterate_devices = switch_iterate_devices,
 };

@@ -396,7 +396,9 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
 		}
 	}

-	return dm_bm_unlock(b);
+	dm_bm_unlock(b);
+
+	return 0;
 }

 static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -650,7 +652,9 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
 	}

 	__setup_btree_details(pmd);
-	return dm_bm_unlock(sblock);
+	dm_bm_unlock(sblock);
+
+	return 0;

 bad_cleanup_data_sm:
 	dm_sm_destroy(pmd->data_sm);
@@ -1297,7 +1301,9 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
 	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
 	dm_sm_dec_block(pmd->metadata_sm, held_root);

-	return dm_tm_unlock(pmd->tm, copy);
+	dm_tm_unlock(pmd->tm, copy);
+
+	return 0;
 }

 int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
@@ -1327,7 +1333,9 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd,
 	disk_super = dm_block_data(sblock);
 	*result = le64_to_cpu(disk_super->held_root);

-	return dm_bm_unlock(sblock);
+	dm_bm_unlock(sblock);
+
+	return 0;
 }

 int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
@@ -631,18 +631,17 @@ static void verity_status(struct dm_target *ti, status_type_t type,
 	}
 }

-static int verity_ioctl(struct dm_target *ti, unsigned cmd,
-			unsigned long arg)
+static int verity_prepare_ioctl(struct dm_target *ti,
+		struct block_device **bdev, fmode_t *mode)
 {
 	struct dm_verity *v = ti->private;
-	int r = 0;
+
+	*bdev = v->data_dev->bdev;

 	if (v->data_start ||
 	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-
-	return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
-					   cmd, arg);
+		return 1;
+	return 0;
 }

 static int verity_iterate_devices(struct dm_target *ti,
@@ -965,7 +964,7 @@ static struct target_type verity_target = {
 	.dtr	= verity_dtr,
 	.map	= verity_map,
 	.status	= verity_status,
-	.ioctl	= verity_ioctl,
+	.prepare_ioctl	= verity_prepare_ioctl,
 	.iterate_devices = verity_iterate_devices,
 	.io_hints	= verity_io_hints,
 };
drivers/md/dm.c
@@ -24,6 +24,7 @@
 #include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 #include <linux/blk-mq.h>
+#include <linux/pr.h>

 #include <trace/events/block.h>

@@ -555,18 +556,16 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }

-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
-			unsigned int cmd, unsigned long arg)
+static int dm_get_live_table_for_ioctl(struct mapped_device *md,
+		struct dm_target **tgt, struct block_device **bdev,
+		fmode_t *mode, int *srcu_idx)
 {
-	struct mapped_device *md = bdev->bd_disk->private_data;
-	int srcu_idx;
 	struct dm_table *map;
-	struct dm_target *tgt;
-	int r = -ENOTTY;
+	int r;

 retry:
-	map = dm_get_live_table(md, &srcu_idx);
+	r = -ENOTTY;
+	map = dm_get_live_table(md, srcu_idx);
 	if (!map || !dm_table_get_size(map))
 		goto out;

@@ -574,8 +573,9 @@ retry:
 	if (dm_table_get_num_targets(map) != 1)
 		goto out;

-	tgt = dm_table_get_target(map, 0);
-	if (!tgt->type->ioctl)
+	*tgt = dm_table_get_target(map, 0);
+
+	if (!(*tgt)->type->prepare_ioctl)
 		goto out;

 	if (dm_suspended_md(md)) {
@@ -583,16 +583,46 @@ retry:
 		goto out;
 	}

-	r = tgt->type->ioctl(tgt, cmd, arg);
+	r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+	if (r < 0)
+		goto out;
+
+	return r;

 out:
-	dm_put_live_table(md, srcu_idx);
+	dm_put_live_table(md, *srcu_idx);
 	if (r == -ENOTCONN) {
 		msleep(10);
 		goto retry;
 	}
 	return r;
 }

+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+			unsigned int cmd, unsigned long arg)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	struct dm_target *tgt;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	if (r > 0) {
+		/*
+		 * Target determined this ioctl is being issued against
+		 * a logical partition of the parent bdev; so extra
+		 * validation is needed.
+		 */
+		r = scsi_verify_blk_ioctl(NULL, cmd);
+		if (r)
+			goto out;
+	}
+
+	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+out:
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
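The contract behind this refactoring is easiest to see in isolation: a target's prepare_ioctl hook only reports where the ioctl should go (return 0: pass straight through; return 1: the core must validate the command first, because the target exposes only a partition-like subset of the device; negative: error), and the core performs the single dispatch. A minimal userspace sketch of the same split, with all names hypothetical:

    #include <errno.h>
    #include <stdio.h>

    struct target {
    	int dev_fd;		/* where ioctls should be routed */
    	int is_sub_range;	/* target exposes only part of the device */
    };

    /* Stand-ins for scsi_verify_blk_ioctl() and __blkdev_driver_ioctl(). */
    static int cmd_safe_for_partition(unsigned cmd) { return cmd < 0x100; }
    static int issue_ioctl(int fd, unsigned cmd) { (void)fd; (void)cmd; return 0; }

    /* Target side: report the destination, never issue the ioctl itself.
     * 0 = pass through, 1 = caller must validate, <0 = error. */
    static int prepare_ioctl(const struct target *t, int *out_fd)
    {
    	*out_fd = t->dev_fd;
    	return t->is_sub_range ? 1 : 0;
    }

    /* Core side: one dispatch point instead of one per target type. */
    static int blk_ioctl(const struct target *t, unsigned cmd)
    {
    	int fd;
    	int r = prepare_ioctl(t, &fd);

    	if (r < 0)
    		return r;
    	if (r == 1 && !cmd_safe_for_partition(cmd))
    		return -ENOTTY;	/* extra validation failed */
    	return issue_ioctl(fd, cmd);
    }

    int main(void)
    {
    	struct target whole = { .dev_fd = 3, .is_sub_range = 0 };
    	struct target part  = { .dev_fd = 4, .is_sub_range = 1 };

    	printf("%d %d\n", blk_ioctl(&whole, 0x1234), blk_ioctl(&part, 0x1234));
    	return 0;
    }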
@@ -1734,8 +1764,6 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)

 	map = dm_get_live_table(md, &srcu_idx);

-	blk_queue_split(q, &bio, q->bio_split);
-
 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

 	/* if we're suspended, we have to queue this io for later */
@@ -2198,6 +2226,13 @@ static void dm_init_md_queue(struct mapped_device *md)
 	 * This queue is new, so no concurrency on the queue_flags.
 	 */
 	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+	/*
+	 * Initialize data that will only be used by a non-blk-mq DM queue
+	 * - must do so here (in alloc_dev callchain) before queue is used
+	 */
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info.congested_data = md;
 }

 static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2208,10 +2243,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->queuedata = md;
 	md->queue->backing_dev_info.congested_fn = dm_any_congested;
-	md->queue->backing_dev_info.congested_data = md;
-
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }

@@ -2221,10 +2253,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		destroy_workqueue(md->wq);
 	if (md->kworker_task)
 		kthread_stop(md->kworker_task);
-	if (md->io_pool)
-		mempool_destroy(md->io_pool);
-	if (md->rq_pool)
-		mempool_destroy(md->rq_pool);
+	mempool_destroy(md->io_pool);
+	mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);

@@ -2759,6 +2789,12 @@ int dm_setup_md_queue(struct mapped_device *md)
 	case DM_TYPE_BIO_BASED:
 		dm_init_old_md_queue(md);
 		blk_queue_make_request(md->queue, dm_make_request);
+		/*
+		 * DM handles splitting bios as needed.  Free the bio_split bioset
+		 * since it won't be used (saves 1 process per bio-based DM device).
+		 */
+		bioset_free(md->queue->bio_split);
+		md->queue->bio_split = NULL;
 		break;
 	}

@@ -3505,11 +3541,8 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 	if (!pools)
 		return;

-	if (pools->io_pool)
-		mempool_destroy(pools->io_pool);
-
-	if (pools->rq_pool)
-		mempool_destroy(pools->rq_pool);
+	mempool_destroy(pools->io_pool);
+	mempool_destroy(pools->rq_pool);

 	if (pools->bs)
 		bioset_free(pools->bs);
@@ -3517,11 +3550,133 @@
 	kfree(pools);
 }

+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+			  u32 flags)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_register)
+		r = ops->pr_register(bdev, old_key, new_key, flags);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+			 u32 flags)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_reserve)
+		r = ops->pr_reserve(bdev, key, type, flags);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_release)
+		r = ops->pr_release(bdev, key, type);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+			 enum pr_type type, bool abort)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_preempt)
+		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static int dm_pr_clear(struct block_device *bdev, u64 key)
+{
+	struct mapped_device *md = bdev->bd_disk->private_data;
+	const struct pr_ops *ops;
+	struct dm_target *tgt;
+	fmode_t mode;
+	int srcu_idx, r;
+
+	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	if (r < 0)
+		return r;
+
+	ops = bdev->bd_disk->fops->pr_ops;
+	if (ops && ops->pr_clear)
+		r = ops->pr_clear(bdev, key);
+	else
+		r = -EOPNOTSUPP;
+
+	dm_put_live_table(md, srcu_idx);
+	return r;
+}
+
+static const struct pr_ops dm_pr_ops = {
+	.pr_register	= dm_pr_register,
+	.pr_reserve	= dm_pr_reserve,
+	.pr_release	= dm_pr_release,
+	.pr_preempt	= dm_pr_preempt,
+	.pr_clear	= dm_pr_clear,
+};
+
 static const struct block_device_operations dm_blk_dops = {
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
 	.getgeo = dm_blk_getgeo,
+	.pr_ops = &dm_pr_ops,
 	.owner = THIS_MODULE
 };
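With pr_ops wired up, a dm-multipath node accepts the generic block-layer persistent-reservation ioctls and forwards them to the SCSI device on the current path. A userspace sketch of registering a key and taking a write-exclusive reservation through the mapper node; the device path is only an example, and the constants come from the <linux/pr.h> UAPI header added in this same 4.4 cycle:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/pr.h>

    int main(void)
    {
    	struct pr_registration reg = { .old_key = 0, .new_key = 0x123abc };
    	struct pr_reservation rsv = { .key = 0x123abc,
    				      .type = PR_WRITE_EXCLUSIVE };
    	int fd = open("/dev/mapper/mpatha", O_RDWR);	/* example node */

    	if (fd < 0)
    		return 1;
    	/* DM forwards both calls to the pr_ops of the current path. */
    	if (ioctl(fd, IOC_PR_REGISTER, &reg))
    		perror("IOC_PR_REGISTER");
    	else if (ioctl(fd, IOC_PR_RESERVE, &rsv))
    		perror("IOC_PR_RESERVE");
    	return 0;
    }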
@@ -233,9 +233,9 @@ static int get_ablock(struct dm_array_info *info, dm_block_t b,
 /*
  * Unlocks an array block.
  */
-static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
 {
-	return dm_tm_unlock(info->btree_info.tm, block);
+	dm_tm_unlock(info->btree_info.tm, block);
 }

 /*----------------------------------------------------------------*/
@@ -578,7 +578,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
 }
 EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);

-int dm_bm_unlock(struct dm_block *b)
+void dm_bm_unlock(struct dm_block *b)
 {
 	struct buffer_aux *aux;
 	aux = dm_bufio_get_aux_data(to_buffer(b));
@@ -590,8 +590,6 @@
 		bl_up_read(&aux->lock);

 	dm_bufio_release(to_buffer(b));
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_bm_unlock);

@@ -94,7 +94,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
 			  struct dm_block_validator *v,
 			  struct dm_block **result);

-int dm_bm_unlock(struct dm_block *b);
+void dm_bm_unlock(struct dm_block *b);

 /*
  * It's a common idiom to have a superblock that should be committed last.
@@ -52,7 +52,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
 		  struct dm_btree_value_type *vt);

 int new_block(struct dm_btree_info *info, struct dm_block **result);
-int unlock_block(struct dm_btree_info *info, struct dm_block *b);
+void unlock_block(struct dm_btree_info *info, struct dm_block *b);

 /*
  * Spines keep track of the rolling locks. There are 2 variants, read-only
@@ -165,9 +165,9 @@ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt
 	return 0;
 }

-static int exit_child(struct dm_btree_info *info, struct child *c)
+static void exit_child(struct dm_btree_info *info, struct child *c)
 {
-	return dm_tm_unlock(info->tm, c->block);
+	dm_tm_unlock(info->tm, c->block);
 }

 static void shift(struct btree_node *left, struct btree_node *right, int count)
@@ -249,13 +249,10 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,

 	__rebalance2(info, parent, &left, &right);

-	r = exit_child(info, &left);
-	if (r) {
-		exit_child(info, &right);
-		return r;
-	}
+	exit_child(info, &left);
+	exit_child(info, &right);

-	return exit_child(info, &right);
+	return 0;
 }

 /*
@@ -394,22 +391,9 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,

 	__rebalance3(info, parent, &left, &center, &right);

-	r = exit_child(info, &left);
-	if (r) {
-		exit_child(info, &center);
-		exit_child(info, &right);
-		return r;
-	}
-
-	r = exit_child(info, &center);
-	if (r) {
-		exit_child(info, &right);
-		return r;
-	}
-
-	r = exit_child(info, &right);
-	if (r)
-		return r;
+	exit_child(info, &left);
+	exit_child(info, &center);
+	exit_child(info, &right);

 	return 0;
 }
@@ -433,9 +417,7 @@ static int rebalance_children(struct shadow_spine *s,

 		memcpy(n, dm_block_data(child),
 		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
-		r = dm_tm_unlock(info->tm, child);
-		if (r)
-			return r;
+		dm_tm_unlock(info->tm, child);

 		dm_tm_dec(info->tm, dm_block_location(child));
 		return 0;
@@ -117,9 +117,9 @@ int new_block(struct dm_btree_info *info, struct dm_block **result)
 	return dm_tm_new_block(info->tm, &btree_node_validator, result);
 }

-int unlock_block(struct dm_btree_info *info, struct dm_block *b)
+void unlock_block(struct dm_btree_info *info, struct dm_block *b)
 {
-	return dm_tm_unlock(info->tm, b);
+	dm_tm_unlock(info->tm, b);
 }

 /*----------------------------------------------------------------*/
@@ -137,9 +137,7 @@ int exit_ro_spine(struct ro_spine *s)
 	int r = 0, i;

 	for (i = 0; i < s->count; i++) {
-		int r2 = unlock_block(s->info, s->nodes[i]);
-		if (r2 < 0)
-			r = r2;
+		unlock_block(s->info, s->nodes[i]);
 	}

 	return r;
@@ -150,9 +148,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
 	int r;

 	if (s->count == 2) {
-		r = unlock_block(s->info, s->nodes[0]);
-		if (r < 0)
-			return r;
+		unlock_block(s->info, s->nodes[0]);
 		s->nodes[0] = s->nodes[1];
 		s->count--;
 	}
@@ -194,9 +190,7 @@ int exit_shadow_spine(struct shadow_spine *s)
 	int r = 0, i;

 	for (i = 0; i < s->count; i++) {
-		int r2 = unlock_block(s->info, s->nodes[i]);
-		if (r2 < 0)
-			r = r2;
+		unlock_block(s->info, s->nodes[i]);
 	}

 	return r;
@@ -208,9 +202,7 @@ int shadow_step(struct shadow_spine *s, dm_block_t b,
 	int r;

 	if (s->count == 2) {
-		r = unlock_block(s->info, s->nodes[0]);
-		if (r < 0)
-			return r;
+		unlock_block(s->info, s->nodes[0]);
 		s->nodes[0] = s->nodes[1];
 		s->count--;
 	}
@@ -141,7 +141,9 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
 	n->header.value_size = cpu_to_le32(info->value_type.size);

 	*root = dm_block_location(b);
-	return unlock_block(info, b);
+	unlock_block(info, b);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_btree_empty);

@@ -259,9 +259,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)

 		idx.blocknr = cpu_to_le64(dm_block_location(b));

-		r = dm_tm_unlock(ll->tm, b);
-		if (r < 0)
-			return r;
+		dm_tm_unlock(ll->tm, b);

 		idx.nr_free = cpu_to_le32(ll->entries_per_block);
 		idx.none_free_before = 0;
@@ -293,7 +291,9 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)

 	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

-	return dm_tm_unlock(ll->tm, blk);
+	dm_tm_unlock(ll->tm, blk);
+
+	return 0;
 }

 static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
@@ -373,9 +373,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 			return r;
 		}

-		r = dm_tm_unlock(ll->tm, blk);
-		if (r < 0)
-			return r;
+		dm_tm_unlock(ll->tm, blk);

 		*result = i * ll->entries_per_block + (dm_block_t) position;
 		return 0;
@@ -429,9 +427,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
 	if (ref_count <= 2) {
 		sm_set_bitmap(bm_le, bit, ref_count);

-		r = dm_tm_unlock(ll->tm, nb);
-		if (r < 0)
-			return r;
+		dm_tm_unlock(ll->tm, nb);

 		if (old > 2) {
 			r = dm_btree_remove(&ll->ref_count_info,
@@ -445,9 +441,7 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
 		__le32 le_rc = cpu_to_le32(ref_count);

 		sm_set_bitmap(bm_le, bit, 3);
-		r = dm_tm_unlock(ll->tm, nb);
-		if (r < 0)
-			return r;
+		dm_tm_unlock(ll->tm, nb);

 		__dm_bless_for_disk(&le_rc);
 		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
@@ -556,7 +550,9 @@ static int metadata_ll_init_index(struct ll_disk *ll)
 	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
 	ll->bitmap_root = dm_block_location(b);

-	return dm_tm_unlock(ll->tm, b);
+	dm_tm_unlock(ll->tm, b);
+
+	return 0;
 }

 static int metadata_ll_open(struct ll_disk *ll)
static int metadata_ll_open(struct ll_disk *ll)
|
||||
@ -570,7 +566,9 @@ static int metadata_ll_open(struct ll_disk *ll)
|
||||
return r;
|
||||
|
||||
memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
|
||||
return dm_tm_unlock(ll->tm, block);
|
||||
dm_tm_unlock(ll->tm, block);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
|
||||
@@ -590,7 +588,9 @@ static int metadata_ll_commit(struct ll_disk *ll)
 	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
 	ll->bitmap_root = dm_block_location(b);

-	return dm_tm_unlock(ll->tm, b);
+	dm_tm_unlock(ll->tm, b);
+
+	return 0;
 }

 int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
@@ -342,9 +342,9 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 }
 EXPORT_SYMBOL_GPL(dm_tm_read_lock);

-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
 {
-	return dm_bm_unlock(b);
+	dm_bm_unlock(b);
 }
 EXPORT_SYMBOL_GPL(dm_tm_unlock);

@@ -94,7 +94,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
 		    struct dm_block_validator *v,
 		    struct dm_block **result);

-int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);
+void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b);

 /*
  * Functions for altering the reference count of a block directly.
@@ -79,8 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,

 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

-typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
-			    unsigned long arg);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
+			    struct block_device **bdev, fmode_t *mode);

 /*
  * These iteration functions are typically used to check (and combine)
@@ -156,7 +156,7 @@ struct target_type {
 	dm_resume_fn resume;
 	dm_status_fn status;
 	dm_message_fn message;
-	dm_ioctl_fn ioctl;
+	dm_prepare_ioctl_fn prepare_ioctl;
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
 	dm_io_hints_fn io_hints;
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	33
+#define DM_VERSION_MINOR	34
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2015-8-18)"
+#define DM_VERSION_EXTRA	"-ioctl (2015-10-28)"

 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */