dm table: audit all dm_table_get_target() callers
All callers of dm_table_get_target() are expected to do proper bounds checking on the index they pass.

Move dm_table_get_target() to dm-core.h to make it extra clear that only DM core code should be using it. Switch it to be inlined while at it.

Standardize all DM core callers to use the same for loop pattern and make associated variables as local as possible. Rename some variables (e.g. s/table/t/ and s/tgt/ti/) along the way.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
commit 564b5c5476
parent 2aec377a29
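For reference, the two pieces the rest of the diff revolves around are the bounds-checked inline accessor added to dm-core.h and the standardized caller loop. Below is a minimal sketch distilled from the hunks that follow; the loop-body comment is only a placeholder for the per-target work each real caller performs:

/*
 * The accessor now lives in dm-core.h and is inlined; an out-of-range
 * index is treated as a DM core bug rather than returning NULL.
 */
static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}

/* Standardized caller pattern, with the iteration variables kept local: */
for (unsigned int i = 0; i < t->num_targets; i++) {
	struct dm_target *ti = dm_table_get_target(t, i);

	/* ... per-target work, e.g. checking ti->type capabilities ... */
}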
@@ -226,6 +226,13 @@ struct dm_table {
 #endif
 };
 
+static inline struct dm_target *dm_table_get_target(struct dm_table *t,
+						    unsigned int index)
+{
+	BUG_ON(index >= t->num_targets);
+	return t->targets + index;
+}
+
 /*
  * One of these is allocated per clone bio.
  */
@@ -237,9 +237,6 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
 	for (i = 0; i < num_targets; i++) {
 		struct dm_target *ti = dm_table_get_target(table, i);
 
-		if (!ti)
-			goto error;
-
 		last_target_measured = 0;
 
 		/*
@@ -175,8 +175,6 @@ static void dm_table_destroy_crypto_profile(struct dm_table *t);
 
 void dm_table_destroy(struct dm_table *t)
 {
-	unsigned int i;
-
 	if (!t)
 		return;
 
@@ -185,13 +183,13 @@ void dm_table_destroy(struct dm_table *t)
 		kvfree(t->index[t->depth - 2]);
 
 	/* free the targets */
-	for (i = 0; i < t->num_targets; i++) {
-		struct dm_target *tgt = t->targets + i;
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
-		if (tgt->type->dtr)
-			tgt->type->dtr(tgt);
+		if (ti->type->dtr)
+			ti->type->dtr(ti);
 
-		dm_put_target_type(tgt->type);
+		dm_put_target_type(ti->type);
 	}
 
 	kvfree(t->highs);
@@ -451,14 +449,14 @@ EXPORT_SYMBOL(dm_put_device);
 /*
  * Checks to see if the target joins onto the end of the table.
  */
-static int adjoin(struct dm_table *table, struct dm_target *ti)
+static int adjoin(struct dm_table *t, struct dm_target *ti)
 {
 	struct dm_target *prev;
 
-	if (!table->num_targets)
+	if (!t->num_targets)
 		return !ti->begin;
 
-	prev = &table->targets[table->num_targets - 1];
+	prev = &t->targets[t->num_targets - 1];
 	return (ti->begin == (prev->begin + prev->len));
 }
 
@@ -565,8 +563,8 @@ int dm_split_args(int *argc, char ***argvp, char *input)
  * two or more targets, the size of each piece it gets split into must
  * be compatible with the logical_block_size of the target processing it.
  */
-static int validate_hardware_logical_block_alignment(struct dm_table *table,
-						      struct queue_limits *limits)
+static int validate_hardware_logical_block_alignment(struct dm_table *t,
+						      struct queue_limits *limits)
 {
 	/*
 	 * This function uses arithmetic modulo the logical_block_size
@@ -588,13 +586,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 
 	struct dm_target *ti;
 	struct queue_limits ti_limits;
-	unsigned i;
+	unsigned int i;
 
 	/*
 	 * Check each entry in the table in turn.
 	 */
-	for (i = 0; i < table->num_targets; i++) {
-		ti = dm_table_get_target(table, i);
+	for (i = 0; i < t->num_targets; i++) {
+		ti = dm_table_get_target(t, i);
 
 		blk_set_stacking_limits(&ti_limits);
 
@@ -622,7 +620,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 		if (remaining) {
 			DMWARN("%s: table line %u (start sect %llu len %llu) "
 			       "not aligned to h/w logical block size %u",
-			       dm_device_name(table->md), i,
+			       dm_device_name(t->md), i,
 			       (unsigned long long) ti->begin,
 			       (unsigned long long) ti->len,
 			       limits->logical_block_size);
@@ -826,14 +824,11 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de
 }
 
 static bool dm_table_supports_dax(struct dm_table *t,
-			   iterate_devices_callout_fn iterate_fn)
+				  iterate_devices_callout_fn iterate_fn)
 {
-	struct dm_target *ti;
-	unsigned i;
-
 	/* Ensure that all targets support DAX. */
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->type->direct_access)
 			return false;
@@ -861,9 +856,8 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 
 static int dm_table_determine_type(struct dm_table *t)
 {
-	unsigned i;
 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
-	struct dm_target *tgt;
+	struct dm_target *ti;
 	struct list_head *devices = dm_table_get_devices(t);
 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 
@@ -877,11 +871,11 @@ static int dm_table_determine_type(struct dm_table *t)
 		goto verify_rq_based;
 	}
 
-	for (i = 0; i < t->num_targets; i++) {
-		tgt = t->targets + i;
-		if (dm_target_hybrid(tgt))
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		ti = dm_table_get_target(t, i);
+		if (dm_target_hybrid(ti))
 			hybrid = 1;
-		else if (dm_target_request_based(tgt))
+		else if (dm_target_request_based(ti))
 			request_based = 1;
 		else
 			bio_based = 1;
@@ -943,18 +937,18 @@ verify_rq_based:
 		return 0;
 	}
 
-	tgt = dm_table_get_immutable_target(t);
-	if (!tgt) {
+	ti = dm_table_get_immutable_target(t);
+	if (!ti) {
 		DMERR("table load rejected: immutable target is required");
 		return -EINVAL;
-	} else if (tgt->max_io_len) {
+	} else if (ti->max_io_len) {
 		DMERR("table load rejected: immutable target that splits IO is not supported");
 		return -EINVAL;
 	}
 
 	/* Non-request-stackable devices can't be used for request-based dm */
-	if (!tgt->type->iterate_devices ||
-	    !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) {
+	if (!ti->type->iterate_devices ||
+	    !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
 		DMERR("table load rejected: including non-request-stackable devices");
 		return -EINVAL;
 	}
@@ -984,11 +978,9 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
 
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i;
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
 		if (dm_target_is_wildcard(ti->type))
 			return ti;
 	}
@@ -1031,7 +1023,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 	}
 
 	for (unsigned int i = 0; i < t->num_targets; i++) {
-		struct dm_target *ti = t->targets + i;
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
 		min_pool_size = max(min_pool_size, ti->num_flush_bios);
@@ -1125,10 +1117,10 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_dev_internal *dd = NULL;
 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
-	unsigned i;
 
-	for (i = 0; i < t->num_targets; i++) {
+	for (unsigned int i = 0; i < t->num_targets; i++) {
 		struct dm_target *ti = dm_table_get_target(t, i);
+
 		if (!dm_target_passes_integrity(ti->type))
 			goto no_integrity;
 	}
@@ -1242,18 +1234,19 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
 	struct dm_keyslot_evict_args args = { key };
 	struct dm_table *t;
 	int srcu_idx;
-	int i;
-	struct dm_target *ti;
 
 	t = dm_get_live_table(md, &srcu_idx);
 	if (!t)
 		return 0;
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+
 		if (!ti->type->iterate_devices)
 			continue;
 		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
 	}
 
 	dm_put_live_table(md, srcu_idx);
 	return args.err;
 }
@@ -1302,7 +1295,6 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
 {
 	struct dm_crypto_profile *dmcp;
 	struct blk_crypto_profile *profile;
-	struct dm_target *ti;
 	unsigned int i;
 	bool empty_profile = true;
 
@@ -1319,7 +1311,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
 	       sizeof(profile->modes_supported));
 
 	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!dm_target_passes_crypto(ti->type)) {
 			blk_crypto_intersect_capabilities(profile, NULL);
@@ -1469,14 +1461,6 @@ inline sector_t dm_table_get_size(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_get_size);
 
-struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
-{
-	if (index >= t->num_targets)
-		return NULL;
-
-	return t->targets + index;
-}
-
 /*
  * Search the btree for the correct target.
  *
@@ -1537,11 +1521,8 @@ static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_any_dev_attr(struct dm_table *t,
 				  iterate_devices_callout_fn func, void *data)
 {
-	struct dm_target *ti;
-	unsigned int i;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (ti->type->iterate_devices &&
 		    ti->type->iterate_devices(ti, func, data))
@@ -1563,11 +1544,8 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
 
 static bool dm_table_supports_poll(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i = 0;
-
-	while (i < t->num_targets) {
-		ti = dm_table_get_target(t, i++);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->type->iterate_devices ||
 		    ti->type->iterate_devices(ti, device_not_poll_capable, NULL))
@@ -1583,18 +1561,15 @@ static bool dm_table_supports_poll(struct dm_table *t)
  * Returns false if the result is unknown because a target doesn't
  * support iterate_devices.
  */
-bool dm_table_has_no_data_devices(struct dm_table *table)
+bool dm_table_has_no_data_devices(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i, num_devices;
-
-	for (i = 0; i < table->num_targets; i++) {
-		ti = dm_table_get_target(table, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+		unsigned num_devices = 0;
 
 		if (!ti->type->iterate_devices)
 			return false;
 
-		num_devices = 0;
 		ti->type->iterate_devices(ti, count_device, &num_devices);
 		if (num_devices)
 			return false;
@@ -1622,11 +1597,8 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_zoned_model(struct dm_table *t,
 					  enum blk_zoned_model zoned_model)
 {
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (dm_target_supports_zoned_hm(ti->type)) {
 			if (!ti->type->iterate_devices ||
@@ -1659,16 +1631,16 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
  * zone sectors, if the destination device is a zoned block device, it shall
  * have the specified zone_sectors.
  */
-static int validate_hardware_zoned_model(struct dm_table *table,
+static int validate_hardware_zoned_model(struct dm_table *t,
 					 enum blk_zoned_model zoned_model,
 					 unsigned int zone_sectors)
 {
 	if (zoned_model == BLK_ZONED_NONE)
 		return 0;
 
-	if (!dm_table_supports_zoned_model(table, zoned_model)) {
+	if (!dm_table_supports_zoned_model(t, zoned_model)) {
 		DMERR("%s: zoned model is not consistent across all devices",
-		      dm_device_name(table->md));
+		      dm_device_name(t->md));
 		return -EINVAL;
 	}
 
@@ -1676,9 +1648,9 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 	if (!zone_sectors || !is_power_of_2(zone_sectors))
 		return -EINVAL;
 
-	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
 		DMERR("%s: zone sectors is not consistent across all zoned devices",
-		      dm_device_name(table->md));
+		      dm_device_name(t->md));
 		return -EINVAL;
 	}
 
@@ -1688,21 +1660,19 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 /*
  * Establish the new table's queue_limits and validate them.
  */
-int dm_calculate_queue_limits(struct dm_table *table,
+int dm_calculate_queue_limits(struct dm_table *t,
 			      struct queue_limits *limits)
 {
-	struct dm_target *ti;
 	struct queue_limits ti_limits;
-	unsigned i;
 	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
 	unsigned int zone_sectors = 0;
 
 	blk_set_stacking_limits(limits);
 
-	for (i = 0; i < table->num_targets; i++) {
-		blk_set_stacking_limits(&ti_limits);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
-		ti = dm_table_get_target(table, i);
+		blk_set_stacking_limits(&ti_limits);
 
 		if (!ti->type->iterate_devices)
 			goto combine_limits;
@@ -1743,7 +1713,7 @@ combine_limits:
 			DMWARN("%s: adding target device "
 			       "(start sect %llu len %llu) "
 			       "caused an alignment inconsistency",
-			       dm_device_name(table->md),
+			       dm_device_name(t->md),
 			       (unsigned long long) ti->begin,
 			       (unsigned long long) ti->len);
 		}
@@ -1763,10 +1733,10 @@ combine_limits:
 		zoned_model = limits->zoned;
 		zone_sectors = limits->chunk_sectors;
 	}
-	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
+	if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
 		return -EINVAL;
 
-	return validate_hardware_logical_block_alignment(table, limits);
+	return validate_hardware_logical_block_alignment(t, limits);
 }
 
 /*
@@ -1810,17 +1780,14 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 
 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
-	struct dm_target *ti;
-	unsigned i;
-
 	/*
 	 * Require at least one underlying device to support flushes.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
 	 * supporting flushes must provide.
 	 */
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->num_flush_bios)
 			continue;
@@ -1874,11 +1841,8 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
 
 static bool dm_table_supports_write_zeroes(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i = 0;
-
-	while (i < t->num_targets) {
-		ti = dm_table_get_target(t, i++);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->num_write_zeroes_bios)
 			return false;
@@ -1901,11 +1865,8 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
 
 static bool dm_table_supports_nowait(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i = 0;
-
-	while (i < t->num_targets) {
-		ti = dm_table_get_target(t, i++);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!dm_target_supports_nowait(ti->type))
 			return false;
@@ -1926,11 +1887,8 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 
 static bool dm_table_supports_discards(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->num_discard_bios)
 			return false;
@@ -1958,11 +1916,8 @@ static int device_not_secure_erase_capable(struct dm_target *ti,
 
 static bool dm_table_supports_secure_erase(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned int i;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->num_secure_erase_bios)
 			return false;
@@ -2111,12 +2066,11 @@ enum suspend_mode {
 
 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
 {
-	int i = t->num_targets;
-	struct dm_target *ti = t->targets;
-
 	lockdep_assert_held(&t->md->suspend_lock);
 
-	while (i--) {
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+
 		switch (mode) {
 		case PRESUSPEND:
 			if (ti->type->presuspend)
@@ -2131,7 +2085,6 @@ static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
 			ti->type->postsuspend(ti);
 			break;
 		}
-		ti++;
 	}
 }
 
@@ -2161,12 +2114,13 @@ void dm_table_postsuspend_targets(struct dm_table *t)
 
 int dm_table_resume_targets(struct dm_table *t)
 {
-	int i, r = 0;
+	unsigned int i;
+	int r = 0;
 
 	lockdep_assert_held(&t->md->suspend_lock);
 
 	for (i = 0; i < t->num_targets; i++) {
-		struct dm_target *ti = t->targets + i;
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (!ti->type->preresume)
 			continue;
@@ -2180,7 +2134,7 @@ int dm_table_resume_targets(struct dm_table *t)
 	}
 
 	for (i = 0; i < t->num_targets; i++) {
-		struct dm_target *ti = t->targets + i;
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (ti->type->resume)
 			ti->type->resume(ti);
@@ -275,11 +275,8 @@ static int device_not_zone_append_capable(struct dm_target *ti,
 
 static bool dm_table_supports_zone_append(struct dm_table *t)
 {
-	struct dm_target *ti;
-	unsigned int i;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = dm_table_get_target(t, i);
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
 
 		if (ti->emulate_zone_append)
 			return false;
@@ -411,7 +411,7 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 			    struct block_device **bdev)
 {
-	struct dm_target *tgt;
+	struct dm_target *ti;
 	struct dm_table *map;
 	int r;
 
@@ -425,14 +425,14 @@ retry:
 	if (map->num_targets != 1)
 		return r;
 
-	tgt = dm_table_get_target(map, 0);
-	if (!tgt->type->prepare_ioctl)
+	ti = dm_table_get_target(map, 0);
+	if (!ti->type->prepare_ioctl)
 		return r;
 
 	if (dm_suspended_md(md))
 		return -EAGAIN;
 
-	r = tgt->type->prepare_ioctl(tgt, bdev);
+	r = ti->type->prepare_ioctl(ti, bdev);
 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 		dm_put_live_table(md, *srcu_idx);
 		msleep(10);
@@ -1506,11 +1506,11 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 }
 
 static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-				 unsigned num_bios, unsigned *len)
+				 unsigned int num_bios, unsigned *len)
 {
 	struct bio_list blist = BIO_EMPTY_LIST;
 	struct bio *clone;
-	int ret = 0;
+	unsigned int ret = 0;
 
 	switch (num_bios) {
 	case 0:
|
||||
|
||||
static void __send_empty_flush(struct clone_info *ci)
|
||||
{
|
||||
unsigned target_nr = 0;
|
||||
struct dm_target *ti;
|
||||
struct dm_table *t = ci->map;
|
||||
struct bio flush_bio;
|
||||
|
||||
/*
|
||||
@ -1554,8 +1553,9 @@ static void __send_empty_flush(struct clone_info *ci)
|
||||
ci->sector_count = 0;
|
||||
ci->io->tio.clone.bi_iter.bi_size = 0;
|
||||
|
||||
while ((ti = dm_table_get_target(ci->map, target_nr++))) {
|
||||
int bios;
|
||||
for (unsigned int i = 0; i < t->num_targets; i++) {
|
||||
unsigned int bios;
|
||||
struct dm_target *ti = dm_table_get_target(t, i);
|
||||
|
||||
atomic_add(ti->num_flush_bios, &ci->io->io_count);
|
||||
bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
|
||||
@@ -1575,7 +1575,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 				    unsigned num_bios)
 {
 	unsigned len;
-	int bios;
+	unsigned int bios;
 
 	len = min_t(sector_t, ci->sector_count,
 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
@@ -53,7 +53,6 @@ struct dm_io;
  *---------------------------------------------------------------*/
 void dm_table_event_callback(struct dm_table *t,
 			void (*fn)(void *), void *context);
-struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
 bool dm_table_has_no_data_devices(struct dm_table *table);
 int dm_calculate_queue_limits(struct dm_table *table,