dm table: remove dm_table_get_num_targets() wrapper

More efficient and readable to just access table->num_targets directly.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
This commit is contained in:
Mike Snitzer 2022-07-05 14:00:36 -04:00
parent 8b211aaccb
commit 2aec377a29
6 changed files with 23 additions and 29 deletions

View File

@@ -208,7 +208,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
 if (!target_data_buf)
 goto error;
-num_targets = dm_table_get_num_targets(table);
+num_targets = table->num_targets;
 if (dm_ima_alloc_and_copy_device_data(table->md, &device_data_buf, num_targets, noio))
 goto error;

View File

@@ -832,7 +832,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
 if (get_disk_ro(disk))
 param->flags |= DM_READONLY_FLAG;
-param->target_count = dm_table_get_num_targets(table);
+param->target_count = table->num_targets;
 }
 param->flags |= DM_ACTIVE_PRESENT_FLAG;
@@ -845,7 +845,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 if (table) {
 if (!(dm_table_get_mode(table) & FMODE_WRITE))
 param->flags |= DM_READONLY_FLAG;
-param->target_count = dm_table_get_num_targets(table);
+param->target_count = table->num_targets;
 }
 dm_put_live_table(md, srcu_idx);
 }
@@ -1248,7 +1248,7 @@ static void retrieve_status(struct dm_table *table,
 type = STATUSTYPE_INFO;
 /* Get all the target info */
-num_targets = dm_table_get_num_targets(table);
+num_targets = table->num_targets;
 for (i = 0; i < num_targets; i++) {
 struct dm_target *ti = dm_table_get_target(table, i);
 size_t l;

View File

@@ -593,7 +593,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 /*
 * Check each entry in the table in turn.
 */
-for (i = 0; i < dm_table_get_num_targets(table); i++) {
+for (i = 0; i < table->num_targets; i++) {
 ti = dm_table_get_target(table, i);
 blk_set_stacking_limits(&ti_limits);
@@ -832,7 +832,7 @@ static bool dm_table_supports_dax(struct dm_table *t,
 unsigned i;
 /* Ensure that all targets support DAX. */
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!ti->type->direct_access)
@@ -987,7 +987,7 @@ struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 struct dm_target *ti;
 unsigned i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (dm_target_is_wildcard(ti->type))
 return ti;
@@ -1127,7 +1127,7 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 struct gendisk *prev_disk = NULL, *template_disk = NULL;
 unsigned i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 struct dm_target *ti = dm_table_get_target(t, i);
 if (!dm_target_passes_integrity(ti->type))
 goto no_integrity;
@@ -1248,7 +1248,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
 t = dm_get_live_table(md, &srcu_idx);
 if (!t)
 return 0;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!ti->type->iterate_devices)
 continue;
@@ -1318,7 +1318,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
 memset(profile->modes_supported, 0xFF,
 sizeof(profile->modes_supported));
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!dm_target_passes_crypto(ti->type)) {
@@ -1540,7 +1540,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
 struct dm_target *ti;
 unsigned int i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (ti->type->iterate_devices &&
@@ -1566,7 +1566,7 @@ static bool dm_table_supports_poll(struct dm_table *t)
 struct dm_target *ti;
 unsigned i = 0;
-while (i < dm_table_get_num_targets(t)) {
+while (i < t->num_targets) {
 ti = dm_table_get_target(t, i++);
 if (!ti->type->iterate_devices ||
@@ -1588,7 +1588,7 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 struct dm_target *ti;
 unsigned i, num_devices;
-for (i = 0; i < dm_table_get_num_targets(table); i++) {
+for (i = 0; i < table->num_targets; i++) {
 ti = dm_table_get_target(table, i);
 if (!ti->type->iterate_devices)
@@ -1625,7 +1625,7 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 struct dm_target *ti;
 unsigned i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (dm_target_supports_zoned_hm(ti->type)) {
@@ -1699,7 +1699,7 @@ int dm_calculate_queue_limits(struct dm_table *table,
 blk_set_stacking_limits(limits);
-for (i = 0; i < dm_table_get_num_targets(table); i++) {
+for (i = 0; i < table->num_targets; i++) {
 blk_set_stacking_limits(&ti_limits);
 ti = dm_table_get_target(table, i);
@@ -1819,7 +1819,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 * so we need to use iterate_devices here, which targets
 * supporting flushes must provide.
 */
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!ti->num_flush_bios)
@@ -1877,7 +1877,7 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 struct dm_target *ti;
 unsigned i = 0;
-while (i < dm_table_get_num_targets(t)) {
+while (i < t->num_targets) {
 ti = dm_table_get_target(t, i++);
 if (!ti->num_write_zeroes_bios)
@@ -1904,7 +1904,7 @@ static bool dm_table_supports_nowait(struct dm_table *t)
 struct dm_target *ti;
 unsigned i = 0;
-while (i < dm_table_get_num_targets(t)) {
+while (i < t->num_targets) {
 ti = dm_table_get_target(t, i++);
 if (!dm_target_supports_nowait(ti->type))
@@ -1929,7 +1929,7 @@ static bool dm_table_supports_discards(struct dm_table *t)
 struct dm_target *ti;
 unsigned i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!ti->num_discard_bios)
@@ -1961,7 +1961,7 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
 struct dm_target *ti;
 unsigned int i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (!ti->num_secure_erase_bios)
@@ -2092,11 +2092,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 return 0;
 }
-unsigned int dm_table_get_num_targets(struct dm_table *t)
-{
-return t->num_targets;
-}
 struct list_head *dm_table_get_devices(struct dm_table *t)
 {
 return &t->devices;

View File

@@ -278,7 +278,7 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
 struct dm_target *ti;
 unsigned int i;
-for (i = 0; i < dm_table_get_num_targets(t); i++) {
+for (i = 0; i < t->num_targets; i++) {
 ti = dm_table_get_target(t, i);
 if (ti->emulate_zone_append)

View File

@@ -422,7 +422,7 @@ retry:
 return r;
 /* We only support devices that have a single target */
-if (dm_table_get_num_targets(map) != 1)
+if (map->num_targets != 1)
 return r;
 tgt = dm_table_get_target(map, 0);
@@ -3092,7 +3092,7 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
 goto out;
 /* We only support devices that have a single target */
-if (dm_table_get_num_targets(table) != 1)
+if (table->num_targets != 1)
 goto out;
 ti = dm_table_get_target(table, 0);

View File

@@ -561,7 +561,6 @@ void dm_sync_table(struct mapped_device *md);
 * Queries
 */
 sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
 fmode_t dm_table_get_mode(struct dm_table *t);
 struct mapped_device *dm_table_get_md(struct dm_table *t);
 const char *dm_table_device_name(struct dm_table *t);