dm table: fix DAX iterate_devices based device capability checks

Fix dm_table_supports_dax() and invert the logic of both
iterate_devices_callout_fn callbacks so that the DAX capabilities of
every underlying device are properly checked.

Fixes: 545ed20e6d ("dm: add infrastructure for DAX support")
Cc: stable@vger.kernel.org
Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
Jeffle Xu 2021-02-08 22:34:36 -05:00 committed by Mike Snitzer
parent a4c8dd9c2d
commit 5b0fab5089
3 changed files with 12 additions and 29 deletions

View File

@@ -820,24 +820,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
 /* validate the dax capability of the target device span */
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
-			sector_t start, sector_t len, void *data)
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+			   sector_t start, sector_t len, void *data)
 {
 	int blocksize = *(int *) data, id;
 	bool rc;
 
 	id = dax_read_lock();
-	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
 	dax_read_unlock(id);
 
 	return rc;
 }
 
 /* Check devices support synchronous DAX */
-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
+					      sector_t start, sector_t len, void *data)
 {
-	return dev->dax_dev && dax_synchronous(dev->dax_dev);
+	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
 }
 
 bool dm_table_supports_dax(struct dm_table *t,
@@ -854,7 +854,7 @@ bool dm_table_supports_dax(struct dm_table *t,
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
 			return false;
 	}
@@ -925,7 +925,7 @@ static int dm_table_determine_type(struct dm_table *t)
 verify_bio_based:
 	/* We must use this table as bio-based */
 	t->type = DM_TYPE_BIO_BASED;
-	if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
+	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 	    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 		t->type = DM_TYPE_DAX_BIO_BASED;
 	}
@@ -1618,23 +1618,6 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
 	return false;
 }
 
-static int dm_table_supports_dax_write_cache(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti,
-				device_dax_write_cache_enabled, NULL))
-			return true;
-	}
-
-	return false;
-}
-
 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
@@ -1839,15 +1822,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
+	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-		if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
+		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
 			set_dax_synchronous(t->md->dax_dev);
 	}
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
 
-	if (dm_table_supports_dax_write_cache(t))
+	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */

View File

@@ -1133,7 +1133,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
 	if (!map)
 		goto out;
 
-	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
 out:
 	dm_put_live_table(md, srcu_idx);

View File

@@ -73,7 +73,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
 			   int *blocksize);
-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
-			sector_t start, sector_t len, void *data);
+int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+			   sector_t start, sector_t len, void *data);
 
 void dm_lock_md_type(struct mapped_device *md);