dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
When using the cleaner policy to decommission the cache, there is
never any writeback started from the cache as it is constantly delayed
due to normal I/O keeping the device busy. This means @idle=false was
always being passed to clean_target_met().
Fix this by adding a specific 'cleaner' flag that is set when the
cleaner policy is configured. This flag serves to always allow the
cleaner's writeback work to be queued until the cache is
decommissioned (even if the cache isn't idle).
Reported-by: David Jeffery <djeffery@redhat.com>
Fixes: b29d4986d0 ("dm cache: significant rework to leverage dm-bio-prison-v2")
Cc: stable@vger.kernel.org
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
commit 1e4ab7b4c8
parent 7d5fff8982
@@ -857,7 +857,13 @@ struct smq_policy {
 
         struct background_tracker *bg_work;
 
-        bool migrations_allowed;
+        bool migrations_allowed:1;
+
+        /*
+         * If this is set the policy will try and clean the whole cache
+         * even if the device is not idle.
+         */
+        bool cleaner:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
          * Cache entries may not be populated. So we cannot rely on the
          * size of the clean queue.
          */
-        if (idle) {
+        if (idle || mq->cleaner) {
                 /*
                  * We'd like to clean everything.
                  */
@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
                 *hotspot_block_size /= 2u;
 }
 
-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
-                                            sector_t origin_size,
-                                            sector_t cache_block_size,
-                                            bool mimic_mq,
-                                            bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+             bool mimic_mq, bool migrations_allowed, bool cleaner)
 {
         unsigned int i;
         unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
                 goto bad_btracker;
 
         mq->migrations_allowed = migrations_allowed;
+        mq->cleaner = cleaner;
 
         return &mq->policy;
 
@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
                                           sector_t origin_size,
                                           sector_t cache_block_size)
 {
-        return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+        return __smq_create(cache_size, origin_size, cache_block_size,
+                            false, true, false);
 }
 
 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                          sector_t origin_size,
                                          sector_t cache_block_size)
 {
-        return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+        return __smq_create(cache_size, origin_size, cache_block_size,
+                            true, true, false);
 }
 
 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
                                               sector_t origin_size,
                                               sector_t cache_block_size)
 {
-        return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+        return __smq_create(cache_size, origin_size, cache_block_size,
+                            false, false, true);
 }
 
 /*----------------------------------------------------------------*/
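In essence, the hunks above add one flag and one extra condition in clean_target_met(). Below is a minimal standalone sketch of that logic, with simplified stand-in types and a stubbed-out fallback; the real struct smq_policy, queue code and background tracker live in drivers/md/dm-cache-policy-smq.c.

#include <stdbool.h>

/* Simplified stand-ins for this sketch; not the kernel definitions. */
struct queue {
        unsigned int nr_elts;   /* number of dirty cache entries */
};

struct smq_policy {
        struct queue dirty;
        bool migrations_allowed:1;
        bool cleaner:1;         /* set only when the 'cleaner' policy is configured */
};

static unsigned int q_size(struct queue *q)
{
        return q->nr_elts;
}

/*
 * Before the patch only @idle allowed the "clean everything" path, so
 * steady I/O (idle == false) kept starving the cleaner's writeback.
 * With mq->cleaner set, writeback work keeps being queued until no
 * dirty blocks remain, even while the device is busy.
 */
static bool clean_target_met(struct smq_policy *mq, bool idle)
{
        if (idle || mq->cleaner)
                return q_size(&mq->dirty) == 0u;

        /* Normal-operation threshold check elided in this sketch. */
        return true;
}

With this in place, configuring the cleaner policy lets writeback drain every dirty block until the cache is decommissioned, rather than waiting for an idle window.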