dm cache policy smq: stop preemptively demoting blocks
It causes a lot of churn if the working set's size is close to the fast device's size.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit 6cf4cc8f8b (parent 4d44ec5ab7)
drivers/md/dm-cache-policy-smq.c

@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
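For readability, here is a minimal standalone model of free_target_met() as it reads after this hunk. The struct, the percent_to_target() body, and the 25% watermark are simplified stand-ins for the kernel's smq_policy, from_cblock() and FREE_TARGET, not the real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Assumed 25% watermark standing in for FREE_TARGET; the real value lives
 * in drivers/md/dm-cache-policy-smq.c. */
#define FREE_TARGET 25u

/* Simplified stand-in for struct smq_policy: just the counters the
 * post-patch free_target_met() reads. */
struct model_policy {
	unsigned cache_size;       /* from_cblock(mq->cache_size) */
	unsigned nr_allocated;     /* mq->cache_alloc.nr_allocated */
	unsigned demotions_queued; /* btracker_nr_demotions_queued(mq->bg_work) */
};

static unsigned percent_to_target(const struct model_policy *mq, unsigned p)
{
	return mq->cache_size * p / 100u;
}

/* Post-patch logic: the target counts as met only when the actual free
 * blocks plus already-queued demotions reach the watermark. The old
 * "if (!idle) return true;" short-circuit, which reported the target as
 * met whenever the device was busy, is gone. */
static bool free_target_met(const struct model_policy *mq)
{
	unsigned nr_free = mq->cache_size - mq->nr_allocated;

	return (nr_free + mq->demotions_queued) >= percent_to_target(mq, FREE_TARGET);
}

int main(void)
{
	/* A warm cache whose working set fills the fast device. */
	struct model_policy mq = {
		.cache_size = 1000,
		.nr_allocated = 1000,
		.demotions_queued = 0,
	};

	printf("free target met: %s\n", free_target_met(&mq) ? "yes" : "no");
	return 0;
}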
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
 			queue_demotion(mq);
 		return;
 	}
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
 
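To see why the old background path churned, here is a toy back-of-envelope simulation of the scenario the commit message describes: a working set the same size as the fast device. The cache size, cycle count, 25% watermark, and the assumption that every demoted block is immediately re-promoted are illustrative, not measurements:

#include <stdio.h>

/* Illustrative values only: a 1000-block cache with an assumed 25% free
 * watermark, and a workload that re-touches every demoted block. */
#define CACHE_SIZE  1000u
#define FREE_TARGET 25u

int main(void)
{
	unsigned watermark = CACHE_SIZE * FREE_TARGET / 100u;
	unsigned long old_migrations = 0, new_migrations = 0;

	/* Ten idle/busy cycles against a warm cache whose working set fills
	 * the fast device. */
	for (int cycle = 0; cycle < 10; cycle++) {
		/* Old behaviour: each idle period preemptively demotes blocks
		 * until the free target is met, and the resumed load promotes
		 * the same blocks straight back. */
		old_migrations += watermark; /* demotions during idle    */
		old_migrations += watermark; /* re-promotions under load */

		/* New behaviour: smq_get_background_work() no longer queues
		 * demotions, so the warm cache generates no migrations. */
	}

	printf("migrations with preemptive demotion:    %lu\n", old_migrations);
	printf("migrations without preemptive demotion: %lu\n", new_migrations);
	return 0;
}

Under these assumptions the old path performs thousands of migrations that accomplish nothing, while the new path leaves the warm cache alone; demotion now happens only from queue_promotion(), when a promotion actually needs space.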