mm/page_alloc: simplify locking during free_unref_page_list
While freeing a large list, the zone lock will be released and reacquired
to avoid long hold times since commit c24ad77d96 ("mm/page_alloc.c:
avoid excessive IRQ disabled times in free_unref_page_list()").  As
suggested by Vlastimil Babka, the lock release/reacquire logic can be
simplified by reusing the logic that acquires a different lock when
changing zones.
Link: https://lkml.kernel.org/r/20221122131229.5263-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit a4bafffb5d (parent 5749077415)
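For context, here is a minimal standalone sketch of the locking pattern this patch adopts: instead of a separate "drop the lock every N items" check at the bottom of the loop, the batch limit is folded into the branch that already re-takes the lock when moving to a different zone. All names in the sketch (struct bucket, struct item, free_one(), BATCH_MAX) are invented for illustration, and a plain pthread mutex stands in for the kernel's pcp trylock and its fallback path; the actual kernel change is the diff below.

/*
 * Sketch only, not kernel code: batch limit handled in the same branch
 * that switches locks when the bucket ("zone") changes.
 */
#include <pthread.h>
#include <stddef.h>

#define BATCH_MAX 32			/* stand-in for SWAP_CLUSTER_MAX */

struct bucket {
	pthread_mutex_t lock;		/* stand-in for the per-cpu pages lock */
};

struct item {
	struct bucket *home;		/* bucket this item is freed back to */
	struct item *next;
};

static void free_one(struct bucket *b, struct item *it)
{
	(void)b;
	(void)it;			/* placeholder for the real free path */
}

void free_item_list(struct item *list)
{
	struct bucket *locked = NULL;	/* bucket whose lock is currently held */
	unsigned int batch_count = 0;

	for (struct item *it = list; it; it = it->next) {
		struct bucket *b = it->home;

		/*
		 * Either a different bucket requiring a different lock, or
		 * the batch limit was hit: release and (re)acquire in one
		 * place, which also bounds lock hold times.
		 */
		if (b != locked || batch_count == BATCH_MAX) {
			if (locked)
				pthread_mutex_unlock(&locked->lock);
			batch_count = 0;
			pthread_mutex_lock(&b->lock);
			locked = b;
		}

		free_one(b, it);
		batch_count++;
	}

	if (locked)
		pthread_mutex_unlock(&locked->lock);
}

With this shape the batch counter is reset only where the lock is (re)acquired, which is why the second and third hunks below can drop the separate reset and the trailing guard block.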
@@ -3525,13 +3525,19 @@ void free_unref_page_list(struct list_head *list)
 		list_del(&page->lru);
 		migratetype = get_pcppage_migratetype(page);
 
-		/* Different zone, different pcp lock. */
-		if (zone != locked_zone) {
+		/*
+		 * Either different zone requiring a different pcp lock or
+		 * excessive lock hold times when freeing a large list of
+		 * pages.
+		 */
+		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
 			if (pcp) {
 				pcp_spin_unlock(pcp);
 				pcp_trylock_finish(UP_flags);
 			}
 
+			batch_count = 0;
+
 			/*
 			 * trylock is necessary as pages may be getting freed
 			 * from IRQ or SoftIRQ context after an IO completion.
@@ -3546,7 +3552,6 @@ void free_unref_page_list(struct list_head *list)
 				continue;
 			}
 			locked_zone = zone;
-			batch_count = 0;
 		}
 
 		/*
@@ -3558,19 +3563,7 @@ void free_unref_page_list(struct list_head *list)
 
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(zone, pcp, page, migratetype, 0);
-
-		/*
-		 * Guard against excessive lock hold times when freeing
-		 * a large list of pages. Lock will be reacquired if
-		 * necessary on the next iteration.
-		 */
-		if (++batch_count == SWAP_CLUSTER_MAX) {
-			pcp_spin_unlock(pcp);
-			pcp_trylock_finish(UP_flags);
-			batch_count = 0;
-			pcp = NULL;
-			locked_zone = NULL;
-		}
+		batch_count++;
 	}
 
 	if (pcp) {