mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-04-24 14:07:52 -04:00
mm/swap: convert lru_deactivate_file to a folio_batch
Use a folio throughout lru_deactivate_file_fn(), removing many hidden calls to compound_head(). Shrinks the kernel by 864 bytes of text. Link: https://lkml.kernel.org/r/20220617175020.717127-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
70dea5346e
commit
7a3dbfe8a5
1 changed file with 39 additions and 43 deletions
82
mm/swap.c
82
mm/swap.c
|
@@ -62,7 +62,7 @@ static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
|
||||||
struct lru_pvecs {
|
struct lru_pvecs {
|
||||||
local_lock_t lock;
|
local_lock_t lock;
|
||||||
struct folio_batch lru_add;
|
struct folio_batch lru_add;
|
||||||
struct pagevec lru_deactivate_file;
|
struct folio_batch lru_deactivate_file;
|
||||||
struct pagevec lru_deactivate;
|
struct pagevec lru_deactivate;
|
||||||
struct pagevec lru_lazyfree;
|
struct pagevec lru_lazyfree;
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
|
@@ -562,56 +562,57 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the page can not be invalidated, it is moved to the
|
* If the folio cannot be invalidated, it is moved to the
|
||||||
* inactive list to speed up its reclaim. It is moved to the
|
* inactive list to speed up its reclaim. It is moved to the
|
||||||
* head of the list, rather than the tail, to give the flusher
|
* head of the list, rather than the tail, to give the flusher
|
||||||
* threads some time to write it out, as this is much more
|
* threads some time to write it out, as this is much more
|
||||||
* effective than the single-page writeout from reclaim.
|
* effective than the single-page writeout from reclaim.
|
||||||
*
|
*
|
||||||
* If the page isn't page_mapped and dirty/writeback, the page
|
* If the folio isn't mapped and dirty/writeback, the folio
|
||||||
* could reclaim asap using PG_reclaim.
|
* could be reclaimed asap using the reclaim flag.
|
||||||
*
|
*
|
||||||
* 1. active, mapped page -> none
|
* 1. active, mapped folio -> none
|
||||||
* 2. active, dirty/writeback page -> inactive, head, PG_reclaim
|
* 2. active, dirty/writeback folio -> inactive, head, reclaim
|
||||||
* 3. inactive, mapped page -> none
|
* 3. inactive, mapped folio -> none
|
||||||
* 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
|
* 4. inactive, dirty/writeback folio -> inactive, head, reclaim
|
||||||
* 5. inactive, clean -> inactive, tail
|
* 5. inactive, clean -> inactive, tail
|
||||||
* 6. Others -> none
|
* 6. Others -> none
|
||||||
*
|
*
|
||||||
* In 4, why it moves inactive's head, the VM expects the page would
|
* In 4, it moves to the head of the inactive list so the folio is
|
||||||
* be write it out by flusher threads as this is much more effective
|
* written out by flusher threads as this is much more efficient
|
||||||
* than the single-page writeout from reclaim.
|
* than the single-page writeout from reclaim.
|
||||||
*/
|
*/
|
||||||
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
|
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
|
||||||
{
|
{
|
||||||
bool active = PageActive(page);
|
bool active = folio_test_active(folio);
|
||||||
int nr_pages = thp_nr_pages(page);
|
long nr_pages = folio_nr_pages(folio);
|
||||||
|
|
||||||
if (PageUnevictable(page))
|
if (folio_test_unevictable(folio))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Some processes are using the page */
|
/* Some processes are using the folio */
|
||||||
if (page_mapped(page))
|
if (folio_mapped(folio))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
del_page_from_lru_list(page, lruvec);
|
lruvec_del_folio(lruvec, folio);
|
||||||
ClearPageActive(page);
|
folio_clear_active(folio);
|
||||||
ClearPageReferenced(page);
|
folio_clear_referenced(folio);
|
||||||
|
|
||||||
if (PageWriteback(page) || PageDirty(page)) {
|
if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
|
||||||
/*
|
/*
|
||||||
* PG_reclaim could be raced with end_page_writeback
|
* Setting the reclaim flag could race with
|
||||||
* It can make readahead confusing. But race window
|
* folio_end_writeback() and confuse readahead. But the
|
||||||
* is _really_ small and it's non-critical problem.
|
* race window is _really_ small and it's not a critical
|
||||||
|
* problem.
|
||||||
*/
|
*/
|
||||||
add_page_to_lru_list(page, lruvec);
|
lruvec_add_folio(lruvec, folio);
|
||||||
SetPageReclaim(page);
|
folio_set_reclaim(folio);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* The page's writeback ends up during pagevec
|
* The folio's writeback ended while it was in the batch.
|
||||||
* We move that page into tail of inactive.
|
* We move that folio to the tail of the inactive list.
|
||||||
*/
|
*/
|
||||||
add_page_to_lru_list_tail(page, lruvec);
|
lruvec_add_folio_tail(lruvec, folio);
|
||||||
__count_vm_events(PGROTATED, nr_pages);
|
__count_vm_events(PGROTATED, nr_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -685,9 +686,9 @@ void lru_add_drain_cpu(int cpu)
|
||||||
local_unlock_irqrestore(&lru_rotate.lock, flags);
|
local_unlock_irqrestore(&lru_rotate.lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
|
fbatch = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
|
||||||
if (pagevec_count(pvec))
|
if (folio_batch_count(fbatch))
|
||||||
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
|
folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
|
||||||
|
|
||||||
pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
|
pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
|
||||||
if (pagevec_count(pvec))
|
if (pagevec_count(pvec))
|
||||||
|
@@ -701,32 +702,27 @@ void lru_add_drain_cpu(int cpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* deactivate_file_folio() - Forcefully deactivate a file folio.
|
* deactivate_file_folio() - Deactivate a file folio.
|
||||||
* @folio: Folio to deactivate.
|
* @folio: Folio to deactivate.
|
||||||
*
|
*
|
||||||
* This function hints to the VM that @folio is a good reclaim candidate,
|
* This function hints to the VM that @folio is a good reclaim candidate,
|
||||||
* for example if its invalidation fails due to the folio being dirty
|
* for example if its invalidation fails due to the folio being dirty
|
||||||
* or under writeback.
|
* or under writeback.
|
||||||
*
|
*
|
||||||
* Context: Caller holds a reference on the page.
|
* Context: Caller holds a reference on the folio.
|
||||||
*/
|
*/
|
||||||
void deactivate_file_folio(struct folio *folio)
|
void deactivate_file_folio(struct folio *folio)
|
||||||
{
|
{
|
||||||
struct pagevec *pvec;
|
struct folio_batch *fbatch;
|
||||||
|
|
||||||
/*
|
/* Deactivating an unevictable folio will not accelerate reclaim */
|
||||||
* In a workload with many unevictable pages such as mprotect,
|
|
||||||
* unevictable folio deactivation for accelerating reclaim is pointless.
|
|
||||||
*/
|
|
||||||
if (folio_test_unevictable(folio))
|
if (folio_test_unevictable(folio))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
folio_get(folio);
|
folio_get(folio);
|
||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
|
fbatch = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
|
||||||
|
folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
|
||||||
if (pagevec_add_and_need_flush(pvec, &folio->page))
|
|
||||||
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
|
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -899,7 +895,7 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
|
||||||
|
|
||||||
if (folio_batch_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
|
if (folio_batch_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
|
||||||
data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
|
data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
|
||||||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
|
folio_batch_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
|
||||||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
|
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
|
||||||
pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
|
pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
|
||||||
need_activate_page_drain(cpu) ||
|
need_activate_page_drain(cpu) ||
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue