vmscan: Add check_move_unevictable_folios()

Change the guts of check_move_unevictable_pages() over to use folios:
move the implementation into a new check_move_unevictable_folios() and
keep check_move_unevictable_pages() as a thin wrapper around it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2022-06-04 17:39:09 -04:00
parent f6e0e17344
commit 77414d195f
2 changed files with 39 additions and 26 deletions

View file

@ -438,7 +438,8 @@ static inline bool node_reclaim_enabled(void)
return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
} }
extern void check_move_unevictable_pages(struct pagevec *pvec); void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);
extern void kswapd_run(int nid); extern void kswapd_run(int nid);
extern void kswapd_stop(int nid); extern void kswapd_stop(int nid);

View file

@ -4790,45 +4790,57 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
} }
#endif #endif
/**
* check_move_unevictable_pages - check pages for evictability and move to
* appropriate zone lru list
* @pvec: pagevec with lru pages to check
*
* Checks pages for evictability, if an evictable page is in the unevictable
* lru list, moves it to the appropriate evictable lru list. This function
* should be only used for lru pages.
*/
void check_move_unevictable_pages(struct pagevec *pvec) void check_move_unevictable_pages(struct pagevec *pvec)
{
struct folio_batch fbatch;
unsigned i;
folio_batch_init(&fbatch);
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
if (PageTransTail(page))
continue;
folio_batch_add(&fbatch, page_folio(page));
}
check_move_unevictable_folios(&fbatch);
}
EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
/**
* check_move_unevictable_folios - Move evictable folios to appropriate zone
* lru list
* @fbatch: Batch of lru folios to check.
*
* Checks folios for evictability, if an evictable folio is in the unevictable
* lru list, moves it to the appropriate evictable lru list. This function
* should be only used for lru folios.
*/
void check_move_unevictable_folios(struct folio_batch *fbatch)
{ {
struct lruvec *lruvec = NULL; struct lruvec *lruvec = NULL;
int pgscanned = 0; int pgscanned = 0;
int pgrescued = 0; int pgrescued = 0;
int i; int i;
for (i = 0; i < pvec->nr; i++) { for (i = 0; i < fbatch->nr; i++) {
struct page *page = pvec->pages[i]; struct folio *folio = fbatch->folios[i];
struct folio *folio = page_folio(page); int nr_pages = folio_nr_pages(folio);
int nr_pages;
if (PageTransTail(page))
continue;
nr_pages = thp_nr_pages(page);
pgscanned += nr_pages; pgscanned += nr_pages;
/* block memcg migration during page moving between lru */ /* block memcg migration while the folio moves between lrus */
if (!TestClearPageLRU(page)) if (!folio_test_clear_lru(folio))
continue; continue;
lruvec = folio_lruvec_relock_irq(folio, lruvec); lruvec = folio_lruvec_relock_irq(folio, lruvec);
if (page_evictable(page) && PageUnevictable(page)) { if (folio_evictable(folio) && folio_test_unevictable(folio)) {
del_page_from_lru_list(page, lruvec); lruvec_del_folio(lruvec, folio);
ClearPageUnevictable(page); folio_clear_unevictable(folio);
add_page_to_lru_list(page, lruvec); lruvec_add_folio(lruvec, folio);
pgrescued += nr_pages; pgrescued += nr_pages;
} }
SetPageLRU(page); folio_set_lru(folio);
} }
if (lruvec) { if (lruvec) {
@ -4839,4 +4851,4 @@ void check_move_unevictable_pages(struct pagevec *pvec)
count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
} }
} }
EXPORT_SYMBOL_GPL(check_move_unevictable_pages); EXPORT_SYMBOL_GPL(check_move_unevictable_folios);