mm/lru: Convert __pagevec_lru_add_fn to take a folio
This saves five calls to compound_head(), totalling 60 bytes of text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
parent 3eed3ef55c
commit 934387c99f

2 changed files with 41 additions and 40 deletions
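The text saving comes from how the two APIs treat compound pages: the page-flag helpers on the removed side (PageLRU(), PageSwapBacked(), TestClearPageUnevictable(), ...) each call compound_head() to find the head page before touching flags, whereas the folio_test_*() helpers assume the caller already holds the head, so once __pagevec_lru_add() resolves the folio with page_folio() the callee can reuse it for every check. A minimal userspace analog of that pattern, using simplified stand-in types and hypothetical head_of()/page_flag()/folio_flag() helpers rather than the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins: a "folio" is just the head page of a compound page. */
struct page { struct page *head; unsigned long flags; };
struct folio { struct page page; };

static struct page *head_of(struct page *p)
{
        return p->head ? p->head : p;
}

/* Old style: every flag test re-derives the head page first. */
static bool page_flag(struct page *p, unsigned long bit)
{
        return head_of(p)->flags & bit;         /* one head lookup per call */
}

/* Folio style: the caller resolves the head once... */
static struct folio *to_folio(struct page *p)
{
        return (struct folio *)head_of(p);
}

/* ...and every later test uses it directly, with no further lookups. */
static bool folio_flag(struct folio *f, unsigned long bit)
{
        return f->page.flags & bit;
}

int main(void)
{
        struct page head = { .head = NULL, .flags = 0x3 };
        struct page tail = { .head = &head, .flags = 0 };

        /* Old: three tests, three head lookups. */
        bool lru    = page_flag(&tail, 0x1);
        bool active = page_flag(&tail, 0x2);
        bool dirty  = page_flag(&tail, 0x4);

        /* New: one lookup up front, then direct accesses. */
        struct folio *folio = to_folio(&tail);
        bool f_lru    = folio_flag(folio, 0x1);
        bool f_active = folio_flag(folio, 0x2);
        bool f_dirty  = folio_flag(folio, 0x4);

        printf("page: %d%d%d  folio: %d%d%d\n",
               lru, active, dirty, f_lru, f_active, f_dirty);
        return 0;
}

Both styles return the same answers; the difference is simply where the head lookup happens, which is what the hunks below change in the kernel code.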
include/trace/events/pagemap.h

@@ -16,38 +16,38 @@
 #define PAGEMAP_MAPPEDDISK	0x0020u
 #define PAGEMAP_BUFFERS		0x0040u
 
-#define trace_pagemap_flags(page) ( \
-	(PageAnon(page)		? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
-	(page_mapped(page)	? PAGEMAP_MAPPED     : 0) | \
-	(PageSwapCache(page)	? PAGEMAP_SWAPCACHE  : 0) | \
-	(PageSwapBacked(page)	? PAGEMAP_SWAPBACKED : 0) | \
-	(PageMappedToDisk(page)	? PAGEMAP_MAPPEDDISK : 0) | \
-	(page_has_private(page)	? PAGEMAP_BUFFERS    : 0) \
+#define trace_pagemap_flags(folio) ( \
+	(folio_test_anon(folio)		? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
+	(folio_mapped(folio)		? PAGEMAP_MAPPED     : 0) | \
+	(folio_test_swapcache(folio)	? PAGEMAP_SWAPCACHE  : 0) | \
+	(folio_test_swapbacked(folio)	? PAGEMAP_SWAPBACKED : 0) | \
+	(folio_test_mappedtodisk(folio)	? PAGEMAP_MAPPEDDISK : 0) | \
+	(folio_test_private(folio)	? PAGEMAP_BUFFERS    : 0) \
 	)
 
 TRACE_EVENT(mm_lru_insertion,
 
-	TP_PROTO(struct page *page),
+	TP_PROTO(struct folio *folio),
 
-	TP_ARGS(page),
+	TP_ARGS(folio),
 
 	TP_STRUCT__entry(
-		__field(struct page *,	page	)
+		__field(struct folio *,	folio	)
 		__field(unsigned long,	pfn	)
 		__field(enum lru_list,	lru	)
 		__field(unsigned long,	flags	)
 	),
 
 	TP_fast_assign(
-		__entry->page	= page;
-		__entry->pfn	= page_to_pfn(page);
-		__entry->lru	= folio_lru_list(page_folio(page));
-		__entry->flags	= trace_pagemap_flags(page);
+		__entry->folio	= folio;
+		__entry->pfn	= folio_pfn(folio);
+		__entry->lru	= folio_lru_list(folio);
+		__entry->flags	= trace_pagemap_flags(folio);
 	),
 
 	/* Flag format is based on page-types.c formatting for pagemap */
-	TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
-			__entry->page,
+	TP_printk("folio=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
+			__entry->folio,
 			__entry->pfn,
 			__entry->lru,
 			__entry->flags & PAGEMAP_MAPPED	? "M" : " ",
mm/swap.c (49 lines changed)

@@ -992,17 +992,18 @@ void __pagevec_release(struct pagevec *pvec)
 }
 EXPORT_SYMBOL(__pagevec_release);
 
-static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
 {
-	int was_unevictable = TestClearPageUnevictable(page);
-	int nr_pages = thp_nr_pages(page);
+	int was_unevictable = folio_test_clear_unevictable(folio);
+	long nr_pages = folio_nr_pages(folio);
 
-	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
 	/*
-	 * Page becomes evictable in two ways:
+	 * A folio becomes evictable in two ways:
 	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
-	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
+	 * 2) Before acquiring LRU lock to put the folio on the correct LRU
+	 *    and then
 	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
 	 *    b) do PageLRU check before lock [clear_page_mlock]
 	 *
@@ -1011,35 +1012,36 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
 	 *
 	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
 	 *
-	 * SetPageLRU()				TestClearPageMlocked()
+	 * folio_set_lru()			folio_test_clear_mlocked()
 	 * smp_mb() // explicit ordering	// above provides strict
 	 *					// ordering
-	 * PageMlocked()			PageLRU()
+	 * folio_test_mlocked()			folio_test_lru()
 	 *
 	 *
-	 * if '#1' does not observe setting of PG_lru by '#0' and fails
-	 * isolation, the explicit barrier will make sure that page_evictable
-	 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
-	 * can be reordered after PageMlocked check and can make '#1' to fail
-	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
-	 * looking at the same page) and the evictable page will be stranded
-	 * in an unevictable LRU.
+	 * if '#1' does not observe setting of PG_lru by '#0' and
+	 * fails isolation, the explicit barrier will make sure that
+	 * folio_evictable check will put the folio on the correct
+	 * LRU. Without smp_mb(), folio_set_lru() can be reordered
+	 * after folio_test_mlocked() check and can make '#1' fail the
+	 * isolation of the folio whose mlocked bit is cleared (#0 is
+	 * also looking at the same folio) and the evictable folio will
+	 * be stranded on an unevictable LRU.
 	 */
-	SetPageLRU(page);
+	folio_set_lru(folio);
 	smp_mb__after_atomic();
 
-	if (page_evictable(page)) {
+	if (folio_evictable(folio)) {
 		if (was_unevictable)
 			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	} else {
-		ClearPageActive(page);
-		SetPageUnevictable(page);
+		folio_clear_active(folio);
+		folio_set_unevictable(folio);
 		if (!was_unevictable)
 			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 	}
 
-	add_page_to_lru_list(page, lruvec);
-	trace_mm_lru_insertion(page);
+	lruvec_add_folio(lruvec, folio);
+	trace_mm_lru_insertion(folio);
 }
 
 /*
@@ -1053,11 +1055,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
 	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct folio *folio = page_folio(page);
+		struct folio *folio = page_folio(pvec->pages[i]);
 
 		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-		__pagevec_lru_add_fn(page, lruvec);
+		__pagevec_lru_add_fn(folio, lruvec);
 	}
 	if (lruvec)
 		unlock_page_lruvec_irqrestore(lruvec, flags);
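Pieced together from the added lines above, the converted loop in __pagevec_lru_add() ends up resolving each folio once and passing it straight down. The sketch below is a reconstruction from this diff (the local declarations come from unchanged context the hunk does not show), not a verbatim copy of the resulting file:

void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		/* Resolve the head page once; everything below works on the folio. */
		struct folio *folio = page_folio(pvec->pages[i]);

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		__pagevec_lru_add_fn(folio, lruvec);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}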