mm: Count the number of pages affected in change_protection()
This will be used for three kinds of purposes:

 - to optimize mprotect()
 - to speed up working set scanning for working set areas that
   have not been touched
 - to more accurately scan per real working set

No change in functionality from this patch.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4fd017708c
commit 7da4d641c5

4 changed files with 61 additions and 18 deletions
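Of the four changed files, only the include/linux/hugetlb.h hunks are reproduced below. The mechanical pattern is the same across the whole patch: each level of the protection-change page-table walk returns how many pages it touched, and its caller sums the counts. A minimal illustrative sketch of that pattern at the PTE level (helper names and signatures follow the kernel of this era; the real walker in mm/mprotect.c additionally handles lazy MMU mode, dirty accounting, and non-present entries):

	/*
	 * Illustrative sketch only, not the kernel's exact code: the
	 * PTE-level walker counts every entry whose protection it
	 * changes and returns the total to the pmd/pud/pgd walkers.
	 */
	static unsigned long change_pte_range_sketch(struct mm_struct *mm,
			pmd_t *pmd, unsigned long addr, unsigned long end,
			pgprot_t newprot)
	{
		unsigned long pages = 0;
		spinlock_t *ptl;
		pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

		do {
			if (pte_present(*pte)) {
				/* ...rewrite protection bits via pte_modify()... */
				pages++;	/* the new bookkeeping */
			}
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(pte - 1, ptl);

		return pages;	/* summed by the callers above this level */
	}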
include/linux/hugetlb.h

@@ -87,7 +87,7 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 				pud_t *pud, int write);
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pmd);
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot);
 
 #else /* !CONFIG_HUGETLB_PAGE */
@@ -132,7 +132,11 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
-#define hugetlb_change_protection(vma, address, end, newprot)
+static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+		unsigned long address, unsigned long end, pgprot_t newprot)
+{
+	return 0;
+}
 
 static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start,
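The hunks for the other three files are not shown above, but the header change dictates the caller shape: because the !CONFIG_HUGETLB_PAGE version is now a static inline returning 0 rather than an empty #define, a caller can assign the result unconditionally, with no #ifdef. A hedged sketch of such a caller (change_protection_range() is assumed here as the internal per-range walker; the exact body in mm/mprotect.c may differ):

	/* Hedged sketch of the caller shape implied by the header change. */
	unsigned long change_protection(struct vm_area_struct *vma,
			unsigned long start, unsigned long end,
			pgprot_t newprot, int dirty_accountable)
	{
		unsigned long pages;

		if (is_vm_hugetlb_page(vma))
			/* the !CONFIG_HUGETLB_PAGE stub keeps this compiling */
			pages = hugetlb_change_protection(vma, start, end,
							  newprot);
		else
			pages = change_protection_range(vma, start, end,
							newprot,
							dirty_accountable);

		return pages;	/* number of pages whose protection changed */
	}

Replacing the empty macro with a static inline also restores argument type checking in !CONFIG_HUGETLB_PAGE builds, which the old #define silently discarded.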