Merge tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull mm updates from Andrew Morton:

 - Yosry Ahmed brought back some cgroup v1 stats in OOM logs

 - Yosry has also eliminated cgroup's atomic rstat flushing

 - Nhat Pham adds the new cachestat() syscall. It provides userspace
   with the ability to query pagecache status - a similar concept to
   mincore() but more powerful and with improved usability (a hedged
   userspace sketch follows this list)

 - Mel Gorman provides more optimizations for compaction, reducing the
   prevalence of page rescanning

 - Lorenzo Stoakes has done some maintenance work on the
   get_user_pages() interface

 - Liam Howlett continues with cleanups and maintenance work to the
   maple tree code. Peng Zhang also does some work on maple tree

 - Johannes Weiner has done some cleanup work on the compaction code

 - David Hildenbrand has contributed additional selftests for
   get_user_pages()

 - Thomas Gleixner has contributed some maintenance and optimization
   work for the vmalloc code

 - Baolin Wang has provided some compaction cleanups

 - SeongJae Park continues maintenance work on the DAMON code

 - Huang Ying has done some maintenance on the swap code's usage of
   device refcounting

 - Christoph Hellwig has some cleanups for the filemap/directio code

 - Ryan Roberts provides two patch series which rationalize the
   kernel's access to pte entries - use the provided APIs rather than
   open-coding accesses (a small kernel-context illustration follows
   the cachestat sketch below)

 - Lorenzo Stoakes has some fixes to the interaction between pagecache
   and directio access to file mappings

 - John Hubbard has a series of fixes to the MM selftesting code

 - ZhangPeng continues the folio conversion campaign

 - Hugh Dickins has been working on the pagetable handling code, mainly
   with a view to reducing the load on the mmap_lock

 - Catalin Marinas has reduced the arm64 kmalloc() minimum alignment
   from 128 to 8

 - Domenico Cerasuolo has improved the zswap reclaim mechanism by
   reorganizing the LRU management

 - Matthew Wilcox provides some fixups to make gfs2 work better with
   the buffer_head code

 - Vishal Moola also has done some folio conversion work

 - Matthew Wilcox has removed the remnants of the pagevec code - their
   functionality is migrated over to struct folio_batch

* tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (380 commits)
  mm/hugetlb: remove hugetlb_set_page_subpool()
  mm: nommu: correct the range of mmap_sem_read_lock in task_mem()
  hugetlb: revert use of page_cache_next_miss()
  Revert "page cache: fix page_cache_next/prev_miss off by one"
  mm/vmscan: fix root proactive reclaim unthrottling unbalanced node
  mm: memcg: rename and document global_reclaim()
  mm: kill [add|del]_page_to_lru_list()
  mm: compaction: convert to use a folio in isolate_migratepages_block()
  mm: zswap: fix double invalidate with exclusive loads
  mm: remove unnecessary pagevec includes
  mm: remove references to pagevec
  mm: rename invalidate_mapping_pagevec to mapping_try_invalidate
  mm: remove struct pagevec
  net: convert sunrpc from pagevec to folio_batch
  i915: convert i915_gpu_error to use a folio_batch
  pagevec: rename fbatch_count()
  mm: remove check_move_unevictable_pages()
  drm: convert drm_gem_put_pages() to use a folio_batch
  i915: convert shmem_sg_free_table() to use a folio_batch
  scatterlist: add sg_set_folio()
  ...
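A minimal userspace sketch of cachestat(), assuming a kernel with this merge and a libc that does not yet wrap the syscall, so it is invoked via syscall(2). The struct layouts follow the new UAPI (include/uapi/linux/mman.h), and the fallback number 451 matches the syscall.tbl hunk in the diff below; treat the whole program as an illustration, not authoritative documentation:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_cachestat
	#define __NR_cachestat 451	/* number added for m68k in the diff below */
	#endif

	struct cachestat_range {
		uint64_t off;	/* start of the byte range to query */
		uint64_t len;	/* length of the range; 0 means "to EOF" */
	};

	struct cachestat {
		uint64_t nr_cache;		/* pages in the page cache */
		uint64_t nr_dirty;		/* dirty pages */
		uint64_t nr_writeback;		/* pages under writeback */
		uint64_t nr_evicted;		/* pages evicted from the cache */
		uint64_t nr_recently_evicted;	/* evicted while presumably still hot */
	};

	int main(int argc, char **argv)
	{
		struct cachestat_range range = { 0, 0 };	/* whole file */
		struct cachestat cs;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;
		/* the final flags argument must currently be 0 */
		if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
			printf("cached=%llu dirty=%llu writeback=%llu\n",
			       (unsigned long long)cs.nr_cache,
			       (unsigned long long)cs.nr_dirty,
			       (unsigned long long)cs.nr_writeback);
		close(fd);
		return 0;
	}

Unlike mincore(), this reports per-range counts (including dirty/writeback state) through a stable struct rather than a per-page byte vector.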
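And a small kernel-context illustration of the pte-access rationalization mentioned for Ryan Roberts' series; read_young_old() and read_young_new() are hypothetical helpers, not code from this merge:

	#include <linux/pgtable.h>

	/* Old style: open-coded dereference of the pte pointer. */
	static bool read_young_old(pte_t *ptep)
	{
		return pte_young(*ptep);
	}

	/*
	 * New style: go through ptep_get(), giving core MM one place to
	 * instrument, or make atomic, all pte reads.
	 */
	static bool read_young_new(pte_t *ptep)
	{
		return pte_young(ptep_get(ptep));
	}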
commit 6e17c6de3d
334 changed files with 8347 additions and 6911 deletions
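Only the m68k hunks of the 334-file diff survive in this capture. All of them come from Hugh Dickins' pagetable series, after which pte_offset_map() and pte_offset_map_lock() are allowed to fail: callers must handle a NULL return and pair every successful map with an unmap. A condensed sketch of the new convention, with touch_pte() as a hypothetical helper distilled from the hunks below rather than actual kernel code:

	#include <linux/mm.h>

	static int touch_pte(pmd_t *pmd, unsigned long addr)
	{
		pte_t *pte = NULL;	/* NULL-init so the exit path can test it */
		int ret = -1;

		pte = pte_offset_map(pmd, addr);
		if (!pte || pte_none(*pte) || !pte_present(*pte))
			goto out;	/* table gone (e.g. THP collapse) or no page */
		set_pte(pte, pte_mkyoung(*pte));	/* m68k-style write, as in the hunks */
		ret = 0;
	out:
		if (pte)
			pte_unmap(pte);
		return ret;
	}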
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -99,7 +99,7 @@ static inline void load_ksp_mmu(struct task_struct *task)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	unsigned long mmuar;
 
 	local_irq_save(flags);
@@ -139,7 +139,7 @@ static inline void load_ksp_mmu(struct task_struct *task)
 
 	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
 				     : pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte))
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
 		goto bug;
 
 	set_pte(pte, pte_mkyoung(*pte));
@@ -161,6 +161,8 @@ static inline void load_ksp_mmu(struct task_struct *task)
 bug:
 	pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
 end:
+	if (pte && mmuar < PAGE_OFFSET)
+		pte_unmap(pte);
 	local_irq_restore(flags);
 }
 
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -488,6 +488,8 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
 	if (!pmd_present(*pmd))
 		goto bad_access;
 	pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+	if (!pte)
+		goto bad_access;
 	if (!pte_present(*pte) || !pte_dirty(*pte)
 	    || !pte_write(*pte)) {
 		pte_unmap_unlock(pte, ptl);
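The same series makes the locked variant fail-able too, which is what the added NULL check above handles. A companion sketch; check_dirty() is a hypothetical helper, not kernel code:

	#include <linux/mm.h>

	static int check_dirty(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;
		int ret = -EFAULT;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte)		/* no page table there any more */
			return ret;
		if (pte_present(*pte) && pte_dirty(*pte))
			ret = 0;
		pte_unmap_unlock(pte, ptl);	/* drops the map and the pte lock */
		return ret;
	}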
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -450,3 +450,4 @@
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	common	cachestat		sys_cachestat
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -91,7 +91,8 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
+	int ret = -1;
 	int asid;
 
 	local_irq_save(flags);
@@ -100,47 +101,33 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		regs->pc + (extension_word * sizeof(long));
 
 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
-	if (!mm) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!mm)
+		goto out;
 
 	pgd = pgd_offset(mm, mmuar);
-	if (pgd_none(*pgd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pgd_none(*pgd))
+		goto out;
 
 	p4d = p4d_offset(pgd, mmuar);
-	if (p4d_none(*p4d)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (p4d_none(*p4d))
+		goto out;
 
 	pud = pud_offset(p4d, mmuar);
-	if (pud_none(*pud)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pud_none(*pud))
+		goto out;
 
 	pmd = pmd_offset(pud, mmuar);
-	if (pmd_none(*pmd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pmd_none(*pmd))
+		goto out;
 
 	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
 				: pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
+		goto out;
 
 	if (write) {
-		if (!pte_write(*pte)) {
-			local_irq_restore(flags);
-			return -1;
-		}
+		if (!pte_write(*pte))
+			goto out;
 		set_pte(pte, pte_mkdirty(*pte));
 	}
 
@@ -161,9 +148,12 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
 	else
 		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
-
+	ret = 0;
+out:
+	if (pte && !KMAPAREA(mmuar))
+		pte_unmap(pte);
 	local_irq_restore(flags);
-	return 0;
+	return ret;
 }
 
 void __init cf_bootmem_alloc(void)
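Several commits in the shortlog above ("mm: remove struct pagevec" and the sunrpc/i915/drm conversions) move callers to struct folio_batch. A kernel-context sketch of that API, assuming a tree with this merge applied; count_dirty_folios() is a hypothetical helper, not code from the series:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>	/* folio_batch still lives in this header */
	#include <linux/sched.h>

	/* Count dirty folios in [0, end] of a mapping using a folio_batch. */
	static unsigned long count_dirty_folios(struct address_space *mapping,
						pgoff_t end)
	{
		struct folio_batch fbatch;
		pgoff_t start = 0;
		unsigned long dirty = 0;
		unsigned int i;

		folio_batch_init(&fbatch);
		while (filemap_get_folios(mapping, &start, end, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++)
				if (folio_test_dirty(fbatch.folios[i]))
					dirty++;
			/* drop the references filemap_get_folios() took */
			folio_batch_release(&fbatch);
			cond_resched();
		}
		return dirty;
	}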