Mirror of https://gitee.com/bianbu-linux/linux-6.6, synced 2025-04-24 14:07:52 -04:00
mm: rmap: remove lock_page_memcg()
The previous patch made sure charge moving only touches pages for which
page_mapped() is stable. lock_page_memcg() is no longer needed.

Link: https://lkml.kernel.org/r/20221206171340.139790-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4e0cf05f60
commit c7c3dec1c9

1 changed file with 8 additions and 18 deletions
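The change is mechanical: each rmap path simply drops the lock_page_memcg()/unlock_page_memcg() bracket around its mapcount and statistics update, because the previous patch in the series guarantees that memcg charge moving only looks at pages whose page_mapped() state is stable. As an illustration only, here is a minimal user-space sketch of that before/after shape; it is not the kernel code, and every type and helper in it is a stand-in:

/*
 * Illustrative user-space sketch only -- not kernel code. It models the
 * pattern this commit removes: the rmap side used to bracket the
 * mapcount/statistics update with lock_page_memcg()/unlock_page_memcg()
 * so charge moving never observed a half-updated mapping state. With
 * charge moving restricted to pages whose page_mapped() result is
 * stable, the bracket can go. All types and helpers are stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page { atomic_int _mapcount; };  /* stand-in; -1 means unmapped */

static void lock_page_memcg(struct page *p)   { (void)p; /* stand-in: pin page->memcg */ }
static void unlock_page_memcg(struct page *p) { (void)p; }
static void mod_mapped_stat(struct page *p, int nr) { (void)p; printf("NR_*_MAPPED += %d\n", nr); }

/* Old shape: the stat update is done under the memcg lock. */
static void page_add_rmap_old(struct page *page)
{
        lock_page_memcg(page);
        /* like atomic_inc_and_test(): true when the count reaches 0, i.e. first map */
        bool first = (atomic_fetch_add(&page->_mapcount, 1) == -1);
        if (first)
                mod_mapped_stat(page, 1);
        unlock_page_memcg(page);
}

/* New shape: same logic, no memcg locking around it. */
static void page_add_rmap_new(struct page *page)
{
        bool first = (atomic_fetch_add(&page->_mapcount, 1) == -1);
        if (first)
                mod_mapped_stat(page, 1);
}

int main(void)
{
        struct page p;
        atomic_init(&p._mapcount, -1);
        page_add_rmap_new(&p);          /* prints "NR_*_MAPPED += 1" */
        return 0;
}

The actual diff below removes exactly this kind of bracket from page_add_anon_rmap(), page_add_file_rmap(), and page_remove_rmap().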
 mm/rmap.c | 26 ++++++++------------------

diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1222,9 +1222,6 @@ void page_add_anon_rmap(struct page *page,
 	bool compound = flags & RMAP_COMPOUND;
 	bool first = true;
 
-	if (unlikely(PageKsm(page)))
-		lock_page_memcg(page);
-
 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
 		first = atomic_inc_and_test(&page->_mapcount);
@@ -1262,15 +1259,14 @@ void page_add_anon_rmap(struct page *page,
 	if (nr)
 		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
 
-	if (unlikely(PageKsm(page)))
-		unlock_page_memcg(page);
-
-	/* address might be in next vma when migration races vma_adjust */
-	else if (first)
-		__page_set_anon_rmap(page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
-	else
-		__page_check_anon_rmap(page, vma, address);
+	if (likely(!PageKsm(page))) {
+		/* address might be in next vma when migration races vma_adjust */
+		if (first)
+			__page_set_anon_rmap(page, vma, address,
+					     !!(flags & RMAP_EXCLUSIVE));
+		else
+			__page_check_anon_rmap(page, vma, address);
+	}
 
 	mlock_vma_page(page, vma, compound);
 }
@@ -1329,7 +1325,6 @@ void page_add_file_rmap(struct page *page,
 	bool first;
 
 	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
-	lock_page_memcg(page);
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
@@ -1365,7 +1360,6 @@ void page_add_file_rmap(struct page *page,
 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
 	if (nr)
 		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
-	unlock_page_memcg(page);
 
 	mlock_vma_page(page, vma, compound);
 }
@@ -1394,8 +1388,6 @@ void page_remove_rmap(struct page *page,
 		return;
 	}
 
-	lock_page_memcg(page);
-
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
 	if (likely(!compound)) {
 		last = atomic_add_negative(-1, &page->_mapcount);
@@ -1451,8 +1443,6 @@ void page_remove_rmap(struct page *page,
 	 * and remember that it's only reliable while mapped.
 	 */
 
-	unlock_page_memcg(page);
-
 	munlock_vma_page(page, vma, compound);
 }
 