Merge branch 'slab/for-6.3/cleanups' into slab/for-linus
A bunch of cleanups for SLAB and SLUB:

- Use the standard idiom to get the head page of a folio (by SeongJae Park; see the sketch below)
- Simplify and inline is_debug_pagealloc_cache() in SLAB (by lvqian)
- Remove an unused variable in SLAB (by Gou Hao)
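On the first item: a folio overlays its head page, so the folio's embedded page member is that head page. That is why the hunks below can replace folio_page(folio, 0) with &folio->page without changing behavior. The standalone C mock that follows is a minimal sketch of the equivalence; the struct layouts and the folio_page() macro here are deliberately simplified stand-ins for illustration, not the real kernel definitions from include/linux/mm_types.h and include/linux/mm.h.

#include <stdio.h>

/* Simplified stand-ins for illustration only. In the kernel, a folio
 * overlays its head page, so the embedded 'page' member is the head page. */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Simplified folio_page(): index n pages from the head page. */
#define folio_page(folio, n) (&(folio)->page + (n))

int main(void)
{
	struct folio f = { .page = { .flags = 0 } };

	/* Both expressions name the same head page... */
	printf("equal: %d\n", folio_page(&f, 0) == &f.page);

	/* ...but &folio->page states "head page" directly, which is the
	 * idiom kmem_freepages() and __free_slab() switch to below. */
	return 0;
}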
This commit is contained in:
commit 0028517724
2 changed files with 6 additions and 12 deletions
16	mm/slab.c
2	mm/slub.c
mm/slab.c:

@@ -220,7 +220,6 @@ static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 static inline void fixup_slab_list(struct kmem_cache *cachep,
 				struct kmem_cache_node *n, struct slab *slab,
 				void **list);
-static int slab_early_init = 1;
 
 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
@@ -1249,8 +1248,6 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
 
-	slab_early_init = 0;
-
 	/* 5) Replace the bootstrap kmem_cache_node */
 	{
 		int nid;
@@ -1389,7 +1386,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	page_mapcount_reset(folio_page(folio, 0));
+	page_mapcount_reset(&folio->page);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
 	smp_wmb();
@@ -1398,7 +1395,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
 	unaccount_slab(slab, order, cachep);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1413,13 +1410,10 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
-static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
-		(cachep->size % PAGE_SIZE) == 0)
-		return true;
-
-	return false;
+	return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
+			((cachep->size % PAGE_SIZE) == 0);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
mm/slub.c:

@@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)