Merge branch 'slab/for-6.2/fit_rcu_head' into slab/for-next
A series by myself to reorder fields in struct slab to allow the embedded rcu_head to grow (for debugging purposes). Requires changes to isolate_movable_page() to skip slab pages which can otherwise become false-positive __PageMovable due to its use of low bits in page->mapping.
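For context: __PageMovable() is a tag check on the low bits of page->mapping, and after this rearrangement a slab's own metadata can occupy that word, so arbitrary slab contents may happen to match the tag. A minimal userspace sketch of the failure mode (mock struct and names, not the kernel's; only the low-bit-tagging convention mirrors the kernel):

```c
/*
 * Sketch of why a slab page can look "movable": the movable check only
 * tests tag bits in the word overlaying page->mapping, and for slab
 * pages that word now holds unrelated data with arbitrary bit patterns.
 * The mock_* names are invented for this illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define MAPPING_MOVABLE	0x2UL	/* tag kept in the low bits, as in the kernel */
#define MAPPING_FLAGS	0x3UL

struct mock_page {
	union {
		void *mapping;		/* movable ops pointer + tag bits */
		void *slab_data;	/* slab reuses the same word */
	};
};

static int mock_page_movable(struct mock_page *p)
{
	return ((uintptr_t)p->mapping & MAPPING_FLAGS) == MAPPING_MOVABLE;
}

int main(void)
{
	struct mock_page slab_page;

	/* Slab stores list/rcu data here; the low bits happen to match the tag. */
	slab_page.slab_data = (void *)0xdeadbee2;
	printf("slab page looks movable: %d\n", mock_page_movable(&slab_page));
	return 0;
}
```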
commit 76537db3b9
4 changed files with 64 additions and 37 deletions
15 mm/migrate.c

@@ -74,13 +74,22 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!get_page_unless_zero(page)))
 		goto out;
 
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
+	smp_rmb();
 	/*
-	 * Check PageMovable before holding a PG_lock because page's owner
-	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grabbing the lock ruins page's owner side.
+	 * Check movable flag before taking the page lock because
+	 * we use non-atomic bitops on newly allocated page flags so
+	 * unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
+	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
+	smp_rmb();
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+
 	/*
 	 * As movable pages are not isolated from LRU lists, concurrent
 	 * compaction threads can race against page migration functions
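The two new smp_rmb() calls bracket the movable test so a page that is (or just became) a slab page is never isolated; each pairs with an smp_wmb() on the allocator side. A rough C11 userspace analogue of this reader, with acquire fences standing in for smp_rmb() (mock types; the flag/tag bit layout is assumed for illustration):

```c
/*
 * Userspace analogue of the reader side in isolate_movable_page():
 * re-check the slab flag around the movable test. Not the kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_page {
	atomic_ulong flags;		/* bit 0: "slab" flag */
	atomic_uintptr_t mapping;	/* bit 1 set: tagged movable */
};

static bool mock_isolate_movable(struct mock_page *p)
{
	if (atomic_load_explicit(&p->flags, memory_order_relaxed) & 1)
		return false;	/* slab page: never movable */
	/* Pairs with the release fence in the slab-free path. */
	atomic_thread_fence(memory_order_acquire);
	if (!(atomic_load_explicit(&p->mapping, memory_order_relaxed) & 2))
		return false;	/* no movable tag in the mapping word */
	/* Pairs with the release fence in the slab-alloc path. */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&p->flags, memory_order_relaxed) & 1)
		return false;	/* page became a slab page meanwhile */
	return true;
}

int main(void)
{
	struct mock_page p = { .flags = 0, .mapping = 2 };

	printf("isolate: %d\n", mock_isolate_movable(&p));
	return 0;
}
```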
6 mm/slab.c

@@ -1370,6 +1370,8 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	account_slab(slab, cachep->gfporder, cachep, flags);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);

@@ -1387,9 +1389,11 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	page_mapcount_reset(folio_page(folio, 0));
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
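These are the matching publish-side barriers: set the slab flag before any store reaches folio->mapping, and on free reset the mapping before clearing the flag, so the reader above can never see the flag clear while a stale slab value still sits in the mapping word. A rough C11 analogue of both transitions (mock types; release fences stand in for smp_wmb()):

```c
/*
 * Userspace analogue of the writer side: publish the slab flag before
 * touching the mapping word, reset the mapping word before clearing
 * the flag. Mock types and values, not the kernel API.
 */
#include <stdatomic.h>

struct mock_page {
	atomic_ulong flags;		/* bit 0: "slab" flag */
	atomic_uintptr_t mapping;	/* reused for slab metadata */
};

static void mock_alloc_slab(struct mock_page *p)
{
	atomic_fetch_or_explicit(&p->flags, 1, memory_order_relaxed);
	/* Pairs with the reader's acquire fence before its movable test. */
	atomic_thread_fence(memory_order_release);
	/* Only now may slab metadata land in the mapping word. */
	atomic_store_explicit(&p->mapping, 0xdeadbee2, memory_order_relaxed);
}

static void mock_free_slab(struct mock_page *p)
{
	atomic_store_explicit(&p->mapping, 0, memory_order_relaxed);
	/* Pairs with the reader's acquire fence after its movable test. */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_and_explicit(&p->flags, ~1UL, memory_order_relaxed);
}

int main(void)
{
	struct mock_page p = { .flags = 0, .mapping = 0 };

	mock_alloc_slab(&p);
	mock_free_slab(&p);
	return 0;
}
```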
54 mm/slab.h

@@ -11,37 +11,43 @@ struct slab {
 
 #if defined(CONFIG_SLAB)
 
+	struct kmem_cache *slab_cache;
 	union {
-		struct list_head slab_list;
+		struct {
+			struct list_head slab_list;
+			void *freelist;	/* array of free object indexes */
+			void *s_mem;	/* first object */
+		};
 		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache;
-	void *freelist;	/* array of free object indexes */
-	void *s_mem;	/* first object */
 	unsigned int active;
 
 #elif defined(CONFIG_SLUB)
 
-	union {
-		struct list_head slab_list;
-		struct rcu_head rcu_head;
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-		struct {
-			struct slab *next;
-			int slabs;	/* Nr of slabs left */
-		};
-#endif
-	};
 	struct kmem_cache *slab_cache;
-	/* Double-word boundary */
-	void *freelist;		/* first free object */
 	union {
-		unsigned long counters;
 		struct {
-			unsigned inuse:16;
-			unsigned objects:15;
-			unsigned frozen:1;
+			union {
+				struct list_head slab_list;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+				struct {
+					struct slab *next;
+					int slabs;	/* Nr of slabs left */
+				};
+#endif
+			};
+			/* Double-word boundary */
+			void *freelist;		/* first free object */
+			union {
+				unsigned long counters;
+				struct {
+					unsigned inuse:16;
+					unsigned objects:15;
+					unsigned frozen:1;
+				};
+			};
 		};
+		struct rcu_head rcu_head;
 	};
 	unsigned int __unused;
 
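The effect of the rearrangement: every field that is dead once a slab is queued for RCU freeing now shares one union arm with rcu_head, while slab_cache moves in front of it, so rcu_head can grow (e.g. for debugging) up to the size of that arm. A standalone sketch with mock structs (64-bit layout assumed; the extra debug fields are hypothetical):

```c
/*
 * Sketch of the layout idea: a debug rcu_head larger than two pointers
 * still fits, because it only overlays fields that are dead once the
 * slab is queued for RCU freeing. Mock structs, not the kernel's.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_list_head { void *next, *prev; };
struct mock_big_rcu_head {	/* e.g. rcu_head grown for debugging */
	void *next, *func;
	void *debug[2];		/* hypothetical extra debug fields */
};

struct mock_slab {
	unsigned long __page_flags;
	void *slab_cache;	/* stays valid across the RCU free */
	union {
		struct {	/* 32 bytes of reusable space */
			struct mock_list_head slab_list;
			void *freelist;
			unsigned long counters;
		};
		struct mock_big_rcu_head rcu_head;	/* also 32 bytes: fits */
	};
	unsigned int __unused;
};

int main(void)
{
	printf("reusable arm: %zu bytes, big rcu_head: %zu bytes\n",
	       sizeof(struct mock_list_head) + sizeof(void *) + sizeof(unsigned long),
	       sizeof(struct mock_big_rcu_head));
	printf("slab_cache offset: %zu (out of rcu_head's way)\n",
	       offsetof(struct mock_slab, slab_cache));
	return 0;
}
```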
@@ -66,9 +72,10 @@ struct slab {
 #define SLAB_MATCH(pg, sl)						\
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
-SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 #ifndef CONFIG_SLOB
-SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
+#else
+SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 #endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG

@@ -76,6 +83,9 @@ SLAB_MATCH(memcg_data, memcg_data);
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
+#endif
 
 /**
  * folio_slab - Converts from folio to slab.
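SLAB_MATCH pins the struct page / struct slab overlay at compile time: with slab_cache now where compound_head lives, a naturally aligned kmem_cache pointer keeps bit 0 clear, so a slab page cannot be misread as a tail page; the new IS_ALIGNED assert covers cmpxchg_double's requirement that freelist and counters sit on an aligned double word. The same pattern in a standalone mock (the MATCH macro here is a hypothetical stand-in for SLAB_MATCH):

```c
/*
 * The same compile-time checks in miniature, on mock structs.
 */
#include <assert.h>	/* static_assert */
#include <stddef.h>	/* offsetof */

struct mock_page { unsigned long flags; unsigned long compound_head; };
struct mock_slab { unsigned long __page_flags; void *slab_cache;
		   void *freelist; unsigned long counters; };

/* Overlaid fields must sit at identical offsets in both views. */
#define MATCH(pg, sl) \
	static_assert(offsetof(struct mock_page, pg) == \
		      offsetof(struct mock_slab, sl), #pg " vs " #sl)
MATCH(flags, __page_flags);
MATCH(compound_head, slab_cache);	/* aligned pointer => bit 0 clear */
#undef MATCH

/* cmpxchg_double needs freelist+counters on a 2*sizeof(void *) boundary. */
static_assert(offsetof(struct mock_slab, freelist) % (2 * sizeof(void *)) == 0,
	      "freelist not double-word aligned");

int main(void) { return 0; }
```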
26 mm/slub.c

@@ -1800,6 +1800,8 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
 	slab = folio_slab(folio);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	if (page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
 
@@ -1999,17 +2001,11 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
-	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
-		void *p;
-
-		slab_pad_check(s, slab);
-		for_each_object(p, s, slab_address(slab), slab->objects)
-			check_object(s, slab, p, SLUB_RED_INACTIVE);
-	}
-
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
@@ -2025,9 +2021,17 @@ static void rcu_free_slab(struct rcu_head *h)
 
 static void free_slab(struct kmem_cache *s, struct slab *slab)
 {
-	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
+	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
+		void *p;
+
+		slab_pad_check(s, slab);
+		for_each_object(p, s, slab_address(slab), slab->objects)
+			check_object(s, slab, p, SLUB_RED_INACTIVE);
+	}
+
+	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
 		call_rcu(&slab->rcu_head, rcu_free_slab);
-	} else
+	else
 		__free_slab(s, slab);
 }
 
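Moving the consistency checks from __free_slab() to free_slab() makes them run before call_rcu(): for SLAB_TYPESAFE_BY_RCU caches __free_slab() only runs from the RCU callback, by which point a grown rcu_head may have overwritten the union arm holding slab->objects that the checks read. A mock sketch of that clobbering (hypothetical grown rcu_head; not the kernel's exact layout):

```c
/*
 * Why the checks moved: once a grown rcu_head is written, the union arm
 * holding counters (and thus objects) is gone. Mock structs only.
 */
#include <stdio.h>

struct mock_list_head { void *next, *prev; };
struct mock_rcu_head {
	void *next;
	void (*func)(void *);
	void *debug[2];		/* hypothetical grown-for-debug part */
};

struct mock_slab {
	union {
		struct {
			struct mock_list_head slab_list;
			void *freelist;
			union {
				unsigned long counters;
				struct { unsigned inuse:16, objects:15, frozen:1; };
			};
		};
		struct mock_rcu_head rcu_head;	/* may now overlay objects */
	};
};

int main(void)
{
	struct mock_slab s = { .counters = 0 };

	s.objects = 42;
	printf("objects before queueing: %u\n", s.objects);
	/* What queueing for RCU effectively writes once rcu_head has grown: */
	s.rcu_head = (struct mock_rcu_head){ .debug = { (void *)1, (void *)1 } };
	printf("objects after queueing:  %u (clobbered)\n", s.objects);
	return 0;
}
```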