mm/sl*b: Differentiate struct slab fields by sl*b implementations
With a struct slab definition separate from struct page, we can go further and define only the fields that the chosen sl*b implementation uses. This means everything between the __page_flags and __page_refcount placeholders now depends on the chosen CONFIG_SL*B. Some fields exist in all implementations (slab_list) but can be part of a union in some of them, so it's simpler to repeat them than to complicate the definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although that could be done later - for example, it's now clear that tighter packing in SLOB could be possible.

This should also prevent accidental use of fields that don't exist in a given implementation. Before this patch, virt_to_cache() and cache_from_obj() were visible for SLOB (albeit not used), although they rely on the slab_cache field that isn't set by SLOB. With this patch that is now a compile error, so these functions are hidden behind an #ifndef CONFIG_SLOB.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Marco Elver <elver@google.com> # kfence
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
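In outline, abridged from the mm/slab.h diff below, the new definition reads:

	/* Reuses the bits in struct page; everything between the two
	 * placeholders is now specific to one allocator */
	struct slab {
		unsigned long __page_flags;
	#if defined(CONFIG_SLAB)
		/* SLAB-only fields */
	#elif defined(CONFIG_SLUB)
		/* SLUB-only fields */
	#elif defined(CONFIG_SLOB)
		/* SLOB-only fields */
	#else
	#error "Unexpected slab allocator configured"
	#endif
		atomic_t __page_refcount;
	#ifdef CONFIG_MEMCG
		unsigned long memcg_data;
	#endif
	};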
parent 8dae0cfed5
commit 401fb12c68

2 changed files with 43 additions and 14 deletions
mm/kfence/core.c

@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
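A note on why this hunk trades IS_ENABLED() for preprocessor conditionals: the compiler still parses and type-checks a branch whose condition folds to a constant zero, so a reference to a field that was compiled out of struct slab would now break the build. A minimal standalone sketch of the failure mode (names are illustrative, not kernel code):

	struct s {
	#ifdef HAVE_X			/* stands in for CONFIG_SLUB etc. */
		int x;
	#endif
		int y;
	};

	static void init(struct s *p)
	{
		/* "if (0) p->x = 1;" would still be type-checked and fail
		 * with "'struct s' has no member named 'x'" when HAVE_X is
		 * undefined; the preprocessor guard removes the reference
		 * before the compiler ever sees it. */
	#ifdef HAVE_X
		p->x = 1;
	#endif
		p->y = 0;
	}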
mm/slab.h (48 changed lines)
@@ -8,9 +8,24 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct { /* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */

@@ -18,25 +33,32 @@ struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
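A note on the SLOB branch above: __unused_1 and __unused_2 carry no data; they exist only so that freelist, units and __page_refcount keep the offsets the old shared definition gave them (the commit message leaves tighter packing as possible future work). That property could be checked with the same offsetof-based assertion style the file already uses - an illustrative example, not part of the patch:

	/* Illustrative only, as it would appear inside mm/slab.h (the
	 * kernel provides static_assert and offsetof): asserts the
	 * refcount placeholder still overlays page->_refcount whichever
	 * allocator is configured, mirroring SLAB_MATCH() in the next
	 * hunk. */
	static_assert(offsetof(struct slab, __page_refcount) ==
		      offsetof(struct page, _refcount),
		      "struct slab refcount placeholder moved");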
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
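For reference, SLAB_MATCH() is the file's existing layout guard, defined earlier in mm/slab.h as (roughly) a static_assert over offsetof:

	#define SLAB_MATCH(pg, sl)					\
		static_assert(offsetof(struct page, pg) ==		\
			      offsetof(struct slab, sl))

Each line above is therefore a compile-time check that a struct slab field still overlays its struct page counterpart, which is also why the rcu_head/slab_cache checks must now be limited to non-SLOB configurations and s_mem/active to SLAB: the assertions would not even parse where those fields no longer exist.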
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
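For context, the remainder of virt_to_cache() (paraphrased from mm/slab.h of this vintage; exact wording may differ) shows why the helper cannot build under SLOB - it dereferences slab_cache, a field the SLOB variant of struct slab no longer declares:

	static inline struct kmem_cache *virt_to_cache(const void *obj)
	{
		struct slab *slab;

		slab = virt_to_slab(obj);
		if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
							__func__))
			return NULL;
		return slab->slab_cache;	/* no such member under SLOB */
	}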
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {