mm/slab: use kmalloc_node() for off slab freelist_idx_t array allocation
After commit d6a71648db ("mm/slab: kmalloc: pass requests larger than order-1 page to page allocator"), SLAB passes large (> PAGE_SIZE * 2) requests to the buddy allocator, as SLUB does.

SLAB has been using kmalloc caches to allocate the freelist_idx_t array for off-slab caches, but after that commit freelist_size can be larger than KMALLOC_MAX_CACHE_SIZE. Instead of keeping a pointer to a kmalloc cache, use kmalloc_node() and only check whether the kmalloc cache is off-slab during calculate_slab_order(). If freelist_size > KMALLOC_MAX_CACHE_SIZE, no looping condition can occur, because the freelist_idx_t array is then allocated directly from the buddy allocator.

Link: https://lore.kernel.org/all/20221014205818.GA1428667@roeck-us.net/
Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Fixes: d6a71648db ("mm/slab: kmalloc: pass requests larger than order-1 page to page allocator")
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent d5eff73690
commit e36ce448a0

2 changed files with 18 additions and 18 deletions
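For orientation before the diff: a minimal userspace sketch of the sizing decision this patch introduces. PAGE_SIZE, KMALLOC_MAX_CACHE_SIZE, the freelist_idx_t width, and num are assumed example values (all are config-dependent in the kernel); only the comparison against KMALLOC_MAX_CACHE_SIZE mirrors the patch itself.

#include <stdio.h>

/* Assumed example values; in the kernel these are config-dependent. */
#define PAGE_SIZE		4096UL
#define KMALLOC_MAX_CACHE_SIZE	(PAGE_SIZE * 2)	/* largest kmalloc cache: order-1 pages */

typedef unsigned short freelist_idx_t;	/* assumed width */

int main(void)
{
	unsigned int num = 8192;	/* hypothetical objects per slab */
	size_t freelist_size = num * sizeof(freelist_idx_t);

	/* Sizes past the largest kmalloc cache go straight to the buddy
	 * allocator, so there is no kmalloc cache pointer to look up. */
	if (freelist_size > KMALLOC_MAX_CACHE_SIZE)
		printf("%zu bytes: allocated from the page allocator\n", freelist_size);
	else
		printf("%zu bytes: allocated from a kmalloc cache\n", freelist_size);
	return 0;
}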
include/linux/slab_def.h | 1
@@ -33,7 +33,6 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *freelist_cache;
 	unsigned int freelist_size;
 
 	/* constructor func */
mm/slab.c | 35
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
 	 * although actual page can be freed in rcu context
 	 */
 	if (OFF_SLAB(cachep))
-		kmem_cache_free(cachep->freelist_cache, freelist);
+		kfree(freelist);
 }
 
 /*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (flags & CFLGS_OFF_SLAB) {
 			struct kmem_cache *freelist_cache;
 			size_t freelist_size;
+			size_t freelist_cache_size;
 
 			freelist_size = num * sizeof(freelist_idx_t);
-			freelist_cache = kmalloc_slab(freelist_size, 0u);
-			if (!freelist_cache)
-				continue;
+			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
+				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
+			} else {
+				freelist_cache = kmalloc_slab(freelist_size, 0u);
+				if (!freelist_cache)
+					continue;
+				freelist_cache_size = freelist_cache->size;
 
-			/*
-			 * Needed to avoid possible looping condition
-			 * in cache_grow_begin()
-			 */
-			if (OFF_SLAB(freelist_cache))
-				continue;
+				/*
+				 * Needed to avoid possible looping condition
+				 * in cache_grow_begin()
+				 */
+				if (OFF_SLAB(freelist_cache))
+					continue;
+			}
 
 			/* check if off slab has enough benefit */
-			if (freelist_cache->size > cachep->size / 2)
+			if (freelist_cache_size > cachep->size / 2)
 				continue;
 		}
 
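On the new buddy-allocator path above, the benefit check compares against PAGE_SIZE << get_order(freelist_size) because the page allocator hands back whole power-of-two page blocks, so that is the real cost of the off-slab freelist. A small userspace model of the rounding, with a stand-in get_order() and an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Userspace stand-in for the kernel's get_order(): smallest order n
 * such that (PAGE_SIZE << n) >= size. */
static int get_order(size_t size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	size_t freelist_size = 9000;	/* hypothetical request */

	/* 9000 bytes rounds up to order 2, i.e. 16384 bytes. */
	printf("%lu\n", PAGE_SIZE << get_order(freelist_size));
	return 0;
}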
@ -2061,11 +2067,6 @@ done:
|
||||||
cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
|
cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (OFF_SLAB(cachep)) {
|
|
||||||
cachep->freelist_cache =
|
|
||||||
kmalloc_slab(cachep->freelist_size, 0u);
|
|
||||||
}
|
|
||||||
|
|
||||||
err = setup_cpu_cache(cachep, gfp);
|
err = setup_cpu_cache(cachep, gfp);
|
||||||
if (err) {
|
if (err) {
|
||||||
__kmem_cache_release(cachep);
|
__kmem_cache_release(cachep);
|
||||||
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		freelist = NULL;
 	else if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
+		freelist = kmalloc_node(cachep->freelist_size,
 						local_flags, nodeid);
 	} else {
 		/* We will use last bytes at the slab for freelist */
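Taken together, the off-slab freelist is now a plain kmalloc allocation: alloc_slabmgmt() obtains it with kmalloc_node() and slab_destroy() releases it with kfree(), which (after d6a71648db) copes with both kmalloc-cache-backed and page-allocator-backed objects. A kernel-style sketch of the pairing; the wrapper names are hypothetical:

#include <linux/slab.h>

/* Hypothetical wrappers showing the new alloc/free pairing. */
static void *freelist_alloc(size_t freelist_size, gfp_t flags, int nodeid)
{
	/* NUMA-aware; large sizes fall through to the page allocator. */
	return kmalloc_node(freelist_size, flags, nodeid);
}

static void freelist_free(void *freelist)
{
	/* Correct no matter which backend satisfied the allocation. */
	kfree(freelist);
}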