Mirror of https://gitee.com/bianbu-linux/linux-6.6 (synced 2025-04-24 14:07:52 -04:00)
User space might be creating and destroying a lot of hash maps. Synchronous
rcu_barrier() calls in the hash map destruction path delay freeing of hash
buckets and other map memory, and may cause an artificial OOM situation under
stress.

Optimize rcu_barrier usage between bpf hash map and bpf_mem_alloc:
- Remove rcu_barrier from the hash map, since htab doesn't use call_rcu
  directly and there are no callbacks to wait for.
- bpf_mem_alloc has a call_rcu_in_progress flag that indicates pending
  callbacks. Use it to avoid barriers in the fast path.
- When barriers are needed, copy bpf_mem_alloc into a temporary structure and
  wait for the rcu barriers in the worker, letting the rest of the hash map
  freeing proceed (sketched below).

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220902211058.60789-17-alexei.starovoitov@gmail.com
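The following is a minimal, hypothetical sketch of the deferred-barrier pattern the commit describes, not the upstream kernel/bpf/memalloc.c code: the names destroy_mem_alloc_sketch() and free_deferred(), and the rcu_in_progress argument, are illustrative assumptions; only struct bpf_mem_alloc (declared in the header below) and the RCU/workqueue APIs used here are real.

/* Sketch only: illustrates "copy into a temp structure and wait for the
 * rcu barriers in a worker"; not the actual kernel implementation.
 */
#include <linux/container_of.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include "bpf_mem_alloc.h"

static void free_deferred(struct work_struct *work)
{
	/* The temporary copy embeds the work item, so recover it here. */
	struct bpf_mem_alloc *copy = container_of(work, struct bpf_mem_alloc, work);

	/* Wait for pending call_rcu_tasks_trace()/call_rcu() callbacks ... */
	rcu_barrier_tasks_trace();
	rcu_barrier();
	/* ... then release the per-cpu caches and the temporary copy itself. */
	free_percpu(copy->cache);
	free_percpu(copy->caches);
	kfree(copy);
}

static void destroy_mem_alloc_sketch(struct bpf_mem_alloc *ma, bool rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path: no callbacks pending, free right away. */
		free_percpu(ma->cache);
		free_percpu(ma->caches);
		ma->cache = NULL;
		ma->caches = NULL;
		return;
	}

	/* Slow path: hand a copy of the allocator state to a worker and
	 * return immediately so the rest of the map teardown can proceed.
	 */
	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy)
		return;	/* a real implementation would wait inline instead */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_deferred);
	schedule_work(&copy->work);
}

The key design point is that the destruction path never blocks on a grace period itself: the worker owns the copied state, so the hash map can release its buckets immediately while the RCU barriers complete asynchronously.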
28 lines | 862 B | C
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef _BPF_MEM_ALLOC_H
#define _BPF_MEM_ALLOC_H
#include <linux/compiler_types.h>
#include <linux/workqueue.h>

struct bpf_mem_cache;
struct bpf_mem_caches;

struct bpf_mem_alloc {
	struct bpf_mem_caches __percpu *caches;
	struct bpf_mem_cache __percpu *cache;
	struct work_struct work;
};

int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);

/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);

#endif /* _BPF_MEM_ALLOC_H */
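For readers unfamiliar with the two call styles declared above, here is a hedged usage sketch (not taken from the tree): struct my_elem and the my_*() helpers are hypothetical, and the sketch assumes that a non-zero size passed to bpf_mem_alloc_init() selects the fixed-size, kmem_cache-style mode served by bpf_mem_cache_alloc()/bpf_mem_cache_free(), while size == 0 selects the multi-size bpf_mem_alloc()/bpf_mem_free() path.

/* Hypothetical caller, for illustration only. */
#include <linux/errno.h>
#include <linux/types.h>
#include "bpf_mem_alloc.h"

struct my_elem {		/* hypothetical element type */
	u32 key;
	u64 value;
};

static struct bpf_mem_alloc my_ma;

static int my_setup(void)
{
	/* Fixed-size, non-percpu cache sized for struct my_elem. */
	return bpf_mem_alloc_init(&my_ma, sizeof(struct my_elem), false);
}

static int my_use(void)
{
	struct my_elem *e = bpf_mem_cache_alloc(&my_ma);

	if (!e)
		return -ENOMEM;
	e->key = 1;
	e->value = 42;
	bpf_mem_cache_free(&my_ma, e);
	return 0;
}

static void my_teardown(void)
{
	/* Tears down the caches; pending RCU callbacks are handled as
	 * described in the commit message above.
	 */
	bpf_mem_alloc_destroy(&my_ma);
}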