[lvc-project] [PATCH 6.1 03/16] bpf: Rename few bpf_mem_alloc fields.
Fedor Pchelkin
pchelkin at ispras.ru
Mon Feb 3 00:35:55 MSK 2025
On Sun, 02. Feb 07:46, Alexey Nepomnyashih wrote:
> From: Alexei Starovoitov <ast at kernel.org>
>
> commit 12c8d0f4c8702f88a74973fb7ced85b59043b0ab upstream.
>
> Rename:
> - struct rcu_head rcu;
> - struct llist_head free_by_rcu;
> - struct llist_head waiting_for_gp;
> - atomic_t call_rcu_in_progress;
> + struct llist_head free_by_rcu_ttrace;
> + struct llist_head waiting_for_gp_ttrace;
> + struct rcu_head rcu_ttrace;
> + atomic_t call_rcu_ttrace_in_progress;
> ...
> - static void do_call_rcu(struct bpf_mem_cache *c)
> + static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
>
> to better indicate intended use.
>
> The 'tasks trace' is shortened to 'ttrace' to reduce verbosity.
> No functional changes.
>
> Later patches will add free_by_rcu/waiting_for_gp fields to be used with normal RCU.
>
> Signed-off-by: Alexei Starovoitov <ast at kernel.org>
> Signed-off-by: Daniel Borkmann <daniel at iogearbox.net>
> Acked-by: Hou Tao <houtao1 at huawei.com>
> Link: https://lore.kernel.org/bpf/20230706033447.54696-2-alexei.starovoitov@gmail.com
> Signed-off-by: Alexey Nepomnyashih <sdl at nppct.ru>
> ---
Also, as a result of this commit, a build error is observed:
In file included from ./include/linux/bits.h:22,
from ./include/linux/ratelimit_types.h:5,
from ./include/linux/printk.h:9,
from ./include/asm-generic/bug.h:22,
from ./arch/x86/include/asm/bug.h:87,
from ./include/linux/bug.h:5,
from ./include/linux/mmdebug.h:5,
from ./include/linux/mm.h:6,
from kernel/bpf/memalloc.c:3:
kernel/bpf/memalloc.c: In function ‘__free_rcu_tasks_trace’:
./include/linux/container_of.h:19:54: error: ‘struct bpf_mem_cache’ has no member named ‘rcu’
19 | static_assert(__same_type(*(ptr), ((type *)0)->member) || \
| ^~
./include/linux/build_bug.h:78:56: note: in definition of macro ‘__static_assert’
78 | #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
| ^~~~
./include/linux/container_of.h:19:9: note: in expansion of macro ‘static_assert’
19 | static_assert(__same_type(*(ptr), ((type *)0)->member) || \
| ^~~~~~~~~~~~~
./include/linux/container_of.h:19:23: note: in expansion of macro ‘__same_type’
19 | static_assert(__same_type(*(ptr), ((type *)0)->member) || \
| ^~~~~~~~~~~
kernel/bpf/memalloc.c:244:35: note: in expansion of macro ‘container_of’
244 | struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
| ^~~~~~~~~~~~
././include/linux/compiler_types.h:316:27: error: expression in static assertion is not an integer
316 | #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
./include/linux/build_bug.h:78:56: note: in definition of macro ‘__static_assert’
78 | #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
| ^~~~
./include/linux/container_of.h:19:9: note: in expansion of macro ‘static_assert’
19 | static_assert(__same_type(*(ptr), ((type *)0)->member) || \
| ^~~~~~~~~~~~~
./include/linux/container_of.h:19:23: note: in expansion of macro ‘__same_type’
19 | static_assert(__same_type(*(ptr), ((type *)0)->member) || \
| ^~~~~~~~~~~
kernel/bpf/memalloc.c:244:35: note: in expansion of macro ‘container_of’
244 | struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
| ^~~~~~~~~~~~
In file included from ./include/uapi/linux/posix_types.h:5,
from ./include/uapi/linux/types.h:14,
from ./include/linux/types.h:6,
from ./include/linux/objtool.h:7,
from ./arch/x86/include/asm/bug.h:7:
./include/linux/stddef.h:16:33: error: ‘struct bpf_mem_cache’ has no member named ‘rcu’
16 | #define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
| ^~~~~~~~~~~~~~~~~~
./include/linux/container_of.h:22:28: note: in expansion of macro ‘offsetof’
22 | ((type *)(__mptr - offsetof(type, member))); })
| ^~~~~~~~
kernel/bpf/memalloc.c:244:35: note: in expansion of macro ‘container_of’
244 | struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
| ^~~~~~~~~~~~
kernel/bpf/memalloc.c:246:20: error: ‘struct bpf_mem_cache’ has no member named ‘rcu’
246 | call_rcu(&c->rcu, __free_rcu);
| ^~
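The error points at __free_rcu_tasks_trace(): the rename is applied to the struct bpf_mem_cache fields, but this function still references the old rcu member at kernel/bpf/memalloc.c:244 and :246. Judging by the rename scheme stated in the commit message, the hunk updating this function was presumably dropped during the backport; the function should likely read as follows (a sketch following the upstream naming, not verified against the 6.1 tree):

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* the rcu field was renamed to rcu_ttrace, so use the new name here too */
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	/* after the tasks-trace GP, wait for a normal RCU GP before freeing */
	call_rcu(&c->rcu_ttrace, __free_rcu);
}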
> kernel/bpf/memalloc.c | 57 ++++++++++++++++++++++---------------------
> 1 file changed, 29 insertions(+), 28 deletions(-)
>
> diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
> index b9bdc9d81b9c..63b787128de8 100644
> --- a/kernel/bpf/memalloc.c
> +++ b/kernel/bpf/memalloc.c
> @@ -99,10 +99,11 @@ struct bpf_mem_cache {
> int low_watermark, high_watermark, batch;
> int percpu_size;
>
> - struct rcu_head rcu;
> - struct llist_head free_by_rcu;
> - struct llist_head waiting_for_gp;
> - atomic_t call_rcu_in_progress;
> + /* list of objects to be freed after RCU tasks trace GP */
> + struct llist_head free_by_rcu_ttrace;
> + struct llist_head waiting_for_gp_ttrace;
> + struct rcu_head rcu_ttrace;
> + atomic_t call_rcu_ttrace_in_progress;
> };
>
> struct bpf_mem_caches {
> @@ -165,18 +166,18 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
> old_memcg = set_active_memcg(memcg);
> for (i = 0; i < cnt; i++) {
> /*
> - * free_by_rcu is only manipulated by irq work refill_work().
> + * free_by_rcu_ttrace is only manipulated by irq work refill_work().
> * IRQ works on the same CPU are called sequentially, so it is
> * safe to use __llist_del_first() here. If alloc_bulk() is
> * invoked by the initial prefill, there will be no running
> * refill_work(), so __llist_del_first() is fine as well.
> *
> - * In most cases, objects on free_by_rcu are from the same CPU.
> + * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
> * If some objects come from other CPUs, it doesn't incur any
> * harm because NUMA_NO_NODE means the preference for current
> * numa node and it is not a guarantee.
> */
> - obj = __llist_del_first(&c->free_by_rcu);
> + obj = __llist_del_first(&c->free_by_rcu_ttrace);
> if (!obj) {
> /* Allocate, but don't deplete atomic reserves that typical
> * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
> @@ -232,10 +233,10 @@ static void free_all(struct llist_node *llnode, bool percpu)
>
> static void __free_rcu(struct rcu_head *head)
> {
> - struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
> + struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
>
> - free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
> - atomic_set(&c->call_rcu_in_progress, 0);
> + free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
> + atomic_set(&c->call_rcu_ttrace_in_progress, 0);
> }
>
> static void __free_rcu_tasks_trace(struct rcu_head *head)
> @@ -250,31 +251,31 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
> struct llist_node *llnode = obj;
>
> /* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
> - * Nothing races to add to free_by_rcu list.
> + * Nothing races to add to free_by_rcu_ttrace list.
> */
> - __llist_add(llnode, &c->free_by_rcu);
> + __llist_add(llnode, &c->free_by_rcu_ttrace);
> }
>
> -static void do_call_rcu(struct bpf_mem_cache *c)
> +static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
> {
> struct llist_node *llnode, *t;
>
> - if (atomic_xchg(&c->call_rcu_in_progress, 1))
> + if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1))
> return;
>
> - WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
> - llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
> - /* There is no concurrent __llist_add(waiting_for_gp) access.
> + WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
> + llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu_ttrace))
> + /* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
> * It doesn't race with llist_del_all either.
> - * But there could be two concurrent llist_del_all(waiting_for_gp):
> + * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
> * from __free_rcu() and from drain_mem_cache().
> */
> - __llist_add(llnode, &c->waiting_for_gp);
> + __llist_add(llnode, &c->waiting_for_gp_ttrace);
> /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
> * Then use call_rcu() to wait for normal progs to finish
> * and finally do free_one() on each element.
> */
> - call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
> + call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
> }
>
> static void free_bulk(struct bpf_mem_cache *c)
> @@ -302,7 +303,7 @@ static void free_bulk(struct bpf_mem_cache *c)
> /* and drain free_llist_extra */
> llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
> enque_to_free(c, llnode);
> - do_call_rcu(c);
> + do_call_rcu_ttrace(c);
> }
>
> static void bpf_mem_refill(struct irq_work *work)
> @@ -435,13 +436,13 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
>
> /* No progs are using this bpf_mem_cache, but htab_map_free() called
> * bpf_mem_cache_free() for all remaining elements and they can be in
> - * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
> + * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
> *
> - * Except for waiting_for_gp list, there are no concurrent operations
> + * Except for waiting_for_gp_ttrace list, there are no concurrent operations
> * on these lists, so it is safe to use __llist_del_all().
> */
> - free_all(__llist_del_all(&c->free_by_rcu), percpu);
> - free_all(llist_del_all(&c->waiting_for_gp), percpu);
> + free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
> + free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
> free_all(__llist_del_all(&c->free_llist), percpu);
> free_all(__llist_del_all(&c->free_llist_extra), percpu);
> }
> @@ -456,7 +457,7 @@ static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
>
> static void free_mem_alloc(struct bpf_mem_alloc *ma)
> {
> - /* waiting_for_gp lists was drained, but __free_rcu might
> + /* waiting_for_gp_ttrace lists was drained, but __free_rcu might
> * still execute. Wait for it now before we freeing percpu caches.
> */
> rcu_barrier_tasks_trace();
> @@ -521,7 +522,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
> */
> irq_work_sync(&c->refill_work);
> drain_mem_cache(c);
> - rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
> + rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
> }
> /* objcg is the same across cpus */
> if (c->objcg)
> @@ -536,7 +537,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
> c = &cc->cache[i];
> irq_work_sync(&c->refill_work);
> drain_mem_cache(c);
> - rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
> + rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
> }
> }
> if (c->objcg)
> --
> 2.43.0