BPF List
 help / color / mirror / Atom feed
From: Yonghong Song <yonghong.song@linux.dev>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	"Jose E . Marchesi" <jose.marchesi@oracle.com>,
	kernel-team@fb.com, Martin KaFai Lau <martin.lau@kernel.org>
Subject: [PATCH bpf-next v3 02/24] bpf: Remove copy_register_state wrapper function
Date: Sun, 10 May 2026 22:33:12 -0700	[thread overview]
Message-ID: <20260511053312.1881536-1-yonghong.song@linux.dev> (raw)
In-Reply-To: <20260511053301.1878610-1-yonghong.song@linux.dev>

Remove the copy_register_state() helper, which was just a plain struct
assignment wrapper, and replace all call sites with direct struct
assignment. This simplifies the code in preparation for upcoming stack
argument support.

No functional change.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
 kernel/bpf/verifier.c | 44 +++++++++++++++++++------------------------
 1 file changed, 19 insertions(+), 25 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 11054ad89c14..3bafb7ad2ba7 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3410,12 +3410,6 @@ static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
 		src_reg->id = ++env->id_gen;
 }
 
-/* Copy src state preserving dst->parent and dst->live fields */
-static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
-{
-	*dst = *src;
-}
-
 static void save_register_state(struct bpf_verifier_env *env,
 				struct bpf_func_state *state,
 				int spi, struct bpf_reg_state *reg,
@@ -3423,7 +3417,7 @@ static void save_register_state(struct bpf_verifier_env *env,
 {
 	int i;
 
-	copy_register_state(&state->stack[spi].spilled_ptr, reg);
+	state->stack[spi].spilled_ptr = *reg;
 
 	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
 		state->stack[spi].slot_type[i - 1] = STACK_SPILL;
@@ -3822,7 +3816,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 					 * with the destination register on fill.
 					 */
 					assign_scalar_id_before_mov(env, reg);
-				copy_register_state(&state->regs[dst_regno], reg);
+				state->regs[dst_regno] = *reg;
 				state->regs[dst_regno].subreg_def = subreg_def;
 
 				/* Break the relation on a narrowing fill.
@@ -3877,7 +3871,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 				 * with the destination register on fill.
 				 */
 				assign_scalar_id_before_mov(env, reg);
-			copy_register_state(&state->regs[dst_regno], reg);
+			state->regs[dst_regno] = *reg;
 			/* mark reg as written since spilled pointer state likely
 			 * has its liveness marks cleared by is_state_visited()
 			 * which resets stack/reg liveness for state transitions
@@ -6031,7 +6025,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, struct b
 						     size);
 					return -EACCES;
 				}
-				copy_register_state(&regs[value_regno], reg);
+				regs[value_regno] = *reg;
 				add_scalar_to_reg(&regs[value_regno], off);
 				regs[value_regno].type = PTR_TO_INSN;
 			} else {
@@ -13248,7 +13242,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	 */
 	if (!ptr_is_dst_reg) {
 		tmp = *dst_reg;
-		copy_register_state(dst_reg, ptr_reg);
+		*dst_reg = *ptr_reg;
 	}
 	err = sanitize_speculative_path(env, NULL, env->insn_idx + 1, env->insn_idx);
 	if (err < 0)
@@ -14698,7 +14692,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 					 * copy register state to dest reg
 					 */
 					assign_scalar_id_before_mov(env, src_reg);
-					copy_register_state(dst_reg, src_reg);
+					*dst_reg = *src_reg;
 					dst_reg->subreg_def = DEF_NOT_SUBREG;
 				} else {
 					/* case: R1 = (s8, s16 s32)R2 */
@@ -14713,7 +14707,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 						no_sext = reg_umax(src_reg) < (1ULL << (insn->off - 1));
 						if (no_sext)
 							assign_scalar_id_before_mov(env, src_reg);
-						copy_register_state(dst_reg, src_reg);
+						*dst_reg = *src_reg;
 						if (!no_sext)
 							clear_scalar_id(dst_reg);
 						coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
@@ -14735,7 +14729,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 						if (is_src_reg_u32)
 							assign_scalar_id_before_mov(env, src_reg);
-						copy_register_state(dst_reg, src_reg);
+						*dst_reg = *src_reg;
 						/* Make sure ID is cleared if src_reg is not in u32
 						 * range otherwise dst_reg min/max could be incorrectly
 						 * propagated into src_reg by sync_linked_regs()
@@ -14749,7 +14743,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 						if (no_sext)
 							assign_scalar_id_before_mov(env, src_reg);
-						copy_register_state(dst_reg, src_reg);
+						*dst_reg = *src_reg;
 						if (!no_sext)
 							clear_scalar_id(dst_reg);
 						dst_reg->subreg_def = env->insn_idx + 1;
@@ -15629,7 +15623,7 @@ static void sync_linked_regs(struct bpf_verifier_env *env, struct bpf_verifier_s
 		    reg->delta == known_reg->delta) {
 			s32 saved_subreg_def = reg->subreg_def;
 
-			copy_register_state(reg, known_reg);
+			*reg = *known_reg;
 			reg->subreg_def = saved_subreg_def;
 		} else {
 			s32 saved_subreg_def = reg->subreg_def;
@@ -15640,7 +15634,7 @@ static void sync_linked_regs(struct bpf_verifier_env *env, struct bpf_verifier_s
 			__mark_reg_known(&fake_reg, (s64)reg->delta - (s64)known_reg->delta);
 
 			/* reg = known_reg; reg += delta */
-			copy_register_state(reg, known_reg);
+			*reg = *known_reg;
 			/*
 			 * Must preserve off, id and subreg_def flag,
 			 * otherwise another sync_linked_regs() will be incorrect.
@@ -15743,10 +15737,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	}
 
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
-	copy_register_state(&env->false_reg1, dst_reg);
-	copy_register_state(&env->false_reg2, src_reg);
-	copy_register_state(&env->true_reg1, dst_reg);
-	copy_register_state(&env->true_reg2, src_reg);
+	env->false_reg1 = *dst_reg;
+	env->false_reg2 = *src_reg;
+	env->true_reg1 = *dst_reg;
+	env->true_reg2 = *src_reg;
 	pred = is_branch_taken(env, dst_reg, src_reg, opcode, is_jmp32);
 	if (pred >= 0) {
 		/* If we get here with a dst_reg pointer type it is because
@@ -15815,11 +15809,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	if (err)
 		return err;
 
-	copy_register_state(dst_reg, &env->false_reg1);
-	copy_register_state(src_reg, &env->false_reg2);
-	copy_register_state(&other_branch_regs[insn->dst_reg], &env->true_reg1);
+	*dst_reg = env->false_reg1;
+	*src_reg = env->false_reg2;
+	other_branch_regs[insn->dst_reg] = env->true_reg1;
 	if (BPF_SRC(insn->code) == BPF_X)
-		copy_register_state(&other_branch_regs[insn->src_reg], &env->true_reg2);
+		other_branch_regs[insn->src_reg] = env->true_reg2;
 
 	if (BPF_SRC(insn->code) == BPF_X &&
 	    src_reg->type == SCALAR_VALUE && src_reg->id &&
-- 
2.53.0-Meta


  parent reply	other threads:[~2026-05-11  5:33 UTC|newest]

Thread overview: 49+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-11  5:33 [PATCH bpf-next v3 00/24] bpf: Support stack arguments for BPF functions and kfuncs Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 01/24] bpf: Convert bpf_get_spilled_reg macro to static inline function Yonghong Song
2026-05-11  5:33 ` Yonghong Song [this message]
2026-05-11  5:33 ` [PATCH bpf-next v3 03/24] bpf: Add helper functions for r11-based stack argument insns Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 04/24] bpf: Set sub->arg_cnt earlier in btf_prepare_func_args() Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11 16:29     ` Yonghong Song
2026-05-11 17:18       ` Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 05/24] bpf: Support stack arguments for bpf functions Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11 15:46     ` Yonghong Song
2026-05-11 16:05       ` Alexei Starovoitov
2026-05-11 16:21         ` Yonghong Song
2026-05-12  4:17         ` Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 06/24] bpf: Refactor jmp history to use dedicated spi/frame fields Yonghong Song
2026-05-11 16:17   ` Alexei Starovoitov
2026-05-11 16:33     ` Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 07/24] bpf: Add precision marking and backtracking for stack argument slots Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11  5:33 ` [PATCH bpf-next v3 08/24] bpf: Refactor record_call_access() to extract per-arg logic Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 09/24] bpf: Extend liveness analysis to track stack argument slots Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11 16:35     ` Yonghong Song
2026-05-11 16:34   ` Alexei Starovoitov
2026-05-11 16:40     ` Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 10/24] bpf: Reject stack arguments in non-JITed programs Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11 16:42     ` Yonghong Song
2026-05-11  5:33 ` [PATCH bpf-next v3 11/24] bpf: Prepare architecture JIT support for stack arguments Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 12/24] bpf: Enable r11 based insns Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 13/24] bpf: Support stack arguments for kfunc calls Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 14/24] bpf: Reject stack arguments if tail call reachable Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11  5:34 ` [PATCH bpf-next v3 15/24] bpf: Pass bpf_subprog_info to bpf_int_jit_compile() Yonghong Song
2026-05-11 16:38   ` Alexei Starovoitov
2026-05-11 16:47     ` Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 16/24] bpf,x86: Implement JIT support for stack arguments Yonghong Song
2026-05-11 16:39   ` Alexei Starovoitov
2026-05-11 16:47     ` Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 17/24] selftests/bpf: Add tests for BPF function " Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 18/24] selftests/bpf: Add tests for stack argument validation Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 19/24] selftests/bpf: Add BTF fixup for __naked subprog parameter names Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 20/24] selftests/bpf: Add verifier tests for stack argument validation Yonghong Song
2026-05-11  6:19   ` bot+bpf-ci
2026-05-11 16:49     ` Yonghong Song
2026-05-11  5:34 ` [PATCH bpf-next v3 21/24] selftests/bpf: Add precision backtracking test for stack arguments Yonghong Song
2026-05-11  5:35 ` [PATCH bpf-next v3 22/24] bpf, arm64: Map BPF_REG_0 to x8 instead of x7 Yonghong Song
2026-05-11  5:35 ` [PATCH bpf-next v3 23/24] bpf, arm64: Add JIT support for stack arguments Yonghong Song
2026-05-11  5:35 ` [PATCH bpf-next v3 24/24] selftests/bpf: Enable stack argument tests for arm64 Yonghong Song

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260511053312.1881536-1-yonghong.song@linux.dev \
    --to=yonghong.song@linux.dev \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=jose.marchesi@oracle.com \
    --cc=kernel-team@fb.com \
    --cc=martin.lau@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox