public inbox for bpf@vger.kernel.org
 help / color / mirror / Atom feed
From: Jiri Olsa <olsajiri@gmail.com>
To: Dave Marchevsky <davemarchevsky@fb.com>
Cc: bpf@vger.kernel.org, Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Martin KaFai Lau <martin.lau@kernel.org>,
	Kernel Team <kernel-team@fb.com>
Subject: Re: [PATCH v1 bpf-next 2/4] bpf: Refactor btf_find_field with btf_field_info_search
Date: Sat, 28 Oct 2023 16:52:07 +0200	[thread overview]
Message-ID: <ZT0gF7kRnen7x14H@krava> (raw)
In-Reply-To: <20231023220030.2556229-3-davemarchevsky@fb.com>

On Mon, Oct 23, 2023 at 03:00:28PM -0700, Dave Marchevsky wrote:

SNIP

>  
>  #undef field_mask_test_name_check_seen
>  #undef field_mask_test_name
>  
> +static int __struct_member_check_align(u32 off, enum btf_field_type field_type)
> +{
> +	u32 align = btf_field_type_align(field_type);
> +
> +	if (off % align)
> +		return -EINVAL;
> +	return 0;
> +}
> +
>  static int btf_find_struct_field(const struct btf *btf,
> -				 const struct btf_type *t, u32 field_mask,
> -				 struct btf_field_info *info, int info_cnt)
> +				 const struct btf_type *t,
> +				 struct btf_field_info_search *srch)
>  {
> -	int ret, idx = 0, align, sz, field_type;
>  	const struct btf_member *member;
> -	struct btf_field_info tmp;
> -	u32 i, off, seen_mask = 0;
> +	int ret, field_type;
> +	u32 i, off, sz;
>  
>  	for_each_member(i, t, member) {
>  		const struct btf_type *member_type = btf_type_by_id(btf,
>  								    member->type);
> -
> -		field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
> -						field_mask, &seen_mask, &align, &sz);
> -		if (field_type == 0)
> -			continue;
> -		if (field_type < 0)
> -			return field_type;
> -
>  		off = __btf_member_bit_offset(t, member);
>  		if (off % 8)
>  			/* valid C code cannot generate such BTF */
>  			return -EINVAL;
>  		off /= 8;
> -		if (off % align)
> +
> +		field_type = btf_get_field_type_by_name(btf, member_type, srch);
> +		if (field_type < 0)
> +			return field_type;
> +
> +		if (field_type == 0) {
> +			/* Maybe it's a kptr. Use BPF_KPTR_REF for align
> +			 * checks, all ptrs have same align.
> +			 * btf_maybe_find_kptr will find actual kptr type
> +			 */
> +			if (__struct_member_check_align(off, BPF_KPTR_REF))
> +				continue;
> +
> +			ret = btf_maybe_find_kptr(btf, member_type, off, srch);
> +			if (ret < 0)
> +				return ret;
> +			continue;
> +		}

would it be possible to split the change to:
  - factor the arguments to 'struct btf_field_info_search *srch' and
    passing it to btf_find_field and all related descendant calls
  - factor the alignment handling

I can't see these two changes being dependent on each other;
if that's the case, I think splitting them would make the change simpler

jirka

> +
> +		sz = btf_field_type_size(field_type);
> +		if (__struct_member_check_align(off, field_type))
>  			continue;
>  
>  		switch (field_type) {
> @@ -3453,64 +3542,81 @@ static int btf_find_struct_field(const struct btf *btf,
>  		case BPF_RB_NODE:
>  		case BPF_REFCOUNT:
>  			ret = btf_find_struct(btf, member_type, off, sz, field_type,
> -					      idx < info_cnt ? &info[idx] : &tmp);
> -			if (ret < 0)
> -				return ret;
> -			break;
> -		case BPF_KPTR_UNREF:
> -		case BPF_KPTR_REF:
> -		case BPF_KPTR_PERCPU:
> -			ret = btf_find_kptr(btf, member_type, off, sz,
> -					    idx < info_cnt ? &info[idx] : &tmp);
> +					      srch);
>  			if (ret < 0)
>  				return ret;
>  			break;
>  		case BPF_LIST_HEAD:
>  		case BPF_RB_ROOT:
>  			ret = btf_find_graph_root(btf, t, member_type,
> -						  i, off, sz,
> -						  idx < info_cnt ? &info[idx] : &tmp,
> -						  field_type);
> +						  i, off, sz, srch, field_type);
>  			if (ret < 0)
>  				return ret;
>  			break;
> +		/* kptr fields are not handled in this switch, see
> +		 * btf_maybe_find_kptr above
> +		 */
> +		case BPF_KPTR_UNREF:
> +		case BPF_KPTR_REF:
> +		case BPF_KPTR_PERCPU:
>  		default:
>  			return -EFAULT;
>  		}
> -
> -		if (ret == BTF_FIELD_IGNORE)
> -			continue;
> -		if (idx >= info_cnt)
> -			return -E2BIG;
> -		++idx;
>  	}
> -	return idx;
> +	return srch->idx;
> +}
> +
> +static int __datasec_vsi_check_align_sz(const struct btf_var_secinfo *vsi,
> +					enum btf_field_type field_type,
> +					u32 expected_sz)
> +{
> +	u32 off, align;
> +
> +	off = vsi->offset;
> +	align = btf_field_type_align(field_type);
> +
> +	if (vsi->size != expected_sz)
> +		return -EINVAL;
> +	if (off % align)
> +		return -EINVAL;
> +
> +	return 0;
>  }
>  
>  static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
> -				u32 field_mask, struct btf_field_info *info,
> -				int info_cnt)
> +				struct btf_field_info_search *srch)
>  {
> -	int ret, idx = 0, align, sz, field_type;
>  	const struct btf_var_secinfo *vsi;
> -	struct btf_field_info tmp;
> -	u32 i, off, seen_mask = 0;
> +	int ret, field_type;
> +	u32 i, off, sz;
>  
>  	for_each_vsi(i, t, vsi) {
>  		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
>  		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
>  
> -		field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
> -						field_mask, &seen_mask, &align, &sz);
> -		if (field_type == 0)
> -			continue;
> +		off = vsi->offset;
> +		field_type = btf_get_field_type_by_name(btf, var_type, srch);
>  		if (field_type < 0)
>  			return field_type;
>  
> -		off = vsi->offset;
> -		if (vsi->size != sz)
> +		if (field_type == 0) {
> +			/* Maybe it's a kptr. Use BPF_KPTR_REF for sz / align
> +			 * checks, all ptrs have same sz / align.
> +			 * btf_maybe_find_kptr will find actual kptr type
> +			 */
> +			sz = btf_field_type_size(BPF_KPTR_REF);
> +			if (__datasec_vsi_check_align_sz(vsi, BPF_KPTR_REF, sz))
> +				continue;
> +
> +			ret = btf_maybe_find_kptr(btf, var_type, off, srch);
> +			if (ret < 0)
> +				return ret;
>  			continue;
> -		if (off % align)
> +		}
> +
> +		sz = btf_field_type_size(field_type);
> +
> +		if (__datasec_vsi_check_align_sz(vsi, field_type, sz))
>  			continue;
>  
>  		switch (field_type) {
> @@ -3520,48 +3626,38 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
>  		case BPF_RB_NODE:
>  		case BPF_REFCOUNT:
>  			ret = btf_find_struct(btf, var_type, off, sz, field_type,
> -					      idx < info_cnt ? &info[idx] : &tmp);
> -			if (ret < 0)
> -				return ret;
> -			break;
> -		case BPF_KPTR_UNREF:
> -		case BPF_KPTR_REF:
> -		case BPF_KPTR_PERCPU:
> -			ret = btf_find_kptr(btf, var_type, off, sz,
> -					    idx < info_cnt ? &info[idx] : &tmp);
> +					      srch);
>  			if (ret < 0)
>  				return ret;
>  			break;
>  		case BPF_LIST_HEAD:
>  		case BPF_RB_ROOT:
>  			ret = btf_find_graph_root(btf, var, var_type,
> -						  -1, off, sz,
> -						  idx < info_cnt ? &info[idx] : &tmp,
> +						  -1, off, sz, srch,
>  						  field_type);
>  			if (ret < 0)
>  				return ret;
>  			break;
> +		/* kptr fields are not handled in this switch, see
> +		 * btf_maybe_find_kptr above
> +		 */
> +		case BPF_KPTR_UNREF:
> +		case BPF_KPTR_REF:
> +		case BPF_KPTR_PERCPU:
>  		default:
>  			return -EFAULT;
>  		}
> -
> -		if (ret == BTF_FIELD_IGNORE)
> -			continue;
> -		if (idx >= info_cnt)
> -			return -E2BIG;
> -		++idx;
>  	}
> -	return idx;
> +	return srch->idx;
>  }
>  
>  static int btf_find_field(const struct btf *btf, const struct btf_type *t,
> -			  u32 field_mask, struct btf_field_info *info,
> -			  int info_cnt)
> +			  struct btf_field_info_search *srch)
>  {
>  	if (__btf_type_is_struct(t))
> -		return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
> +		return btf_find_struct_field(btf, t, srch);
>  	else if (btf_type_is_datasec(t))
> -		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
> +		return btf_find_datasec_var(btf, t, srch);
>  	return -EINVAL;
>  }
>  
> @@ -3729,47 +3825,51 @@ static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
>  struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
>  				    u32 field_mask, u32 value_size)
>  {
> -	struct btf_field_info info_arr[BTF_FIELDS_MAX];
> +	struct btf_field_info_search srch;
>  	u32 next_off = 0, field_type_size;
> +	struct btf_field_info *info;
>  	struct btf_record *rec;
>  	int ret, i, cnt;
>  
> -	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
> -	if (ret < 0)
> -		return ERR_PTR(ret);
> -	if (!ret)
> -		return NULL;
> +	memset(&srch, 0, sizeof(srch));
> +	srch.field_mask = field_mask;
> +	ret = btf_find_field(btf, t, &srch);
> +	if (ret <= 0)
> +		goto end_srch;
>  
>  	cnt = ret;
>  	/* This needs to be kzalloc to zero out padding and unused fields, see
>  	 * comment in btf_record_equal.
>  	 */
>  	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
> -	if (!rec)
> -		return ERR_PTR(-ENOMEM);
> +	if (!rec) {
> +		ret = -ENOMEM;
> +		goto end_srch;
> +	}
>  
>  	rec->spin_lock_off = -EINVAL;
>  	rec->timer_off = -EINVAL;
>  	rec->refcount_off = -EINVAL;
>  	for (i = 0; i < cnt; i++) {
> -		field_type_size = btf_field_type_size(info_arr[i].type);
> -		if (info_arr[i].off + field_type_size > value_size) {
> -			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
> +		info = &srch.infos[i];
> +		field_type_size = btf_field_type_size(info->type);
> +		if (info->off + field_type_size > value_size) {
> +			WARN_ONCE(1, "verifier bug off %d size %d", info->off, value_size);
>  			ret = -EFAULT;
>  			goto end;
>  		}
> -		if (info_arr[i].off < next_off) {
> +		if (info->off < next_off) {
>  			ret = -EEXIST;
>  			goto end;
>  		}
> -		next_off = info_arr[i].off + field_type_size;
> +		next_off = info->off + field_type_size;
>  
> -		rec->field_mask |= info_arr[i].type;
> -		rec->fields[i].offset = info_arr[i].off;
> -		rec->fields[i].type = info_arr[i].type;
> +		rec->field_mask |= info->type;
> +		rec->fields[i].offset = info->off;
> +		rec->fields[i].type = info->type;
>  		rec->fields[i].size = field_type_size;
>  
> -		switch (info_arr[i].type) {
> +		switch (info->type) {
>  		case BPF_SPIN_LOCK:
>  			WARN_ON_ONCE(rec->spin_lock_off >= 0);
>  			/* Cache offset for faster lookup at runtime */
> @@ -3788,17 +3888,17 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
>  		case BPF_KPTR_UNREF:
>  		case BPF_KPTR_REF:
>  		case BPF_KPTR_PERCPU:
> -			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_kptr(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
>  		case BPF_LIST_HEAD:
> -			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_list_head(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
>  		case BPF_RB_ROOT:
> -			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_rb_root(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
> @@ -3828,10 +3928,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
>  
>  	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
>  	       NULL, rec);
> +	kfree(srch.infos);
>  
>  	return rec;
>  end:
>  	btf_record_free(rec);
> +end_srch:
> +	kfree(srch.infos);
>  	return ERR_PTR(ret);
>  }
>  
> -- 
> 2.34.1
> 
> 

  reply	other threads:[~2023-10-28 14:52 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-10-23 22:00 [PATCH v1 bpf-next 0/4] Descend into struct, array types when searching for fields Dave Marchevsky
2023-10-23 22:00 ` [PATCH v1 bpf-next 1/4] bpf: Fix btf_get_field_type to fail for multiple bpf_refcount fields Dave Marchevsky
2023-10-30 17:56   ` Yonghong Song
2023-11-01 21:56   ` Andrii Nakryiko
2023-10-23 22:00 ` [PATCH v1 bpf-next 2/4] bpf: Refactor btf_find_field with btf_field_info_search Dave Marchevsky
2023-10-28 14:52   ` Jiri Olsa [this message]
2023-10-30 19:31   ` Yonghong Song
2023-10-30 19:56     ` Yonghong Song
2023-11-01 21:56   ` Andrii Nakryiko
2023-10-23 22:00 ` [PATCH v1 bpf-next 3/4] btf: Descend into structs and arrays during special field search Dave Marchevsky
2023-10-26  1:24   ` kernel test robot
2023-10-30 12:56   ` Jiri Olsa
2023-10-30 20:56   ` Yonghong Song
2023-11-01 21:56   ` Andrii Nakryiko
2023-10-23 22:00 ` [PATCH v1 bpf-next 4/4] selftests/bpf: Add tests exercising aggregate type BTF " Dave Marchevsky
2023-10-23 23:32   ` kernel test robot
2023-10-30 21:10   ` Yonghong Song
2023-11-01 21:56   ` Andrii Nakryiko

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZT0gF7kRnen7x14H@krava \
    --to=olsajiri@gmail.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davemarchevsky@fb.com \
    --cc=kernel-team@fb.com \
    --cc=martin.lau@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox