From: David Ahern <dsahern@kernel.org>
To: Vladimir Oltean <vladimir.oltean@nxp.com>, netdev@vger.kernel.org
Cc: Stephen Hemminger <stephen@networkplumber.org>,
	Vinicius Costa Gomes <vinicius.gomes@intel.com>,
	Jamal Hadi Salim <jhs@mojatatu.com>,
	Cong Wang <xiyou.wangcong@gmail.com>,
	Jiri Pirko <jiri@resnulli.us>
Subject: Re: [PATCH v2 iproute2-next 2/2] taprio: support dumping and setting per-tc max SDU
Date: Sun, 9 Oct 2022 14:07:37 -0600
Message-ID: <0c35a3cc-a790-d8fb-a1a2-aeadf35fb9cf@kernel.org>
In-Reply-To: <20221004120028.679586-2-vladimir.oltean@nxp.com>

On 10/4/22 6:00 AM, Vladimir Oltean wrote:
> diff --git a/tc/q_taprio.c b/tc/q_taprio.c
> index e3af3f3fa047..45f82be1f50a 100644
> --- a/tc/q_taprio.c
> +++ b/tc/q_taprio.c
> @@ -151,13 +151,32 @@ static struct sched_entry *create_entry(uint32_t gatemask, uint32_t interval, ui
>  	return e;
>  }
>  
> +static void add_tc_entries(struct nlmsghdr *n,
> +			   __u32 max_sdu[TC_QOPT_MAX_QUEUE])
> +{
> +	struct rtattr *l;
> +	__u32 tc;
> +
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
> +		l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
> +
> +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX, &tc, sizeof(tc));
> +		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
> +			  &max_sdu[tc], sizeof(max_sdu[tc]));
> +
> +		addattr_nest_end(n, l);

Why the full TC_QOPT_MAX_QUEUE? taprio_parse_opt knows the index of the
last entry used, so only that many entries need to be emitted.
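
Something along these lines, perhaps (untested sketch; assumes
taprio_parse_opt passes down a hypothetical "num_tc", i.e. the last
index it parsed plus one):

static void add_tc_entries(struct nlmsghdr *n,
			   __u32 max_sdu[TC_QOPT_MAX_QUEUE],
			   int num_tc)
{
	struct rtattr *l;
	__u32 tc;

	/* emit only the entries actually given on the command line,
	 * not all TC_QOPT_MAX_QUEUE of them
	 */
	for (tc = 0; tc < num_tc; tc++) {
		l = addattr_nest(n, 1024,
				 TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED);
		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_INDEX,
			  &tc, sizeof(tc));
		addattr_l(n, 1024, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
			  &max_sdu[tc], sizeof(max_sdu[tc]));
		addattr_nest_end(n, l);
	}
}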

> +	}
> +}
> +
>  static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  			    char **argv, struct nlmsghdr *n, const char *dev)
>  {
> +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = { };
>  	__s32 clockid = CLOCKID_INVALID;
>  	struct tc_mqprio_qopt opt = { };
>  	__s64 cycle_time_extension = 0;
>  	struct list_head sched_entries;
> +	bool have_tc_entries = false;
>  	struct rtattr *tail, *l;
>  	__u32 taprio_flags = 0;
>  	__u32 txtime_delay = 0;
> @@ -211,6 +230,18 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  				free(tmp);
>  				idx++;
>  			}
> +		} else if (strcmp(*argv, "max-sdu") == 0) {
> +			while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
> +				NEXT_ARG();
> +				if (get_u32(&max_sdu[idx], *argv, 10)) {
> +					PREV_ARG();
> +					break;
> +				}
> +				idx++;
> +			}
> +			for ( ; idx < TC_QOPT_MAX_QUEUE; idx++)
> +				max_sdu[idx] = 0;

max_sdu is already initialized to 0 at its declaration, and you have
"have_tc_entries" to detect the option being given multiple times on
the command line, so this trailing zeroing loop is not needed.
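
IOW, something like this should be enough (untested; duparg() to bail
out on a repeated "max-sdu" is just one way to handle duplicates):

		} else if (strcmp(*argv, "max-sdu") == 0) {
			/* a second "max-sdu" would inherit stale values
			 * from the first one, so reject it outright
			 */
			if (have_tc_entries)
				duparg("max-sdu", *argv);

			while (idx < TC_QOPT_MAX_QUEUE && NEXT_ARG_OK()) {
				NEXT_ARG();
				if (get_u32(&max_sdu[idx], *argv, 10)) {
					PREV_ARG();
					break;
				}
				idx++;
			}
			have_tc_entries = true;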

> +			have_tc_entries = true;
>  		} else if (strcmp(*argv, "sched-entry") == 0) {
>  			uint32_t mask, interval;
>  			struct sched_entry *e;
> @@ -341,6 +372,9 @@ static int taprio_parse_opt(struct qdisc_util *qu, int argc,
>  		addattr_l(n, 1024, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
>  			  &cycle_time_extension, sizeof(cycle_time_extension));
>  
> +	if (have_tc_entries)
> +		add_tc_entries(n, max_sdu);
> +
>  	l = addattr_nest(n, 1024, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST | NLA_F_NESTED);
>  
>  	err = add_sched_list(&sched_entries, n);
> @@ -430,6 +464,59 @@ static int print_schedule(FILE *f, struct rtattr **tb)
>  	return 0;
>  }
>  
> +static void dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
> +			  struct rtattr *item, bool *have_tc_entries)
> +{
> +	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
> +	__u32 tc, val = 0;
> +
> +	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);
> +
> +	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
> +		fprintf(stderr, "Missing tc entry index\n");
> +		return;
> +	}
> +
> +	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
> +
> +	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
> +		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
> +
> +	max_sdu[tc] = val;
> +
> +	*have_tc_entries = true;
> +}
> +
> +static void dump_tc_entries(FILE *f, struct rtattr *opt)
> +{
> +	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
> +	bool have_tc_entries = false;
> +	struct rtattr *i;
> +	int tc, rem;
> +
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
> +		max_sdu[tc] = 0;

max_sdu is already initialized to 0 when it is declared above, so this
loop is redundant.

> +
> +	rem = RTA_PAYLOAD(opt);
> +
> +	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
> +		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
> +			continue;
> +
> +		dump_tc_entry(max_sdu, i, &have_tc_entries);
> +	}
> +
> +	if (!have_tc_entries)
> +		return;
> +
> +	open_json_array(PRINT_ANY, "max-sdu");
> +	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)

You can know the max index while parsing the entries, so why not use it
here instead of walking all of TC_QOPT_MAX_QUEUE?
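
For instance (untested sketch, which also makes "have_tc_entries"
unnecessary; the print calls at the end are a guess, since the hunk is
truncated above): make dump_tc_entry return the tc index it parsed, or
-1 on error, and bound the loop with the highest index seen:

static int dump_tc_entry(__u32 max_sdu[TC_QOPT_MAX_QUEUE],
			 struct rtattr *item)
{
	struct rtattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1];
	__u32 tc, val = 0;

	parse_rtattr_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, item);

	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
		fprintf(stderr, "Missing tc entry index\n");
		return -1;
	}

	tc = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);

	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
		val = rta_getattr_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);

	max_sdu[tc] = val;

	return tc;
}

static void dump_tc_entries(FILE *f, struct rtattr *opt)
{
	__u32 max_sdu[TC_QOPT_MAX_QUEUE] = {};
	int tc, max_tc = -1, rem;
	struct rtattr *i;

	rem = RTA_PAYLOAD(opt);

	for (i = RTA_DATA(opt); RTA_OK(i, rem); i = RTA_NEXT(i, rem)) {
		if (i->rta_type != (TCA_TAPRIO_ATTR_TC_ENTRY | NLA_F_NESTED))
			continue;

		tc = dump_tc_entry(max_sdu, i);
		if (tc > max_tc)
			max_tc = tc;
	}

	/* no (valid) tc entries dumped by the kernel: print nothing */
	if (max_tc < 0)
		return;

	open_json_array(PRINT_ANY, "max-sdu");
	for (tc = 0; tc <= max_tc; tc++)
		print_uint(PRINT_ANY, NULL, " %u", max_sdu[tc]);
	close_json_array(PRINT_ANY, "");
}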

