From: Changli Gao <xiaosuo@gmail.com>
To: Krishna Kumar <krkumar2@in.ibm.com>
Cc: davem@davemloft.net, arnd@arndb.de, bhutchings@solarflare.com,
netdev@vger.kernel.org, therbert@google.com, mst@redhat.com
Subject: Re: [PATCH v3 1/2] core: Factor out flow calculation from get_rps_cpu
Date: Tue, 3 Aug 2010 12:05:34 +0800
Message-ID: <AANLkTikNOstJr3qqMUqtUoOm1xHOjw5uM7NpQUNJz4PR@mail.gmail.com>
In-Reply-To: <20100803030256.8486.82622.sendpatchset@krkumar2.in.ibm.com>
On Tue, Aug 3, 2010 at 11:02 AM, Krishna Kumar <krkumar2@in.ibm.com> wrote:
> From: Krishna Kumar <krkumar2@in.ibm.com>
>
> Factor out flow calculation code from get_rps_cpu, since macvtap
> driver can use the same code.
>
> Revisions:
>
> v2 - Ben: Separate flow calculation out and use in select queue
> v3 - Arnd: Don't re-implement MIN
>
> Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
> ---
> include/linux/netdevice.h | 1
> net/core/dev.c | 94 ++++++++++++++++++++++--------------
> 2 files changed, 59 insertions(+), 36 deletions(-)
>
> diff -ruNp org/include/linux/netdevice.h new/include/linux/netdevice.h
> --- org/include/linux/netdevice.h 2010-08-03 08:19:57.000000000 +0530
> +++ new/include/linux/netdevice.h 2010-08-03 08:19:57.000000000 +0530
> @@ -2253,6 +2253,7 @@ static inline const char *netdev_name(co
> return dev->name;
> }
>
> +extern int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb);
> extern int netdev_printk(const char *level, const struct net_device *dev,
> const char *format, ...)
> __attribute__ ((format (printf, 3, 4)));
> diff -ruNp org/net/core/dev.c new/net/core/dev.c
> --- org/net/core/dev.c 2010-08-03 08:19:57.000000000 +0530
> +++ new/net/core/dev.c 2010-08-03 08:19:57.000000000 +0530
> @@ -2263,51 +2263,24 @@ static inline void ____napi_schedule(str
> __raise_softirq_irqoff(NET_RX_SOFTIRQ);
> }
>
> -#ifdef CONFIG_RPS
> -
> -/* One global table that all flow-based protocols share. */
> -struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> -EXPORT_SYMBOL(rps_sock_flow_table);
> -
> /*
> - * get_rps_cpu is called from netif_receive_skb and returns the target
> - * CPU from the RPS map of the receiving queue for a given skb.
> - * rcu_read_lock must be held on entry.
> + * skb_calculate_flow: calculate a flow hash based on src/dst addresses
> + * and src/dst port numbers. On success, returns a hash number (> 0),
> + * otherwise -1.
> */
> -static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> - struct rps_dev_flow **rflowp)
> +int skb_calculate_flow(struct net_device *dev, struct sk_buff *skb)
> {
> + int hash = skb->rxhash;
> struct ipv6hdr *ip6;
> struct iphdr *ip;
> - struct netdev_rx_queue *rxqueue;
> - struct rps_map *map;
> - struct rps_dev_flow_table *flow_table;
> - struct rps_sock_flow_table *sock_flow_table;
> - int cpu = -1;
> u8 ip_proto;
> - u16 tcpu;
> u32 addr1, addr2, ihl;
> union {
> u32 v32;
> u16 v16[2];
> } ports;
>
> - if (skb_rx_queue_recorded(skb)) {
> - u16 index = skb_get_rx_queue(skb);
> - if (unlikely(index >= dev->num_rx_queues)) {
> - WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> - "on queue %u, but number of RX queues is %u\n",
> - dev->name, index, dev->num_rx_queues);
> - goto done;
> - }
> - rxqueue = dev->_rx + index;
> - } else
> - rxqueue = dev->_rx;
> -
> - if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> - goto done;
> -
> - if (skb->rxhash)
> + if (hash)
> goto got_hash; /* Skip hash computation on packet header */
>
> switch (skb->protocol) {
> @@ -2334,6 +2307,7 @@ static int get_rps_cpu(struct net_device
> default:
> goto done;
> }
> +
> switch (ip_proto) {
> case IPPROTO_TCP:
> case IPPROTO_UDP:
> @@ -2356,11 +2330,59 @@ static int get_rps_cpu(struct net_device
> /* get a consistent hash (same value on both flow directions) */
> if (addr2 < addr1)
> swap(addr1, addr2);
> - skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> - if (!skb->rxhash)
> - skb->rxhash = 1;
> +
> + hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
> + if (!hash)
> + hash = 1;
>
> got_hash:
> + return hash;
> +
> +done:
> + return -1;
> +}
> +EXPORT_SYMBOL(skb_calculate_flow);
I have noticed that you call skb_calculate_flow() in
macvtap_get_queue(), where skb->data doesn't point to the network
header but to the ethernet header. However, skb_calculate_flow()
assumes skb->data points to the network header. There are two choices:
* update skb_calculate_flow() to support being called at the ethernet layer.
* pull the skb before skb_calculate_flow(), and push it back after
skb_calculate_flow() in macvtap_get_queue() (a rough sketch of this is below).
I prefer the former way.
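For illustration, the second option would amount to something like the
following untested sketch around the call site in macvtap_get_queue()
(the helper name is made up here, not taken from your patch):

static int macvtap_flow_hash(struct net_device *dev, struct sk_buff *skb)
{
	int hash;

	__skb_pull(skb, ETH_HLEN);	/* skb->data now points at the network header */
	hash = skb_calculate_flow(dev, skb);
	__skb_push(skb, ETH_HLEN);	/* restore the ethernet header */

	return hash;
}

This assumes the ethernet header is linear and exactly ETH_HLEN bytes,
which is the kind of detail the first option would keep out of every
caller.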
BTW: the function name skb_calculate_flow isn't good. How about
skb_get_rxhash()? Maybe we can implement two versions, a fast path and
a slow path, and implement the fast-path version as an inline function
in skbuff.h:
static inline u32 skb_get_rxhash(struct sk_buff *skb)
{
	u32 rxhash;

	rxhash = skb->rxhash;
	if (!rxhash)
		return __skb_get_rxhash(skb);

	return rxhash;
}
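Here __skb_get_rxhash() would be the slow path, i.e. the body of your
skb_calculate_flow() behind a different name, ideally storing the result
in skb->rxhash before returning it (again just a rough, untested sketch
of my suggestion):

/* slow path, in net/core/dev.c */
extern u32 __skb_get_rxhash(struct sk_buff *skb);

With that, the lookup in get_rps_cpu() would reduce to roughly:

	if (!skb_get_rxhash(skb))
		goto done;

Returning a u32 with 0 meaning "no hash" also sidesteps the
"if (skb->rxhash < 0)" test in the patch, which can never be true
because rxhash is a u32.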
> +
> +#ifdef CONFIG_RPS
> +
> +/* One global table that all flow-based protocols share. */
> +struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
> +EXPORT_SYMBOL(rps_sock_flow_table);
> +
> +/*
> + * get_rps_cpu is called from netif_receive_skb and returns the target
> + * CPU from the RPS map of the receiving queue for a given skb.
> + * rcu_read_lock must be held on entry.
> + */
> +static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
> + struct rps_dev_flow **rflowp)
> +{
> + struct netdev_rx_queue *rxqueue;
> + struct rps_map *map;
> + struct rps_dev_flow_table *flow_table;
> + struct rps_sock_flow_table *sock_flow_table;
> + int cpu = -1;
> + u16 tcpu;
> +
> + if (skb_rx_queue_recorded(skb)) {
> + u16 index = skb_get_rx_queue(skb);
> + if (unlikely(index >= dev->num_rx_queues)) {
> + WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
> + "on queue %u, but number of RX queues is %u\n",
> + dev->name, index, dev->num_rx_queues);
> + goto done;
> + }
> + rxqueue = dev->_rx + index;
> + } else
> + rxqueue = dev->_rx;
> +
> + if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
> + goto done;
> +
> + skb->rxhash = skb_calculate_flow(dev, skb);
> + if (skb->rxhash < 0)
> + goto done;
> +
> flow_table = rcu_dereference(rxqueue->rps_flow_table);
> sock_flow_table = rcu_dereference(rps_sock_flow_table);
> if (flow_table && sock_flow_table) {
--
Regards,
Changli Gao(xiaosuo@gmail.com)