* [PATCH net-next 1/2] net: Add Transparent Ethernet Bridging GRO support.
From: Jesse Gross @ 2014-12-31 3:10 UTC (permalink / raw)
To: David Miller; +Cc: netdev
Currently the only tunnel protocol that supports GRO with encapsulated
Ethernet is VXLAN. This pulls out the Ethernet code into a proper layer
so that it can be used by other tunnel protocols such as GRE and Geneve.
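For illustration only (not part of this patch): a minimal sketch of how a
hypothetical TEB-carrying tunnel "foo" could delegate its inner-frame GRO
handling to the new helpers, much like the VXLAN code below does. The
foo_* names and struct foohdr are made up for the example.

/* Sketch only: once the tunnel's own header has been validated and
 * pulled, the inner Ethernet frame is handed to the new Ethernet GRO
 * layer.  Flush/same_flow bookkeeping is elided for brevity.
 */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct foohdr *fh;

	fh = skb_gro_header_fast(skb, skb_gro_offset(skb));
	/* ... header completeness checks and flow comparison go here ... */

	skb_gro_pull(skb, sizeof(struct foohdr));
	skb_gro_postpull_rcsum(skb, fh, sizeof(struct foohdr));

	return eth_gro_receive(head, skb);	/* inner Ethernet onwards */
}

static int foo_gro_complete(struct sk_buff *skb, int nhoff)
{
	/* nhoff is the offset of the tunnel header within the skb */
	return eth_gro_complete(skb, nhoff + sizeof(struct foohdr));
}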
Signed-off-by: Jesse Gross <jesse@nicira.com>
---
drivers/net/vxlan.c | 53 +++-----------------------
include/linux/etherdevice.h | 4 ++
net/ethernet/eth.c | 92 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 102 insertions(+), 47 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7fbd89f..2ab0922 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -549,10 +549,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
- struct ethhdr *eh, *eh2;
- unsigned int hlen, off_vx, off_eth;
- const struct packet_offload *ptype;
- __be16 type;
+ unsigned int hlen, off_vx;
int flush = 1;
off_vx = skb_gro_offset(skb);
@@ -563,17 +560,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
if (unlikely(!vh))
goto out;
}
- skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
- skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
-
- off_eth = skb_gro_offset(skb);
- hlen = off_eth + sizeof(*eh);
- eh = skb_gro_header_fast(skb, off_eth);
- if (skb_gro_header_hard(skb, hlen)) {
- eh = skb_gro_header_slow(skb, hlen, off_eth);
- if (unlikely(!eh))
- goto out;
- }
flush = 0;
@@ -582,28 +568,16 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
continue;
vh2 = (struct vxlanhdr *)(p->data + off_vx);
- eh2 = (struct ethhdr *)(p->data + off_eth);
- if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) {
+ if (vh->vx_vni != vh2->vx_vni) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
}
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_receive_by_type(type);
- if (ptype == NULL) {
- flush = 1;
- goto out_unlock;
- }
-
- skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
- skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ skb_gro_pull(skb, sizeof(struct vxlanhdr));
+ skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
+ pp = eth_gro_receive(head, skb);
-out_unlock:
- rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
@@ -612,24 +586,9 @@ out:
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
{
- struct ethhdr *eh;
- struct packet_offload *ptype;
- __be16 type;
- int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
- int err = -ENOSYS;
-
udp_tunnel_gro_complete(skb, nhoff);
- eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
- type = eh->h_proto;
-
- rcu_read_lock();
- ptype = gro_find_complete_by_type(type);
- if (ptype != NULL)
- err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
-
- rcu_read_unlock();
- return err;
+ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 41c891d..1d869d1 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 33a140e..238f38d 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -424,3 +424,95 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
}
EXPORT_SYMBOL(sysfs_format_mac);
+
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct ethhdr *eh, *eh2;
+ unsigned int hlen, off_eth;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_eth = skb_gro_offset(skb);
+ hlen = off_eth + sizeof(*eh);
+ eh = skb_gro_header_fast(skb, off_eth);
+ if (skb_gro_header_hard(skb, hlen)) {
+ eh = skb_gro_header_slow(skb, hlen, off_eth);
+ if (unlikely(!eh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ eh2 = (struct ethhdr *)(p->data + off_eth);
+ if (compare_ether_header(eh, eh2)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = eh->h_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, sizeof(*eh));
+ skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+EXPORT_SYMBOL(eth_gro_receive);
+
+int eth_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
+ __be16 type = eh->h_proto;
+ struct packet_offload *ptype;
+ int err = -ENOSYS;
+
+ if (skb->encapsulation)
+ skb_set_inner_mac_header(skb, nhoff);
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff +
+ sizeof(struct ethhdr));
+
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(eth_gro_complete);
+
+static struct packet_offload eth_packet_offload __read_mostly = {
+ .type = cpu_to_be16(ETH_P_TEB),
+ .callbacks = {
+ .gro_receive = eth_gro_receive,
+ .gro_complete = eth_gro_complete,
+ },
+};
+
+static int __init eth_offload_init(void)
+{
+ dev_add_offload(&eth_packet_offload);
+
+ return 0;
+}
+
+fs_initcall(eth_offload_init);
--
1.9.1
* [PATCH net-next 2/2] geneve: Add Geneve GRO support
From: Jesse Gross @ 2014-12-31 3:10 UTC (permalink / raw)
To: David Miller; +Cc: netdev, Joe Stringer
From: Joe Stringer <joestringer@nicira.com>
This results in an approximately 30% increase in throughput
when handling encapsulated bulk traffic.
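For context (not part of the patch): the GRO code below sizes the Geneve
header as the fixed 8-byte part plus opt_len 4-byte words (see
geneve_hlen()). A rough sketch of the wire format that implies, following
the Geneve draft; the real struct genevehdr uses endian-conditional
bitfields, so the exact in-kernel layout may differ:

/* Rough sketch of the Geneve header; big-endian bit order shown. */
struct geneve_hdr_sketch {
	__u8	ver:2,		/* must be GENEVE_VER */
		opt_len:6;	/* options length, in 4-byte multiples */
	__u8	oam:1,		/* OAM frames are not aggregated below */
		critical:1,
		rsvd1:6;
	__be16	proto_type;	/* ethertype of the encapsulated payload */
	__u8	vni[3];		/* virtual network identifier */
	__u8	rsvd2;
	/* opt_len * 4 bytes of variable-length options follow */
};
/* header length = 8 + opt_len * 4, matching geneve_hlen() below */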
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
---
net/ipv4/geneve.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 95 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 394a200..19e256e 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -149,6 +149,99 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
}
EXPORT_SYMBOL_GPL(geneve_xmit_skb);
+static int geneve_hlen(struct genevehdr *gh)
+{
+ return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct genevehdr *gh, *gh2;
+ unsigned int hlen, gh_len, off_gnv;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_gnv = skb_gro_offset(skb);
+ hlen = off_gnv + sizeof(*gh);
+ gh = skb_gro_header_fast(skb, off_gnv);
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ if (gh->ver != GENEVE_VER || gh->oam)
+ goto out;
+ gh_len = geneve_hlen(gh);
+
+ hlen = off_gnv + gh_len;
+ if (skb_gro_header_hard(skb, hlen)) {
+ gh = skb_gro_header_slow(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ gh2 = (struct genevehdr *)(p->data + off_gnv);
+ if (gh->opt_len != gh2->opt_len ||
+ memcmp(gh, gh2, gh_len)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (ptype == NULL) {
+ flush = 1;
+ goto out_unlock;
+ }
+
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct genevehdr *gh;
+ struct packet_offload *ptype;
+ __be16 type;
+ int gh_len;
+ int err = -ENOSYS;
+
+ udp_tunnel_gro_complete(skb, nhoff);
+
+ gh = (struct genevehdr *)(skb->data + nhoff);
+ gh_len = geneve_hlen(gh);
+ type = gh->proto_type;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype != NULL)
+ err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+ rcu_read_unlock();
+ return err;
+}
+
static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
struct sock *sk = gs->sock->sk;
@@ -278,8 +371,8 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
/* Initialize the geneve udp offloads structure */
gs->udp_offloads.port = port;
- gs->udp_offloads.callbacks.gro_receive = NULL;
- gs->udp_offloads.callbacks.gro_complete = NULL;
+ gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
+ gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
spin_lock(&gn->sock_lock);
hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
--
1.9.1
* Re: [PATCH net-next 1/2] net: Add Transparent Ethernet Bridging GRO support.
From: Or Gerlitz @ 2014-12-31 9:19 UTC (permalink / raw)
To: Jesse Gross; +Cc: David Miller, Linux Netdev List
On Wed, Dec 31, 2014 at 5:10 AM, Jesse Gross <jesse@nicira.com> wrote:
> Currently the only tunnel protocol that supports GRO with encapsulated
> Ethernet is VXLAN. This pulls out the Ethernet code into a proper layer
> so that it can be used by other tunnel protocols such as GRE and Geneve.
Hi Jesse,
Thanks for taking care of that. I had also coded this up, with the
intention of adding GRO support for OVS's TEB-based GRE, but didn't
manage to submit it before your post... anyway, I would recommend that
you break this patch into two:
1. basic TEB GRO support
2. refactoring of the VXLAN GRO logic to use it
Or.
* Re: [PATCH net-next 1/2] net: Add Transparent Ethernet Bridging GRO support.
From: Jesse Gross @ 2014-12-31 14:30 UTC (permalink / raw)
To: Or Gerlitz; +Cc: David Miller, Linux Netdev List
On Wed, Dec 31, 2014 at 4:19 AM, Or Gerlitz <gerlitz.or@gmail.com> wrote:
> On Wed, Dec 31, 2014 at 5:10 AM, Jesse Gross <jesse@nicira.com> wrote:
>> Currently the only tunnel protocol that supports GRO with encapsulated
>> Ethernet is VXLAN. This pulls out the Ethernet code into a proper layer
>> so that it can be used by other tunnel protocols such as GRE and Geneve.
>
> Hi Jesse,
>
> Thanks for taking care of that, I also had it coded under the
> intention of adding GRO support for OVS's TEB based GRE, but didn't
> make it to submit before your post... anyway, I would recommend that
> you break this patch into two:
>
> 1. basic TEB GRO support
> 2. refactoring of the VXLAN GRO logic to use it
This patch is really just moving code, so breaking it into two steps
would essentially mean introducing duplicate code and then deleting
the first version in the next patch. It's already a pretty short patch,
and I think splitting it would actually make it harder to verify that
it is correct.
* Re: [PATCH net-next 1/2] net: Add Transparent Ethernet Bridging GRO support.
From: David Miller @ 2015-01-02 20:46 UTC (permalink / raw)
To: jesse; +Cc: netdev
From: Jesse Gross <jesse@nicira.com>
Date: Tue, 30 Dec 2014 19:10:15 -0800
> Currently the only tunnel protocol that supports GRO with encapsulated
> Ethernet is VXLAN. This pulls out the Ethernet code into a proper layer
> so that it can be used by other tunnel protocols such as GRE and Geneve.
>
> Signed-off-by: Jesse Gross <jesse@nicira.com>
Applied.
* Re: [PATCH net-next 2/2] geneve: Add Geneve GRO support
From: David Miller @ 2015-01-02 20:47 UTC (permalink / raw)
To: jesse; +Cc: netdev, joestringer
From: Jesse Gross <jesse@nicira.com>
Date: Tue, 30 Dec 2014 19:10:16 -0800
> From: Joe Stringer <joestringer@nicira.com>
>
> This results in an approximately 30% increase in throughput
> when handling encapsulated bulk traffic.
>
> Signed-off-by: Joe Stringer <joestringer@nicira.com>
> Signed-off-by: Jesse Gross <jesse@nicira.com>
Applied, looks great, thanks Jesse.