* [PATCH 1/3] myri10ge: use physical pages for skb allocation
From: Brice Goglin @ 2006-09-27 23:12 UTC
To: Jeff Garzik, netdev
Physical pages are used instead of 16kB contiguous buffers for the
skb frags. We also pack as many fragments as possible into each page,
so that we do not have to allocate a page for every fragment.
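For illustration, here is a minimal standalone C sketch of the
carving scheme the new myri10ge_alloc_rx_pages() follows; it is not
the driver code, and PAGE_SZ and every name in it are illustrative
assumptions. Chunks are handed out of one page at cacheline-aligned
offsets, one reference is counted per chunk, and a fresh page is
taken only when the next chunk would not fit.

/*
 * Standalone sketch (not driver code) of the page-carving idea:
 * pool_get() hands out cacheline-aligned chunks from the current
 * page, counting one reference per chunk, and only grabs a fresh
 * page when the next chunk would not fit.  Pages are intentionally
 * leaked in this toy.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ		4096
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct page_pool {
	unsigned char *page;	/* current backing page */
	int offset;		/* next free byte in that page */
	int refs;		/* chunks handed out of that page */
};

/* Return a chunk of 'bytes', reusing the current page while it still
 * has room; NULL when a fresh page cannot be allocated. */
static void *pool_get(struct page_pool *p, int bytes)
{
	void *chunk;

	if (p->page == NULL || p->offset + bytes > PAGE_SZ) {
		p->page = malloc(PAGE_SZ);
		if (p->page == NULL)
			return NULL;
		p->offset = 0;
		p->refs = 0;
	}
	chunk = p->page + p->offset;
	/* start the next chunk on a 64-byte boundary, in the spirit
	 * of the driver's SKB_DATA_ALIGN() */
	p->offset += ALIGN_UP(bytes, 64);
	p->refs++;
	return chunk;
}

int main(void)
{
	struct page_pool pool = { NULL, 0, 0 };
	int i;

	/* two 1518-byte chunks fit in one 4kB page; the third
	 * triggers a new page */
	for (i = 0; i < 4; i++) {
		void *chunk = pool_get(&pool, 1518);
		printf("chunk %d -> %p (page refs %d)\n",
		       i, chunk, pool.refs);
	}
	return 0;
}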
Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Andrew J. Gallatin <gallatin@myri.com>
---
drivers/net/myri10ge/myri10ge.c | 500 ++++++++++++++++++++--------------------
1 file changed, 255 insertions(+), 245 deletions(-)
Index: linux-mm/drivers/net/myri10ge/myri10ge.c
===================================================================
--- linux-mm.orig/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:05:29.000000000 +0200
+++ linux-mm/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:05:33.000000000 +0200
@@ -61,6 +61,8 @@
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -91,9 +93,13 @@
#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
+#define MYRI10GE_ALLOC_ORDER 0
+#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
+#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
struct myri10ge_rx_buffer_state {
- struct sk_buff *skb;
+ struct page *page;
+ int page_offset;
DECLARE_PCI_UNMAP_ADDR(bus)
DECLARE_PCI_UNMAP_LEN(len)
};
@@ -116,9 +122,14 @@
u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
struct myri10ge_rx_buffer_state *info;
+ struct page *page;
+ dma_addr_t bus;
+ int page_offset;
int cnt;
+ int fill_cnt;
int alloc_fail;
int mask; /* number of rx slots -1 */
+ int watchdog_needed;
};
struct myri10ge_tx_buf {
@@ -150,6 +161,7 @@
struct myri10ge_rx_buf rx_big;
struct myri10ge_rx_done rx_done;
int small_bytes;
+ int big_bytes;
struct net_device *dev;
struct net_device_stats stats;
u8 __iomem *sram;
@@ -238,11 +250,6 @@
MODULE_PARM_DESC(myri10ge_force_firmware,
"Force firmware to assume aligned completions\n");
-static int myri10ge_skb_cross_4k = 0;
-module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_skb_cross_4k,
- "Can a small skb cross a 4KB boundary?\n");
-
static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
@@ -266,6 +273,10 @@
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+static int myri10ge_fill_thresh = 256;
+module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
+
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -797,194 +808,180 @@
mb();
}
-/*
- * Set of routines to get a new receive buffer. Any buffer which
- * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
- * wdma restrictions. We also try to align any smaller allocation to
- * at least a 16 byte boundary for efficiency. We assume the linux
- * memory allocator works by powers of 2, and will not return memory
- * smaller than 2KB which crosses a 4KB boundary. If it does, we fall
- * back to allocating 2x as much space as required.
- *
- * We intend to replace large (>4KB) skb allocations by using
- * pages directly and building a fraglist in the near future.
- */
-
-static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
- int bytes)
-{
- struct sk_buff *skb;
- unsigned long data, roundup;
-
- skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
- if (skb == NULL)
- return NULL;
-
- /* Correct skb->truesize so that socket buffer
- * accounting is not confused the rounding we must
- * do to satisfy alignment constraints.
- */
- skb->truesize -= 4096;
-
- data = (unsigned long)(skb->data);
- roundup = (-data) & (4095);
- skb_reserve(skb, roundup);
- return skb;
-}
-
-/* Allocate 2x as much space as required and use whichever portion
- * does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
- unsigned int bytes)
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
{
- struct sk_buff *skb;
- unsigned long data, boundary;
-
- skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
- if (unlikely(skb == NULL))
- return NULL;
-
- /* Correct skb->truesize so that socket buffer
- * accounting is not confused the rounding we must
- * do to satisfy alignment constraints.
- */
- skb->truesize -= bytes + MXGEFW_PAD;
-
- data = (unsigned long)(skb->data);
- boundary = (data + 4095UL) & ~4095UL;
- if ((boundary - data) >= (bytes + MXGEFW_PAD))
- return skb;
+ struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
- skb_reserve(skb, boundary - data);
- return skb;
+ if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+ (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
+ vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
+ skb->csum = hw_csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
-/* Allocate just enough space, and verify that the allocated
- * space does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
- int bytes)
+static inline void
+myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
+ struct skb_frag_struct *rx_frags, int len, int hlen)
{
- struct sk_buff *skb;
- unsigned long roundup, data, end;
+ struct skb_frag_struct *skb_frags;
- skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
- if (unlikely(skb == NULL))
- return NULL;
-
- /* Round allocated buffer to 16 byte boundary */
- data = (unsigned long)(skb->data);
- roundup = (-data) & 15UL;
- skb_reserve(skb, roundup);
- /* Verify that the data buffer does not cross a page boundary */
- data = (unsigned long)(skb->data);
- end = data + bytes + MXGEFW_PAD - 1;
- if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) {
- printk(KERN_NOTICE
- "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
- myri10ge_skb_cross_4k = 1;
- dev_kfree_skb_any(skb);
- skb = myri10ge_alloc_small_safe(dev, bytes);
- }
- return skb;
+ skb->len = skb->data_len = len;
+ skb->truesize = len + sizeof(struct sk_buff);
+ /* attach the page(s) */
+
+ skb_frags = skb_shinfo(skb)->frags;
+ while (len > 0) {
+ memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
+ len -= rx_frags->size;
+ skb_frags++;
+ rx_frags++;
+ skb_shinfo(skb)->nr_frags++;
+ }
+
+ /* pskb_may_pull is not available in irq context, but
+ * skb_pull() (for ether_pad and eth_type_trans()) requires
+ * the beginning of the packet in skb_headlen(), move it
+ * manually */
+ memcpy(skb->data, va, hlen);
+ skb_shinfo(skb)->frags[0].page_offset += hlen;
+ skb_shinfo(skb)->frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb_pull(skb, MXGEFW_PAD);
}
-static inline int
-myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
- int bytes, int idx)
+static void
+myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ int bytes, int watchdog)
{
- struct net_device *dev = mgp->dev;
- struct pci_dev *pdev = mgp->pdev;
- struct sk_buff *skb;
- dma_addr_t bus;
- int len, retval = 0;
-
- bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
+ struct page *page;
+ int idx;
- if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
- skb = myri10ge_alloc_big(dev, bytes);
- else if (myri10ge_skb_cross_4k)
- skb = myri10ge_alloc_small_safe(dev, bytes);
- else
- skb = myri10ge_alloc_small(dev, bytes);
+ if (unlikely(rx->watchdog_needed && !watchdog))
+ return;
- if (unlikely(skb == NULL)) {
- rx->alloc_fail++;
- retval = -ENOBUFS;
- goto done;
- }
-
- /* set len so that it only covers the area we
- * need mapped for DMA */
- len = bytes + MXGEFW_PAD;
-
- bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
- rx->info[idx].skb = skb;
- pci_unmap_addr_set(&rx->info[idx], bus, bus);
- pci_unmap_len_set(&rx->info[idx], len, len);
- rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus));
- rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
-
-done:
- /* copy 8 descriptors (64-bytes) to the mcp at a time */
- if ((idx & 7) == 7) {
- if (rx->wc_fifo == NULL)
- myri10ge_submit_8rx(&rx->lanai[idx - 7],
- &rx->shadow[idx - 7]);
- else {
- mb();
- myri10ge_pio_copy(rx->wc_fifo,
- &rx->shadow[idx - 7], 64);
+ /* try to refill entire ring */
+ while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
+ idx = rx->fill_cnt & rx->mask;
+
+ if ((bytes < MYRI10GE_ALLOC_SIZE / 2) &&
+ (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
+ /* we can use part of previous page */
+ get_page(rx->page);
+ } else {
+ /* we need a new page */
+ page =
+ alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MYRI10GE_ALLOC_ORDER);
+ if (unlikely(page == NULL)) {
+ if (rx->fill_cnt - rx->cnt < 16)
+ rx->watchdog_needed = 1;
+ return;
+ }
+ rx->page = page;
+ rx->page_offset = 0;
+ rx->bus = pci_map_page(mgp->pdev, page, 0,
+ MYRI10GE_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ rx->info[idx].page = rx->page;
+ rx->info[idx].page_offset = rx->page_offset;
+ /* note that this is the address of the start of the
+ * page */
+ pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ rx->shadow[idx].addr_low =
+ htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
+ rx->shadow[idx].addr_high =
+ htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
+
+ /* start next packet on a cacheline boundary */
+ rx->page_offset += SKB_DATA_ALIGN(bytes);
+ rx->fill_cnt++;
+
+ /* copy 8 descriptors to the firmware at a time */
+ if ((idx & 7) == 7) {
+ if (rx->wc_fifo == NULL)
+ myri10ge_submit_8rx(&rx->lanai[idx - 7],
+ &rx->shadow[idx - 7]);
+ else {
+ mb();
+ myri10ge_pio_copy(rx->wc_fifo,
+ &rx->shadow[idx - 7], 64);
+ }
}
}
- return retval;
}
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+static inline void
+myri10ge_unmap_rx_page(struct pci_dev *pdev,
+ struct myri10ge_rx_buffer_state *info, int bytes)
{
- struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
-
- if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
- (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
- vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
- skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
+ /* unmap the recvd page if we're the only or last user of it */
+ if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
+ (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
+ pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+ & ~(MYRI10GE_ALLOC_SIZE - 1)),
+ MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
}
-static inline unsigned long
+#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
+ * page into an skb */
+
+static inline int
myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int len, int csum)
{
- dma_addr_t bus;
struct sk_buff *skb;
- int idx, unmap_len;
+ struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
+ int i, idx, hlen, remainder;
+ struct pci_dev *pdev = mgp->pdev;
+ struct net_device *dev = mgp->dev;
+ u8 *va;
+ len += MXGEFW_PAD;
idx = rx->cnt & rx->mask;
- rx->cnt++;
-
- /* save a pointer to the received skb */
- skb = rx->info[idx].skb;
- bus = pci_unmap_addr(&rx->info[idx], bus);
- unmap_len = pci_unmap_len(&rx->info[idx], len);
-
- /* try to replace the received skb */
- if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
- /* drop the frame -- the old skbuf is re-cycled */
- mgp->stats.rx_dropped += 1;
- return 0;
+ va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
+ prefetch(va);
+ /* Fill skb_frag_struct(s) with data from our receive */
+ for (i = 0, remainder = len; remainder > 0; i++) {
+ myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
+ rx_frags[i].page = rx->info[idx].page;
+ rx_frags[i].page_offset = rx->info[idx].page_offset;
+ if (remainder < MYRI10GE_ALLOC_SIZE)
+ rx_frags[i].size = remainder;
+ else
+ rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+ rx->cnt++;
+ idx = rx->cnt & rx->mask;
+ remainder -= MYRI10GE_ALLOC_SIZE;
}
- /* unmap the recvd skb */
- pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE);
+ hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
- /* mcp implicitly skips 1st bytes so that packet is properly
- * aligned */
- skb_reserve(skb, MXGEFW_PAD);
+ /* allocate an skb to attach the page(s) to. This is done
+ * after trying LRO, so as to avoid skb allocation overheads */
+
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ if (unlikely(skb == NULL)) {
+ mgp->stats.rx_dropped++;
+ do {
+ i--;
+ put_page(rx_frags[i].page);
+ } while (i != 0);
+ return 0;
+ }
- /* set the length of the frame */
- skb_put(skb, len);
+ /* Attach the pages to the skb, and trim off any padding */
+ myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
+ if (skb_shinfo(skb)->frags[0].size <= 0) {
+ put_page(skb_shinfo(skb)->frags[0].page);
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
- skb->protocol = eth_type_trans(skb, mgp->dev);
if (mgp->csum_flag) {
if ((skb->protocol == ntohs(ETH_P_IP)) ||
(skb->protocol == ntohs(ETH_P_IPV6))) {
@@ -993,9 +990,8 @@
} else
myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
}
-
netif_receive_skb(skb);
- mgp->dev->last_rx = jiffies;
+ dev->last_rx = jiffies;
return 1;
}
@@ -1072,7 +1068,7 @@
length, checksum);
else
rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
- mgp->dev->mtu + ETH_HLEN,
+ mgp->big_bytes,
length, checksum);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
@@ -1087,6 +1083,14 @@
rx_done->cnt = cnt;
mgp->stats.rx_packets += rx_packets;
mgp->stats.rx_bytes += rx_bytes;
+
+ /* restock receive rings if needed */
+ if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 0);
+ if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
@@ -1389,7 +1393,7 @@
return mgp->msg_enable;
}
-static const struct ethtool_ops myri10ge_ethtool_ops = {
+static struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
@@ -1477,56 +1481,48 @@
goto abort_with_rx_small_info;
/* Fill the receive rings */
-
- for (i = 0; i <= mgp->rx_small.mask; i++) {
- status = myri10ge_getbuf(&mgp->rx_small, mgp,
- mgp->small_bytes, i);
- if (status) {
- printk(KERN_ERR
- "myri10ge: %s: alloced only %d small bufs\n",
- dev->name, i);
- goto abort_with_rx_small_ring;
- }
- }
-
- for (i = 0; i <= mgp->rx_big.mask; i++) {
- status =
- myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
- if (status) {
- printk(KERN_ERR
- "myri10ge: %s: alloced only %d big bufs\n",
- dev->name, i);
- goto abort_with_rx_big_ring;
- }
+ mgp->rx_big.cnt = 0;
+ mgp->rx_small.cnt = 0;
+ mgp->rx_big.fill_cnt = 0;
+ mgp->rx_small.fill_cnt = 0;
+ mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+ mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+ mgp->rx_small.watchdog_needed = 0;
+ mgp->rx_big.watchdog_needed = 0;
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 0);
+
+ if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
+ printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
+ dev->name, mgp->rx_small.fill_cnt);
+ goto abort_with_rx_small_ring;
+ }
+
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+ if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
+ printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
+ dev->name, mgp->rx_big.fill_cnt);
+ goto abort_with_rx_big_ring;
}
return 0;
abort_with_rx_big_ring:
- for (i = 0; i <= mgp->rx_big.mask; i++) {
- if (mgp->rx_big.info[i].skb != NULL)
- dev_kfree_skb_any(mgp->rx_big.info[i].skb);
- if (pci_unmap_len(&mgp->rx_big.info[i], len))
- pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&mgp->rx_big.info[i],
- bus),
- pci_unmap_len(&mgp->rx_big.info[i],
- len),
- PCI_DMA_FROMDEVICE);
+ for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+ int idx = i & mgp->rx_big.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+ mgp->big_bytes);
+ put_page(mgp->rx_big.info[idx].page);
}
abort_with_rx_small_ring:
- for (i = 0; i <= mgp->rx_small.mask; i++) {
- if (mgp->rx_small.info[i].skb != NULL)
- dev_kfree_skb_any(mgp->rx_small.info[i].skb);
- if (pci_unmap_len(&mgp->rx_small.info[i], len))
- pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&mgp->rx_small.info[i],
- bus),
- pci_unmap_len(&mgp->rx_small.info[i],
- len),
- PCI_DMA_FROMDEVICE);
+ for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+ int idx = i & mgp->rx_small.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+ mgp->small_bytes + MXGEFW_PAD);
+ put_page(mgp->rx_small.info[idx].page);
}
+
kfree(mgp->rx_big.info);
abort_with_rx_small_info:
@@ -1559,30 +1555,24 @@
mgp = netdev_priv(dev);
- for (i = 0; i <= mgp->rx_big.mask; i++) {
- if (mgp->rx_big.info[i].skb != NULL)
- dev_kfree_skb_any(mgp->rx_big.info[i].skb);
- if (pci_unmap_len(&mgp->rx_big.info[i], len))
- pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&mgp->rx_big.info[i],
- bus),
- pci_unmap_len(&mgp->rx_big.info[i],
- len),
- PCI_DMA_FROMDEVICE);
- }
-
- for (i = 0; i <= mgp->rx_small.mask; i++) {
- if (mgp->rx_small.info[i].skb != NULL)
- dev_kfree_skb_any(mgp->rx_small.info[i].skb);
- if (pci_unmap_len(&mgp->rx_small.info[i], len))
- pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&mgp->rx_small.info[i],
- bus),
- pci_unmap_len(&mgp->rx_small.info[i],
- len),
- PCI_DMA_FROMDEVICE);
+ for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+ idx = i & mgp->rx_big.mask;
+ if (i == mgp->rx_big.fill_cnt - 1)
+ mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+ myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+ mgp->big_bytes);
+ put_page(mgp->rx_big.info[idx].page);
+ }
+
+ for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+ idx = i & mgp->rx_small.mask;
+ if (i == mgp->rx_small.fill_cnt - 1)
+ mgp->rx_small.info[idx].page_offset =
+ MYRI10GE_ALLOC_SIZE;
+ myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+ mgp->small_bytes + MXGEFW_PAD);
+ put_page(mgp->rx_small.info[idx].page);
}
-
tx = &mgp->tx;
while (tx->done != tx->req) {
idx = tx->done & tx->mask;
@@ -1650,19 +1640,18 @@
*/
if (dev->mtu <= ETH_DATA_LEN)
- mgp->small_bytes = 128; /* enough for a TCP header */
+ /* enough for a TCP header */
+ mgp->small_bytes = (128 > SMP_CACHE_BYTES)
+ ? (128 - MXGEFW_PAD)
+ : (SMP_CACHE_BYTES - MXGEFW_PAD);
else
- mgp->small_bytes = ETH_FRAME_LEN; /* enough for an ETH_DATA_LEN frame */
+ /* enough for an ETH_DATA_LEN frame */
+ mgp->small_bytes = ETH_FRAME_LEN;
/* Override the small buffer size? */
if (myri10ge_small_bytes > 0)
mgp->small_bytes = myri10ge_small_bytes;
- /* If the user sets an obscenely small MTU, adjust the small
- * bytes down to nearly nothing */
- if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
- mgp->small_bytes = 64;
-
/* get the lanai pointers to the send and receive rings */
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
@@ -1698,17 +1687,23 @@
mgp->rx_big.wc_fifo = NULL;
}
- status = myri10ge_allocate_rings(dev);
- if (status != 0)
- goto abort_with_nothing;
-
/* Firmware needs the big buff size as a power of 2. Lie and
* tell him the buffer is larger, because we only use 1
* buffer/pkt, and the mtu will prevent overruns.
*/
big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
- while ((big_pow2 & (big_pow2 - 1)) != 0)
- big_pow2++;
+ if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
+ while ((big_pow2 & (big_pow2 - 1)) != 0)
+ big_pow2++;
+ mgp->big_bytes = dev->mtu + ETH_HLEN + MXGEFW_PAD;
+ } else {
+ big_pow2 = MYRI10GE_ALLOC_SIZE;
+ mgp->big_bytes = big_pow2;
+ }
+
+ status = myri10ge_allocate_rings(dev);
+ if (status != 0)
+ goto abort_with_nothing;
/* now give firmware buffers sizes, and MTU */
cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2691,6 +2686,21 @@
struct myri10ge_priv *mgp;
mgp = (struct myri10ge_priv *)arg;
+
+ if (mgp->rx_small.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 1);
+ if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
+ myri10ge_fill_thresh)
+ mgp->rx_small.watchdog_needed = 0;
+ }
+ if (mgp->rx_big.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
+ if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
+ myri10ge_fill_thresh)
+ mgp->rx_big.watchdog_needed = 0;
+ }
+
if (mgp->tx.req != mgp->tx.done &&
mgp->tx.done == mgp->watchdog_tx_done &&
mgp->watchdog_tx_req != mgp->watchdog_tx_done)
* [PATCH 2/3] myri10ge: Large Receive Offload
From: Brice Goglin @ 2006-09-27 23:12 UTC
To: Jeff Garzik, netdev
This is a Large Receive Offload implemented entirely in the driver.
myri10ge_rx_done() now first calls myri10ge_lro_rx() to check whether
the new fragment is the next one expected by any of the pending LRO
receives. Those receives are stored in the lro_active queue (up to 8
by default). LRO receives are flushed through myri10ge_lro_flush()
whenever an out-of-order packet arrives in the same stream, or when
there is a chance that the next fragment might not fit in the
current skb.
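The stream bookkeeping can be pictured with the following minimal
standalone C sketch; it is not the driver code (the driver keys on
the same TCP/IP 4-tuple but keeps the aggregated skbs on hlists),
and MAX_LRO and all names in it are illustrative assumptions.
In-order segments are appended to a matching stream, an out-of-order
segment flushes that stream, and a new stream is started only while
a free slot remains.

/*
 * Standalone sketch (not driver code) of per-stream LRO bookkeeping:
 * match on the 4-tuple, append in-order payload, flush on anything
 * out of order, and refuse new streams when all slots are busy.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LRO 8

struct lro_stream {
	int in_use;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	uint32_t next_seq;	/* sequence number expected next */
	uint32_t bytes;		/* payload aggregated so far */
};

static struct lro_stream streams[MAX_LRO];

static void lro_flush(struct lro_stream *s)
{
	/* in the driver this hands the aggregated skb to the stack */
	printf("flush %u bytes, ending at seq %u\n", s->bytes, s->next_seq);
	s->in_use = 0;
}

/* Returns 0 if the segment was aggregated, -1 if the caller must
 * take the normal receive path. */
static int lro_rx(uint32_t saddr, uint32_t daddr, uint16_t sport,
		  uint16_t dport, uint32_t seq, uint32_t plen)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_LRO; i++) {
		struct lro_stream *s = &streams[i];

		if (!s->in_use) {
			free_slot = i;
			continue;
		}
		if (s->saddr != saddr || s->daddr != daddr ||
		    s->sport != sport || s->dport != dport)
			continue;
		if (seq != s->next_seq) {
			/* out of order: flush, let the stack see it */
			lro_flush(s);
			return -1;
		}
		s->next_seq += plen;
		s->bytes += plen;
		return 0;
	}
	if (free_slot < 0)
		return -1;	/* too many concurrent streams */
	streams[free_slot] = (struct lro_stream){
		.in_use = 1, .saddr = saddr, .daddr = daddr,
		.sport = sport, .dport = dport,
		.next_seq = seq + plen, .bytes = plen,
	};
	return 0;
}

int main(void)
{
	lro_rx(1, 2, 80, 5000, 1000, 1448);	/* starts a stream */
	lro_rx(1, 2, 80, 5000, 2448, 1448);	/* in order: aggregated */
	lro_rx(1, 2, 80, 5000, 9999, 1448);	/* out of order: flushed */
	return 0;
}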
Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Andrew J. Gallatin <gallatin@myri.com>
---
drivers/net/myri10ge/myri10ge.c | 350 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 350 insertions(+)
Index: linux-mm/drivers/net/myri10ge/myri10ge.c
===================================================================
--- linux-mm.orig/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:05:33.000000000 +0200
+++ linux-mm/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:06:00.000000000 +0200
@@ -146,11 +146,31 @@
int pkt_done; /* packets completed */
};
+struct myri10ge_lro_packet {
+ struct hlist_node lro_node;
+ struct sk_buff *skb;
+ int timestamp;
+ __u32 tsval;
+ __u32 tsecr;
+ __u32 source_ip;
+ __u32 dest_ip;
+ __u32 next_seq;
+ __u32 ack_seq;
+ __u16 window;
+ __u16 source_port;
+ __u16 dest_port;
+ __u16 append_cnt;
+ __u16 mss;
+ __u16 vlan_tci;
+};
+
struct myri10ge_rx_done {
struct mcp_slot *entry;
dma_addr_t bus;
int cnt;
int idx;
+ struct hlist_head lro_active;
+ struct hlist_head lro_free;
};
struct myri10ge_priv {
@@ -162,6 +182,9 @@
struct myri10ge_rx_done rx_done;
int small_bytes;
int big_bytes;
+ int lro_flushed;
+ int lro_queued;
+ int lro_too_many_streams;
struct net_device *dev;
struct net_device_stats stats;
u8 __iomem *sram;
@@ -273,6 +296,10 @@
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+static int myri10ge_lro = 8;
+module_param(myri10ge_lro, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_lro, "Enable N large receive offload queues\n");
+
static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
@@ -787,6 +814,9 @@
mgp->rx_done.idx = 0;
mgp->rx_done.cnt = 0;
mgp->link_changes = 0;
+ mgp->lro_queued = 0;
+ mgp->lro_flushed = 0;
+ mgp->lro_too_many_streams = 0;
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_promisc(mgp, 0, 0);
myri10ge_change_pause(mgp, mgp->pause);
@@ -851,6 +881,292 @@
skb_pull(skb, MXGEFW_PAD);
}
+static inline int myri10ge_lro_csum(int tcplen, struct iphdr *iph, u32 csum)
+{
+ if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
+ return -1;
+
+ if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
+ tcplen, IPPROTO_TCP, csum)))
+ return -1;
+ return 0;
+}
+
+static inline void
+myri10ge_lro_flush(struct myri10ge_priv *mgp, struct myri10ge_lro_packet *lro)
+{
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct sk_buff *skb;
+ u32 *ts_ptr;
+
+ skb = lro->skb;
+
+ if (lro->append_cnt) {
+ /* incorporate the new len into the ip header and
+ * re-calculate the checksum, Note that
+ * eth_type_trans() left skb->data at the start of
+ * the vlan header, so we need to skip past it to
+ * get to the IP header */
+ if (lro->vlan_tci) {
+ iph = (struct iphdr *)(skb->data + VLAN_HLEN);
+ iph->tot_len = ntohs(skb->len - VLAN_HLEN);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ iph->tot_len = ntohs(skb->len);
+ }
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ /* incorporate the latest ack into the tcp header */
+ th = (struct tcphdr *)(iph + 1);
+ th->ack_seq = lro->ack_seq;
+ th->window = lro->window;
+
+ /* incorporate latest timestamp into the tcp header */
+ if (lro->timestamp) {
+ ts_ptr = (u32 *) (th + 1);
+ ts_ptr[1] = htonl(lro->tsval);
+ ts_ptr[2] = lro->tsecr;
+ }
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ }
+
+ skb_shinfo(skb)->gso_size = lro->mss;
+ netif_receive_skb(skb);
+ mgp->dev->last_rx = jiffies;
+ mgp->lro_queued += lro->append_cnt + 1;
+ mgp->lro_flushed++;
+ lro->skb = NULL;
+ lro->timestamp = 0;
+ lro->append_cnt = 0;
+ hlist_add_head(&lro->lro_node, &mgp->rx_done.lro_free);
+}
+
+static int
+myri10ge_lro_rx(struct myri10ge_priv *mgp, u8 * va,
+ struct skb_frag_struct *rx_frags, int len, unsigned int csum)
+{
+ struct ethhdr *eh;
+ struct vlan_ethhdr *vh;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ struct myri10ge_lro_packet *lro;
+ u32 *ts_ptr = NULL; /* XXX -Wuninitialized */
+ struct sk_buff *skb;
+ struct skb_frag_struct *skb_frags;
+ struct hlist_node *node;
+ int opt_bytes, tcp_data_len, hlen, pseudo_len, trim, llhlen;
+ __u32 seq;
+ __u16 ip_len, vlan_tci;
+
+ /* check to see that it is IP */
+ eh = (struct ethhdr *)(va + MXGEFW_PAD);
+ csum = ntohs((u16) csum);
+ if (eh->h_proto == ntohs(ETH_P_IP)) {
+ llhlen = ETH_HLEN;
+ vlan_tci = 0;
+ } else if (eh->h_proto == ntohs(ETH_P_8021Q)) {
+ vh = (struct vlan_ethhdr *)(va + MXGEFW_PAD);
+ if (vh->h_vlan_encapsulated_proto != ntohs(ETH_P_IP))
+ return -1;
+ llhlen = VLAN_ETH_HLEN;
+ vlan_tci = vh->h_vlan_TCI;
+ /* HW checksum starts after the ethernet header, we
+ * must subtract off the VLAN header's checksum before
+ * csum can be used */
+ csum = csum_sub(csum,
+ csum_partial(va + MXGEFW_PAD + ETH_HLEN,
+ VLAN_HLEN, 0));
+ } else {
+ return -1;
+ }
+
+ /* now check to see if it is TCP */
+ iph = (struct iphdr *)(va + llhlen + MXGEFW_PAD);
+ if (iph->protocol != IPPROTO_TCP)
+ return -1;
+
+ /* ensure there are no options */
+ if ((iph->ihl << 2) != sizeof(*iph))
+ return -1;
+
+ /* .. and the packet is not fragmented */
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
+ return -1;
+
+ /* find the TCP header */
+ th = (struct tcphdr *)(iph + 1);
+
+ /* ensure no bits set besides ack or psh */
+ if (th->fin || th->syn || th->rst || th->urg || th->ece
+ || th->cwr || !th->ack)
+ return -1;
+
+ /* check for timestamps. Since the only option we handle are
+ * timestamps, we only have to handle the simple case of
+ * aligned timestamps */
+
+ opt_bytes = (th->doff << 2) - sizeof(*th);
+ if (opt_bytes != 0) {
+ ts_ptr = (u32 *) (th + 1);
+ if (unlikely(opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
+ (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+ | (TCPOPT_TIMESTAMP << 8)
+ | TCPOLEN_TIMESTAMP))) {
+ return -1;
+ }
+ }
+
+ ip_len = ntohs(iph->tot_len);
+ tcp_data_len = ip_len - (th->doff << 2) - sizeof(*iph);
+
+ /*
+ * If frame is padded beyond the end of the IP packet,
+ * then we must trim the extra bytes off the end. We only
+ * do the actual trim after we have committed to doing
+ * the LRO.
+ */
+ trim = len - (ip_len + llhlen + MXGEFW_PAD);
+
+ /* ensure we received the full frame */
+ if (unlikely(trim < 0))
+ return -1;
+
+ hlen = ip_len + llhlen - tcp_data_len;
+ pseudo_len = len - llhlen - sizeof(*iph) - MXGEFW_PAD;
+
+ seq = ntohl(th->seq);
+
+ if (unlikely(myri10ge_lro_csum(pseudo_len, iph, csum)))
+ return -1;
+
+ /* now we have a packet that might be eligible for LRO,
+ * so see if it matches anything we might expect */
+
+ hlist_for_each_entry(lro, node, &mgp->rx_done.lro_active, lro_node) {
+ if (lro->source_port == th->source &&
+ lro->dest_port == th->dest &&
+ lro->source_ip == iph->saddr &&
+ lro->dest_ip == iph->daddr && lro->vlan_tci == vlan_tci) {
+ /* Try to append it */
+
+ if (unlikely(seq != lro->next_seq)) {
+ /* out of order packet */
+ hlist_del(&lro->lro_node);
+ myri10ge_lro_flush(mgp, lro);
+ return -1;
+ }
+ if (lro->timestamp) {
+ __u32 tsval = ntohl(*(ts_ptr + 1));
+ /* make sure timestamp values are increasing */
+ if (unlikely(lro->tsval > tsval ||
+ *(ts_ptr + 2) == 0)) {
+ return -1;
+ }
+ lro->tsval = tsval;
+ lro->tsecr = *(ts_ptr + 2);
+ }
+ lro->next_seq += tcp_data_len;
+ lro->ack_seq = th->ack_seq;
+ lro->window = th->window;
+ skb = lro->skb;
+ skb->data_len += tcp_data_len;
+ skb->len += tcp_data_len;
+ if (tcp_data_len > lro->mss)
+ lro->mss = tcp_data_len;
+
+ /* pull off the header and firmware pad
+ * before we copy the data */
+
+ hlen += MXGEFW_PAD;
+ rx_frags[0].page_offset += hlen;
+ rx_frags[0].size -= hlen;
+ len -= hlen;
+ skb_frags =
+ &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];
+ /* if it was just header (like a TCP ack with
+ * no data), release the page */
+ if (len <= 0) {
+ put_page(rx_frags[0].page);
+ } else {
+ while (len > 0) {
+ memcpy(skb_frags, rx_frags,
+ sizeof(*skb_frags));
+ len -= rx_frags->size;
+ rx_frags++;
+ skb_frags++;
+ skb_shinfo(skb)->nr_frags++;
+ }
+ }
+ if (trim)
+ skb_trim(skb, skb->len - trim);
+
+ lro->append_cnt++;
+
+ /* cheap, conservative test. We may waste
+ * some slots with a 1500 byte mtu */
+ if (skb_shinfo(skb)->nr_frags
+ + MYRI10GE_MAX_FRAGS_PER_FRAME > MAX_SKB_FRAGS
+ || mgp->dev->mtu + skb->len > 65535) {
+ hlist_del(&lro->lro_node);
+ myri10ge_lro_flush(mgp, lro);
+ }
+ return 0;
+ }
+ }
+
+ /* start a new packet */
+ if (!hlist_empty(&mgp->rx_done.lro_free)) {
+ lro = hlist_entry(mgp->rx_done.lro_free.first,
+ struct myri10ge_lro_packet, lro_node);
+ /* allocate an skb to attach the page(s) to */
+
+ skb = netdev_alloc_skb(mgp->dev, hlen + 16);
+ if (unlikely(skb == NULL))
+ return -1;
+
+ myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
+ skb->protocol = eth_type_trans(skb, mgp->dev);
+ skb->dev = mgp->dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ lro->skb = skb;
+ lro->source_ip = iph->saddr;
+ lro->dest_ip = iph->daddr;
+ lro->source_port = th->source;
+ lro->dest_port = th->dest;
+ lro->next_seq = seq + tcp_data_len;
+ lro->mss = tcp_data_len;
+ lro->ack_seq = th->ack_seq;
+ lro->window = th->window;
+ lro->vlan_tci = vlan_tci;
+ /* record timestamp if it is present */
+ if (opt_bytes) {
+ lro->timestamp = 1;
+ lro->tsval = ntohl(*(ts_ptr + 1));
+ lro->tsecr = *(ts_ptr + 2);
+ }
+ /* remove first packet from freelist.. */
+ hlist_del(&lro->lro_node);
+ /* .. and insert at the front of the active list */
+ hlist_add_head(&lro->lro_node, &mgp->rx_done.lro_active);
+
+ /* release the page if there was no data. We do it
+ * down here since the code above refers to the
+ * contents of the page */
+ if (skb_shinfo(skb)->frags[0].size <= 0) {
+ put_page(skb_shinfo(skb)->frags[0].page);
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+ if (trim)
+ skb_trim(skb, skb->len - trim);
+
+ return 0;
+ }
+ mgp->lro_too_many_streams++;
+ return -1;
+}
+
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
@@ -958,6 +1274,9 @@
remainder -= MYRI10GE_ALLOC_SIZE;
}
+ if (mgp->csum_flag && myri10ge_lro &&
+ (0 == myri10ge_lro_rx(mgp, va, rx_frags, len, csum)))
+ return 0;
hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
/* allocate an skb to attach the page(s) to. This is done
@@ -1049,6 +1368,8 @@
static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
{
struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ struct hlist_node *node, *node2;
+ struct myri10ge_lro_packet *lro;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
@@ -1081,6 +1402,11 @@
}
rx_done->idx = idx;
rx_done->cnt = cnt;
+ hlist_for_each_entry_safe(lro, node, node2, &mgp->rx_done.lro_active,
+ lro_node) {
+ hlist_del(&lro->lro_node);
+ myri10ge_lro_flush(mgp, lro);
+ }
mgp->stats.rx_packets += rx_packets;
mgp->stats.rx_bytes += rx_bytes;
@@ -1314,6 +1640,7 @@
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "tx_pkt_start", "tx_pkt_done",
"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
+ "lro_queued", "lro_flushed", "lro_too_many_streams",
"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
"link_changes", "link_up", "dropped_link_overflow",
"dropped_link_error_or_filtered", "dropped_multicast_filtered",
@@ -1364,6 +1691,9 @@
data[i++] = (unsigned int)mgp->tx.done;
data[i++] = (unsigned int)mgp->rx_small.cnt;
data[i++] = (unsigned int)mgp->rx_big.cnt;
+ data[i++] = (unsigned int)mgp->lro_queued;
+ data[i++] = (unsigned int)mgp->lro_flushed;
+ data[i++] = (unsigned int)mgp->lro_too_many_streams;
data[i++] = (unsigned int)mgp->wake_queue;
data[i++] = (unsigned int)mgp->stop_queue;
data[i++] = (unsigned int)mgp->watchdog_resets;
@@ -1505,6 +1835,18 @@
goto abort_with_rx_big_ring;
}
+ bytes = sizeof(struct myri10ge_lro_packet);
+ INIT_HLIST_HEAD(&mgp->rx_done.lro_free);
+ INIT_HLIST_HEAD(&mgp->rx_done.lro_active);
+ for (i = 0; i < myri10ge_lro; i++) {
+ struct myri10ge_lro_packet *lro;
+ lro = kzalloc(bytes, GFP_KERNEL);
+ if (lro != NULL) {
+ INIT_HLIST_NODE(&lro->lro_node);
+ hlist_add_head(&lro->lro_node, &mgp->rx_done.lro_free);
+ }
+ }
+
return 0;
abort_with_rx_big_ring:
@@ -1551,10 +1893,18 @@
struct myri10ge_priv *mgp;
struct sk_buff *skb;
struct myri10ge_tx_buf *tx;
+ struct hlist_node *node, *node2;
+ struct myri10ge_lro_packet *lro;
int i, len, idx;
mgp = netdev_priv(dev);
+ hlist_for_each_entry_safe(lro, node, node2, &mgp->rx_done.lro_active,
+ lro_node) {
+ hlist_del(&lro->lro_node);
+ kfree(lro);
+ }
+
for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
idx = i & mgp->rx_big.mask;
if (i == mgp->rx_big.fill_cnt - 1)
* [PATCH 3/3] myri10ge: update driver version to 1.1.0
From: Brice Goglin @ 2006-09-27 23:12 UTC
To: Jeff Garzik, netdev
Update driver version to 1.1.0.
Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Andrew J. Gallatin <gallatin@myri.com>
---
drivers/net/myri10ge/myri10ge.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Index: linux-mm/drivers/net/myri10ge/myri10ge.c
===================================================================
--- linux-mm.orig/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:06:00.000000000 +0200
+++ linux-mm/drivers/net/myri10ge/myri10ge.c 2006-09-28 01:06:05.000000000 +0200
@@ -73,7 +73,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.0.0"
+#define MYRI10GE_VERSION_STR "1.1.0"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");