* [PATCH 2/3] cxgb4i_v3: main driver files
From: Rakesh Ranjan @ 2010-05-15 17:15 UTC
  To: NETDEVML, SCSIDEVML, OISCSIML
  Cc: LKML, Karen Xie, David Miller, James Bottomley, Mike Christie,
	Anish Bhatt, Rakesh Ranjan, Rakesh Ranjan

From: Rakesh Ranjan <rranjan-UJ4WrezqVcvwEYdC/TKypOqkaFVsf6Qi@public.gmane.org>

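Add the main cxgb4i driver files for the Chelsio T4 iSCSI offload:
the driver header (cxgb4i.h), direct data placement support
(cxgb4i_ddp.c/.h), the connection offload and CPL message handling
code (cxgb4i_offload.c/.h), and the snic management code
(cxgb4i_snic.c).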

Signed-off-by: Rakesh Ranjan <rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
---
 drivers/scsi/cxgb4i/cxgb4i.h         |  101 ++
 drivers/scsi/cxgb4i/cxgb4i_ddp.c     |  678 +++++++++++++
 drivers/scsi/cxgb4i/cxgb4i_ddp.h     |  118 +++
 drivers/scsi/cxgb4i/cxgb4i_offload.c | 1846 ++++++++++++++++++++++++++++++++++
 drivers/scsi/cxgb4i/cxgb4i_offload.h |   91 ++
 drivers/scsi/cxgb4i/cxgb4i_snic.c    |  260 +++++
 6 files changed, 3094 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.c
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.c
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_snic.c

diff --git a/drivers/scsi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgb4i/cxgb4i.h
new file mode 100644
index 0000000..fbf7699
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i.h
@@ -0,0 +1,101 @@
+/*
+ * cxgb4i.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_H__
+#define	__CXGB4I_H__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <scsi/libiscsi_tcp.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "l2t.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+
+#include "libcxgbi.h"
+#include "cxgb4i_ddp.h"
+#include "cxgb4i_offload.h"
+
+#define	CXGB4I_SCSI_HOST_QDEPTH	1024
+#define	CXGB4I_MAX_TARGET	CXGB4I_MAX_CONN
+#define	CXGB4I_MAX_LUN		512
+#define	ISCSI_PDU_NONPAYLOAD_MAX \
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + \
+	 (2 * ISCSI_DIGEST_SIZE))
+
+struct cxgb4i_snic;
+struct cxgb4i_host;
+struct cxgb4i_endpoint;
+typedef int (*cxgb4i_cplhandler_func)(struct cxgb4i_snic *, struct sk_buff *);
+
+struct cxgb4i_snic {
+	struct list_head list_head;
+	spinlock_t lock;
+	struct cxgbi_device cdev;
+	struct cxgbi_hba *hba[MAX_NPORTS];
+	unsigned char hba_cnt;
+	unsigned int flags;
+	unsigned int tx_max_size;
+	unsigned int rx_max_size;
+	struct cxgb4_lld_info lldi;
+	struct cxgb4i_ddp_info *ddp;
+	cxgb4i_cplhandler_func *handlers;
+};
+
+int cxgb4i_ofld_init(struct cxgb4i_snic *);
+void cxgb4i_ofld_cleanup(struct cxgb4i_snic *);
+struct cxgb4i_snic *cxgb4i_find_snic(struct net_device *, __be32);
+struct cxgbi_hba *cxgb4i_hba_find_by_netdev(struct net_device *);
+struct cxgbi_hba *cxgb4i_hba_add(struct cxgb4i_snic *, struct net_device *);
+void cxgb4i_hba_remove(struct cxgbi_hba *);
+int cxgb4i_iscsi_init(void);
+void cxgb4i_iscsi_cleanup(void);
+
+static inline void cxgb4i_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
+{
+	chba->ipv4addr = ipaddr;
+}
+
+static inline __be32 cxgb4i_get_iscsi_ipv4(struct cxgbi_hba *chba)
+{
+	return chba->ipv4addr;
+}
+
+static inline struct cxgb4i_snic *cxgb4i_get_snic(struct cxgbi_device *cdev)
+{
+	return (struct cxgb4i_snic *)cdev->dd_data;
+}
+
+
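+/*
+ * TCB (TCP control block) word 0 holds the connection's ULP settings:
+ * the ULP type (iSCSI) in bits 3:0 and the ULP "raw" submode bits in
+ * bits 11:4.  They are programmed through CPL_SET_TCB_FIELD in
+ * cxgb4i_ddp.c.
+ */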
+#define W_TCB_ULP_TYPE          0
+#define TCB_ULP_TYPE_SHIFT      0
+#define TCB_ULP_TYPE_MASK       0xfULL
+#define TCB_ULP_TYPE(x)         ((x) << TCB_ULP_TYPE_SHIFT)
+
+#define W_TCB_ULP_RAW           0
+#define TCB_ULP_RAW_SHIFT       4
+#define TCB_ULP_RAW_MASK        0xffULL
+#define TCB_ULP_RAW(x)          ((x) << TCB_ULP_RAW_SHIFT)
+
+
+#endif	/* __CXGB4I_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_ddp.c b/drivers/scsi/cxgb4i/cxgb4i_ddp.c
new file mode 100644
index 0000000..1e53c0e
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_ddp.c
@@ -0,0 +1,678 @@
+/*
+ * cxgb4i_ddp.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/skbuff.h>
+#include <linux/scatterlist.h>
+
+
+#include "libcxgbi.h"
+#include "cxgb4i.h"
+#include "cxgb4i_ddp.h"
+
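+/*
+ * Direct Data Placement (DDP): for large enough transfers the adapter
+ * places the received iSCSI data payload directly into the destination
+ * buffer, which is described to the hardware by a list of page pods
+ * (PPODs) written into adapter memory.
+ */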
+#define DDP_PGIDX_MAX	4
+#define DDP_THRESHOLD	2048
+
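+/* DDP page sizes supported by the hardware: 4KB, 8KB, 16KB and 64KB */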
+static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
+static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
+static unsigned char page_idx = DDP_PGIDX_MAX;
+
+static unsigned char sw_tag_idx_bits;
+static unsigned char sw_tag_age_bits;
+
+
+static inline void cxgb4i_ddp_ppod_set(struct pagepod *ppod,
+					struct pagepod_hdr *hdr,
+					struct cxgbi_gather_list *gl,
+					unsigned int pidx)
+{
+	int i;
+
+	memcpy(ppod, hdr, sizeof(*hdr));
+	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, pidx++) {
+		ppod->addr[i] = pidx < gl->nelem ?
+			cpu_to_be64(gl->phys_addr[pidx]) : 0ULL;
+	}
+}
+
+static inline void cxgb4i_ddp_ppod_clear(struct pagepod *ppod)
+{
+	memset(ppod, 0, sizeof(*ppod));
+}
+
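+/*
+ * Build the header of a ULP_TX memory-write work request that copies
+ * dlen bytes of page pods into adapter memory at pm_addr; the pod data
+ * itself is described by the single DSGL entry following the header.
+ */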
+static inline void cxgb4i_ddp_ulp_mem_io_set_hdr(struct ulp_mem_io *req,
+					unsigned int wr_len, unsigned int dlen,
+					unsigned int pm_addr)
+{
+	struct ulptx_sgl *sgl;
+
+	INIT_ULPTX_WR(req, wr_len, 0, 0);
+	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE));
+	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
+	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
+
+	sgl = (struct ulptx_sgl *)(req + 1);
+	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1));
+	sgl->len0 = htonl(dlen);
+}
+
+static int cxgb4i_ddp_ppod_write_sgl(struct cxgb4i_ddp_info *ddp,
+					struct pagepod_hdr *hdr,
+					unsigned int idx,
+					unsigned int npods,
+					struct cxgbi_gather_list *gl,
+					unsigned int gl_pidx)
+{
+	unsigned int dlen = PPOD_SIZE * npods;
+	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
+	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+					sizeof(struct ulptx_sgl), 16);
+	struct sk_buff *skb = alloc_skb(wr_len + dlen, GFP_ATOMIC);
+	struct ulp_mem_io *req;
+	struct ulptx_sgl *sgl;
+	struct pagepod *ppod;
+	unsigned int i;
+
+	if (!skb) {
+		cxgbi_log_error("snic 0x%p, idx %u, npods %u, OOM\n",
+				ddp->snic, idx, npods);
+		return -ENOMEM;
+	}
+
+	memset(skb->data, 0, wr_len + dlen);
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+	cxgb4i_ddp_ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
+	sgl = (struct ulptx_sgl *)(req + 1);
+	ppod = (struct pagepod *)(sgl + 1);
+	sgl->addr0 = cpu_to_be64(virt_to_phys(ppod));
+
+	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
+		if (!hdr && !gl)
+			cxgb4i_ddp_ppod_clear(ppod);
+		else
+			cxgb4i_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
+
+	}
+
+	cxgb4_ofld_send(ddp->snic->lldi.ports[0], skb);
+
+	return 0;
+}
+
+static int cxgb4i_ddp_set_map(struct cxgb4i_ddp_info *ddp,
+					struct pagepod_hdr *hdr,
+					unsigned int idx,
+					unsigned int npods,
+					struct cxgbi_gather_list *gl)
+{
+	unsigned int pidx = 0;
+	unsigned int w_npods = 0;
+	unsigned int cnt;
+	int err = 0;
+
+	for (; w_npods < npods; idx += cnt, w_npods += cnt,
+					pidx += PPOD_PAGES_MAX) {
+		cnt = npods - w_npods;
+		if (cnt > ULPMEM_DSGL_MAX_NPPODS)
+			cnt = ULPMEM_DSGL_MAX_NPPODS;
+		err = cxgb4i_ddp_ppod_write_sgl(ddp, hdr, idx, cnt, gl, pidx);
+
+		if (err < 0)
+			break;
+	}
+
+	return err;
+}
+
+static void cxgb4i_ddp_clear_map(struct cxgb4i_ddp_info *ddp,
+						unsigned int tag,
+						unsigned int idx,
+						unsigned int npods)
+{
+	int err;
+	unsigned int w_npods = 0;
+	unsigned int cnt;
+
+	for (; w_npods < npods; idx += cnt, w_npods += cnt) {
+		cnt = npods - w_npods;
+
+		if (cnt > ULPMEM_DSGL_MAX_NPPODS)
+			cnt = ULPMEM_DSGL_MAX_NPPODS;
+		err = cxgb4i_ddp_ppod_write_sgl(ddp, NULL, idx, cnt, NULL, 0);
+
+		if (err < 0)
+			break;
+	}
+}
+
+static inline int cxgb4i_ddp_find_unused_entries(struct cxgb4i_ddp_info *ddp,
+					unsigned int start, unsigned int max,
+					unsigned int count,
+					struct cxgbi_gather_list *gl)
+{
+	unsigned int i, j, k;
+
+	/*  not enough entries */
+	if ((max - start) < count)
+		return -EBUSY;
+
+	max -= count;
+	spin_lock(&ddp->map_lock);
+	for (i = start; i < max;) {
+		for (j = 0, k = i; j < count; j++, k++) {
+			if (ddp->gl_map[k])
+				break;
+		}
+		if (j == count) {
+			for (j = 0, k = i; j < count; j++, k++)
+				ddp->gl_map[k] = gl;
+			spin_unlock(&ddp->map_lock);
+			return i;
+		}
+		i += j + 1;
+	}
+	spin_unlock(&ddp->map_lock);
+	return -EBUSY;
+}
+
+static inline void cxgb4i_ddp_unmark_entries(struct cxgb4i_ddp_info *ddp,
+							int start, int count)
+{
+	spin_lock(&ddp->map_lock);
+	memset(&ddp->gl_map[start], 0,
+			count * sizeof(struct cxgbi_gather_list *));
+	spin_unlock(&ddp->map_lock);
+}
+
+static int cxgb4i_ddp_find_page_index(unsigned long pgsz)
+{
+	int i;
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		if (pgsz == (1UL << ddp_page_shift[i]))
+			return i;
+	}
+	cxgbi_log_debug("ddp page size 0x%lx not supported\n", pgsz);
+
+	return DDP_PGIDX_MAX;
+}
+
+static int cxgb4i_ddp_adjust_page_table(void)
+{
+	int i;
+	unsigned int base_order, order;
+
+	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
+		cxgbi_log_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
+				PAGE_SIZE, 1UL << ddp_page_shift[0]);
+		return -EINVAL;
+	}
+
+	base_order = get_order(1UL << ddp_page_shift[0]);
+	order = get_order(1UL << PAGE_SHIFT);
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		/* first is the kernel page size, then just doubling the size */
+		ddp_page_order[i] = order - base_order + i;
+		ddp_page_shift[i] = PAGE_SHIFT + i;
+	}
+
+	return 0;
+}
+
+static inline void cxgb4i_ddp_gl_unmap(struct pci_dev *pdev,
+					struct cxgbi_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++)
+		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
+				PCI_DMA_FROMDEVICE);
+}
+
+static inline int cxgb4i_ddp_gl_map(struct pci_dev *pdev,
+				struct cxgbi_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++) {
+		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
+						PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i])))
+			goto unmap;
+	}
+
+	return i;
+
+unmap:
+	if (i) {
+		unsigned int nelem = gl->nelem;
+
+		gl->nelem = i;
+		cxgb4i_ddp_gl_unmap(pdev, gl);
+		gl->nelem = nelem;
+	}
+	return -ENOMEM;
+}
+
+
+void cxgb4i_ddp_release_gl(struct cxgbi_gather_list *gl,
+				struct pci_dev *pdev)
+{
+	cxgb4i_ddp_gl_unmap(pdev, gl);
+	kfree(gl);
+}
+
+struct cxgbi_gather_list *cxgb4i_ddp_make_gl(unsigned int xferlen,
+						struct scatterlist *sgl,
+						unsigned int sgcnt,
+						struct pci_dev *pdev,
+						gfp_t gfp)
+{
+	struct cxgbi_gather_list *gl;
+	struct scatterlist *sg = sgl;
+	struct page *sgpage = sg_page(sg);
+	unsigned int sglen = sg->length;
+	unsigned int sgoffset = sg->offset;
+	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
+				PAGE_SHIFT;
+	int i = 1, j = 0;
+
+	if (xferlen < DDP_THRESHOLD) {
+		cxgbi_log_debug("xfer %u < threshold %u, no ddp.\n",
+				xferlen, DDP_THRESHOLD);
+		return NULL;
+	}
+
+	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
+			npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
+			gfp);
+	if (!gl)
+		return NULL;
+
+	gl->pages = (struct page **)&gl->phys_addr[npages];
+	gl->length = xferlen;
+	gl->offset = sgoffset;
+	gl->pages[0] = sgpage;
+
+	sg = sg_next(sg);
+	while (sg) {
+		struct page *page = sg_page(sg);
+
+		if (sgpage == page && sg->offset == sgoffset + sglen)
+			sglen += sg->length;
+		else {
+			/*  make sure the sgl is fit for ddp:
+			 *  each has the same page size, and
+			 *  all of the middle pages are used completely
+			 */
+			if ((j && sgoffset) || ((i != sgcnt - 1) &&
+					 ((sglen + sgoffset) & ~PAGE_MASK)))
+				goto error_out;
+
+			j++;
+			if (j == gl->nelem || sg->offset)
+				goto error_out;
+			gl->pages[j] = page;
+			sglen = sg->length;
+			sgoffset = sg->offset;
+			sgpage = page;
+		}
+		i++;
+		sg = sg_next(sg);
+	}
+	gl->nelem = ++j;
+
+	if (cxgb4i_ddp_gl_map(pdev, gl) < 0)
+		goto error_out;
+
+	return gl;
+
+error_out:
+	kfree(gl);
+	return NULL;
+}
+
+
+static void cxgb4i_ddp_tag_release(struct cxgbi_device *cdev, u32 tag)
+{
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(cdev);
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	u32 idx;
+
+	if (!ddp) {
+		cxgbi_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
+		return;
+	}
+
+	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+	if (idx < ddp->nppods) {
+		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
+		unsigned int npods;
+
+		if (!gl || !gl->nelem) {
+			cxgbi_log_error("rel 0x%x, idx 0x%x, gl 0x%p, %u\n",
+					tag, idx, gl, gl ? gl->nelem : 0);
+			return;
+		}
+		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+		cxgbi_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
+				tag, idx, npods);
+		cxgb4i_ddp_clear_map(ddp, tag, idx, npods);
+		cxgb4i_ddp_unmark_entries(ddp, idx, npods);
+		cxgb4i_ddp_release_gl(gl, ddp->pdev);
+	} else
+		cxgbi_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
+				tag, idx, ddp->nppods);
+}
+
+static int cxgb4i_ddp_tag_reserve(struct cxgbi_device *cdev, unsigned int tid,
+				struct cxgbi_tag_format *tformat, u32 *tagp,
+				struct cxgbi_gather_list *gl, gfp_t gfp)
+{
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(cdev);
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	struct pagepod_hdr hdr;
+	unsigned int npods;
+	int idx = -1;
+	int err = -ENOMEM;
+	u32 sw_tag = *tagp;
+	u32 tag;
+
+	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
+			gl->length < DDP_THRESHOLD) {
+		cxgbi_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
+				page_idx, gl->length, DDP_THRESHOLD);
+		return -EINVAL;
+	}
+
+	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+
+	if (ddp->idx_last == ddp->nppods)
+		idx = cxgb4i_ddp_find_unused_entries(ddp, 0, ddp->nppods,
+							npods, gl);
+	else {
+		idx = cxgb4i_ddp_find_unused_entries(ddp, ddp->idx_last + 1,
+							ddp->nppods, npods,
+							gl);
+		if (idx < 0 && ddp->idx_last >= npods) {
+			idx = cxgb4i_ddp_find_unused_entries(ddp, 0,
+				min(ddp->idx_last + npods, ddp->nppods),
+							npods, gl);
+		}
+	}
+	if (idx < 0) {
+		cxgbi_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
+				gl->length, gl->nelem, npods);
+		return idx;
+	}
+
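+	/*
+	 * the ddp tag carries the page-pod index in the reserved bits
+	 * (above the 6-bit color field); the remaining bits keep the
+	 * caller's sw tag (itt/age).
+	 */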
+	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
+	tag |= idx << PPOD_IDX_SHIFT;
+
+	hdr.rsvd = 0;
+	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
+	hdr.max_offset = htonl(gl->length);
+	hdr.page_offset = htonl(gl->offset);
+
+	err = cxgb4i_ddp_set_map(ddp, &hdr, idx, npods, gl);
+	if (err < 0)
+		goto unmark_entries;
+
+	ddp->idx_last = idx;
+	cxgbi_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
+			gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
+			idx, npods);
+	*tagp = tag;
+	return 0;
+
+unmark_entries:
+	cxgb4i_ddp_unmark_entries(ddp, idx, npods);
+	return err;
+}
+
+
+static int cxgb4i_ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
+					unsigned int tid, int pg_idx,
+					bool reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
+
+	if (!skb)
+		return -ENOMEM;
+
+	/*  set up ulp submode and page size */
+	val = (val & 0x03) << 2;
+	val |= TCB_ULP_TYPE(ULP_MODE_ISCSI);
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->hwtid));
+	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
+	req->mask = cpu_to_be64(TCB_ULP_TYPE(TCB_ULP_TYPE_MASK));
+	req->val = cpu_to_be64(val);
+
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	cxgb4_ofld_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[0], skb);
+	return 0;
+}
+
+int cxgb4i_ddp_setup_conn_host_pagesize(struct cxgbi_sock *csk,
+						unsigned int tid,
+						int reply)
+{
+	return cxgb4i_ddp_setup_conn_pgidx(csk, tid, page_idx, reply);
+}
+
+int cxgb4i_ddp_setup_conn_pagesize(struct cxgbi_sock *csk, unsigned int tid,
+					int reply, unsigned long pgsz)
+{
+	int pgidx = cxgb4i_ddp_find_page_index(pgsz);
+
+	return cxgb4i_ddp_setup_conn_pgidx(csk, tid, pgidx, reply);
+}
+
+int cxgb4i_ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
+				int hcrc, int dcrc, int reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = (hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? ULP_CRC_DATA : 0);
+	val = TCB_ULP_RAW(val);
+	val |= TCB_ULP_TYPE(ULP_MODE_ISCSI);
+
+	if (!skb)
+		return -ENOMEM;
+
+	/*  set up ulp submode and page size */
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
+	req->mask = cpu_to_be64(TCB_ULP_RAW(TCB_ULP_RAW_MASK));
+	req->val = cpu_to_be64(val);
+
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	cxgb4_ofld_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[0], skb);
+	return 0;
+}
+
+static void __cxgb4i_ddp_cleanup(struct kref *kref)
+{
+	int i = 0;
+	struct cxgb4i_ddp_info *ddp = container_of(kref,
+						struct cxgb4i_ddp_info,
+						refcnt);
+
+	cxgbi_log_info("kref release ddp 0x%p, snic 0x%p\n", ddp, ddp->snic);
+
+	ddp->snic->ddp = NULL;
+
+	while (i < ddp->nppods) {
+		struct cxgbi_gather_list *gl = ddp->gl_map[i];
+
+		if (gl) {
+			int npods = (gl->nelem + PPOD_PAGES_MAX - 1) >>
+							PPOD_PAGES_SHIFT;
+			cxgbi_log_info("snic 0x%p, ddp %d + %d\n",
+						ddp->snic, i, npods);
+			kfree(gl);
+			i += npods;
+		} else
+			i++;
+	}
+	cxgbi_free_big_mem(ddp);
+}
+
+
+static void __cxgb4i_ddp_init(struct cxgb4i_snic *snic)
+{
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	unsigned int ppmax, bits, tagmask, pgsz_factor[4];
+	int i;
+
+	if (ddp) {
+		kref_get(&ddp->refcnt);
+		cxgbi_log_warn("snic 0x%p, ddp 0x%p already set up\n",
+				snic, snic->ddp);
+		return;
+	}
+
+	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
+	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+	snic->cdev.tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
+
+	cxgbi_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits\n",
+			ISCSI_ITT_MASK, sw_tag_idx_bits,
+			ISCSI_AGE_MASK, sw_tag_age_bits);
+
+	ppmax = (snic->lldi.vr->iscsi.size >> PPOD_SIZE_SHIFT);
+	bits = __ilog2_u32(ppmax) + 1;
+	if (bits > PPOD_IDX_MAX_SIZE)
+		bits = PPOD_IDX_MAX_SIZE;
+	ppmax = (1 << (bits - 1)) - 1;
+
+	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgb4i_ddp_info) +
+			ppmax * (sizeof(struct cxgbi_gather_list *) +
+				sizeof(struct sk_buff *)),
+				GFP_KERNEL);
+	if (!ddp) {
+		cxgbi_log_warn("snic 0x%p unable to alloc ddp 0x%d, "
+			       "ddp disabled\n", snic, ppmax);
+		return;
+	}
+
+	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
+	spin_lock_init(&ddp->map_lock);
+	kref_init(&ddp->refcnt);
+
+	ddp->snic = snic;
+	ddp->pdev = snic->lldi.pdev;
+	ddp->max_txsz = min_t(unsigned int,
+				snic->lldi.iscsi_iolen,
+				ULP2_MAX_PKT_SIZE);
+	ddp->max_rxsz = min_t(unsigned int,
+				snic->lldi.iscsi_iolen,
+				ULP2_MAX_PKT_SIZE);
+	ddp->llimit = snic->lldi.vr->iscsi.start;
+	ddp->ulimit = ddp->llimit + snic->lldi.vr->iscsi.size;
+	ddp->nppods = ppmax;
+	ddp->idx_last = ppmax;
+	ddp->idx_bits = bits;
+	ddp->idx_mask = (1 << bits) - 1;
+	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
+
+	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+	for (i = 0; i < DDP_PGIDX_MAX; i++)
+		pgsz_factor[i] = ddp_page_order[i];
+
+	cxgb4_iscsi_init(snic->lldi.ports[0], tagmask, pgsz_factor);
+	snic->ddp = ddp;
+
+	snic->cdev.tag_format.rsvd_bits = ddp->idx_bits;
+	snic->cdev.tag_format.rsvd_shift = PPOD_IDX_SHIFT;
+	snic->cdev.tag_format.rsvd_mask =
+		((1 << snic->cdev.tag_format.rsvd_bits) - 1);
+
+	cxgbi_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
+			snic->cdev.tag_format.sw_bits,
+			snic->cdev.tag_format.rsvd_bits,
+			snic->cdev.tag_format.rsvd_shift,
+			snic->cdev.tag_format.rsvd_mask);
+
+	snic->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	snic->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+
+	cxgbi_log_info("max payload size: %u/%u, %u/%u.\n",
+			snic->tx_max_size, ddp->max_txsz,
+			snic->rx_max_size, ddp->max_rxsz);
+
+	cxgbi_log_info("snic 0x%p, nppods %u, bits %u, mask 0x%x,0x%x "
+			"pkt %u/%u, %u/%u\n",
+			snic, ppmax, ddp->idx_bits, ddp->idx_mask,
+			ddp->rsvd_tag_mask, ddp->max_txsz,
+			snic->lldi.iscsi_iolen,
+			ddp->max_rxsz, snic->lldi.iscsi_iolen);
+
+	return;
+}
+
+void cxgb4i_ddp_init(struct cxgb4i_snic *snic)
+{
+	if (page_idx == DDP_PGIDX_MAX) {
+		page_idx = cxgb4i_ddp_find_page_index(PAGE_SIZE);
+
+		if (page_idx == DDP_PGIDX_MAX) {
+			cxgbi_log_info("system PAGE_SIZE %lu, update hw\n",
+					PAGE_SIZE);
+
+			if (cxgb4i_ddp_adjust_page_table()) {
+				cxgbi_log_info("PAGE_SIZE %lu, ddp disabled\n",
+						PAGE_SIZE);
+				return;
+			}
+			page_idx = cxgb4i_ddp_find_page_index(PAGE_SIZE);
+		}
+		cxgbi_log_info("system PAGE_SIZE %lu, ddp idx %u\n",
+				PAGE_SIZE, page_idx);
+	}
+
+	__cxgb4i_ddp_init(snic);
+	snic->cdev.ddp_make_gl = cxgb4i_ddp_make_gl;
+	snic->cdev.ddp_release_gl = cxgb4i_ddp_release_gl;
+	snic->cdev.ddp_tag_reserve = cxgb4i_ddp_tag_reserve;
+	snic->cdev.ddp_tag_release = cxgb4i_ddp_tag_release;
+}
+
+void cxgb4i_ddp_cleanup(struct cxgb4i_snic *snic)
+{
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+
+	cxgbi_log_info("snic 0x%p, release ddp 0x%p\n", snic, ddp);
+	if (ddp)
+		kref_put(&ddp->refcnt, __cxgb4i_ddp_cleanup);
+}
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_ddp.h b/drivers/scsi/cxgb4i/cxgb4i_ddp.h
new file mode 100644
index 0000000..f51cb37
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_ddp.h
@@ -0,0 +1,118 @@
+/*
+ * cxgb4i_ddp.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_DDP_H__
+#define	__CXGB4I_DDP_H__
+
+#include <linux/vmalloc.h>
+
+#include "libcxgbi.h"
+
+struct cxgbi_sock;
+
+struct cxgb4i_ddp_info {
+	struct list_head list;
+	struct kref refcnt;
+	struct cxgb4i_snic *snic;
+	struct pci_dev *pdev;
+	unsigned int max_txsz;
+	unsigned int max_rxsz;
+	unsigned int llimit;
+	unsigned int ulimit;
+	unsigned int nppods;
+	unsigned int idx_last;
+	unsigned char idx_bits;
+	unsigned char filler[3];
+	unsigned int idx_mask;
+	unsigned int rsvd_tag_mask;
+	spinlock_t map_lock;
+	struct cxgbi_gather_list **gl_map;
+};
+
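+/*
+ * A page pod (PPOD) is a 64-byte structure in adapter memory describing
+ * part of a DDP buffer: a header plus the DMA addresses of the host
+ * pages; consecutive pods overlap by one page address.
+ */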
+struct pagepod_hdr {
+	unsigned int vld_tid;
+	unsigned int pgsz_tag_clr;
+	unsigned int max_offset;
+	unsigned int page_offset;
+	unsigned long long rsvd;
+};
+
+struct pagepod {
+	struct pagepod_hdr hdr;
+	unsigned long long addr[PPOD_PAGES_MAX + 1];
+};
+
+struct cpl_rx_data_ddp {
+	union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	};
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
+#define PPOD_SIZE               sizeof(struct pagepod)  /*  64 */
+#define PPOD_SIZE_SHIFT         6
+
+#define ULPMEM_DSGL_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS	4	/*  256/PPOD_SIZE */
+#define PCIE_MEMWIN_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT	0
+#define PPOD_COLOR_MASK		0x3F
+#define PPOD_COLOR_SIZE         6
+#define PPOD_COLOR(x)		((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_TAG_SHIFT	6
+#define PPOD_TAG_MASK	0xFFFFFF
+#define PPOD_TAG(x)	((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_PGSZ_SHIFT	30
+#define PPOD_PGSZ_MASK	0x3
+#define PPOD_PGSZ(x)	((x) << PPOD_PGSZ_SHIFT)
+
+#define PPOD_TID_SHIFT	32
+#define PPOD_TID_MASK	0xFFFFFF
+#define PPOD_TID(x)	((__u64)(x) << PPOD_TID_SHIFT)
+
+#define PPOD_VALID_SHIFT	56
+#define PPOD_VALID(x)	((__u64)(x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG	PPOD_VALID(1ULL)
+
+#define PPOD_LEN_SHIFT	32
+#define PPOD_LEN_MASK	0xFFFFFFFF
+#define PPOD_LEN(x)	((__u64)(x) << PPOD_LEN_SHIFT)
+
+#define PPOD_OFST_SHIFT	0
+#define PPOD_OFST_MASK	0xFFFFFFFF
+#define PPOD_OFST(x)	((x) << PPOD_OFST_SHIFT)
+
+#define PPOD_IDX_SHIFT          PPOD_COLOR_SIZE
+#define PPOD_IDX_MAX_SIZE       24
+
+int cxgb4i_ddp_setup_conn_host_pagesize(struct cxgbi_sock*, unsigned int,
+					int);
+int cxgb4i_ddp_setup_conn_digest(struct cxgbi_sock *, unsigned int,
+				int, int, int);
+int cxgb4i_snic_ddp_info(struct cxgb4i_snic *, struct cxgbi_tag_format *,
+			unsigned int *, unsigned int *);
+
+void cxgb4i_ddp_init(struct cxgb4i_snic *);
+void cxgb4i_ddp_cleanup(struct cxgb4i_snic *);
+
+#endif	/* __CXGB4I_DDP_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_offload.c b/drivers/scsi/cxgb4i/cxgb4i_offload.c
new file mode 100644
index 0000000..87edb14
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_offload.c
@@ -0,0 +1,1846 @@
+/*
+ * cxgb4i_offload.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/if_vlan.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <net/tcp.h>
+
+#include "libcxgbi.h"
+#include "cxgb4i.h"
+#include "cxgb4i_offload.h"
+
+static int cxgb4i_rcv_win = 256 * 1024;
+module_param(cxgb4i_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
+
+static int cxgb4i_snd_win = 128 * 1024;
+module_param(cxgb4i_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
+
+static int cxgb4i_rx_credit_thres = 10 * 1024;
+module_param(cxgb4i_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
+		"RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb4i_max_connect = (8 * 1024);
+module_param(cxgb4i_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
+
+static unsigned short cxgb4i_sport_base = 20000;
+module_param(cxgb4i_sport_base, ushort, 0644);
+MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define RCV_BUFSIZ_MASK	0x3FFU
+
+static void cxgb4i_sock_release_offload_resources(struct cxgbi_sock *);
+static void cxgbi_sock_conn_closing(struct cxgbi_sock *);
+static int cxgb4i_sock_push_tx_frames(struct cxgbi_sock *, int);
+
+
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data.  We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+	return skb->len <= (MAX_IMM_TX_PKT_LEN -
+			sizeof(struct fw_ofld_tx_data_wr));
+}
+
+static void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+	unsigned int read = 0;
+	struct iscsi_conn *conn = csk->user_data;
+	int err = 0;
+
+	cxgbi_rx_debug("csk 0x%p.\n", csk);
+
+	read_lock(&csk->callback_lock);
+	if (unlikely(!conn || conn->suspend_rx)) {
+		cxgbi_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
+				conn, conn ? conn->id : 0xFF,
+				conn ? conn->suspend_rx : 0xFF);
+		read_unlock(&csk->callback_lock);
+		return;
+	}
+	skb = skb_peek(&csk->receive_queue);
+	while (!err && skb) {
+		__skb_unlink(skb, &csk->receive_queue);
+		read += cxgb4i_skb_rx_pdulen(skb);
+		cxgbi_rx_debug("conn 0x%p, csk 0x%p, rx skb 0x%p, pdulen %u\n",
+				conn, csk, skb, cxgb4i_skb_rx_pdulen(skb));
+		if (cxgb4i_skb_flags(skb) & CXGB4I_SKCB_FLAG_HDR_RCVD)
+			err = cxgbi_conn_read_bhs_pdu_skb(conn, skb);
+		else if (cxgb4i_skb_flags(skb) == CXGB4I_SKCB_FLAG_DATA_RCVD)
+			err = cxgbi_conn_read_data_pdu_skb(conn, skb);
+		__kfree_skb(skb);
+		skb = skb_peek(&csk->receive_queue);
+	}
+	read_unlock(&csk->callback_lock);
+	csk->copied_seq += read;
+	cxgb4i_sock_rx_credits(csk, read);
+	conn->rxdata_octets += read;
+
+	if (err) {
+		cxgbi_log_info("conn 0x%p rx failed err %d.\n", conn, err);
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	}
+}
+
+static void cxgb4i_sock_closed(struct cxgbi_sock *csk)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u, flags 0x%lx\n",
+			csk, csk->state, csk->flags);
+
+	cxgbi_sock_put_port(csk);
+	cxgb4i_sock_release_offload_resources(csk);
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSED);
+	cxgbi_sock_conn_closing(csk);
+}
+
+static unsigned int cxgb4i_find_best_mtu(struct cxgb4i_snic *snic,
+						unsigned short mtu)
+{
+	int i = 0;
+
+	while (i < NMTUS - 1 && snic->lldi.mtus[i + 1] <= mtu)
+		++i;
+
+	return i;
+}
+
+static unsigned int cxgb4i_select_mss(struct cxgbi_sock *csk,
+						unsigned int pmtu)
+{
+	unsigned int idx;
+	struct dst_entry *dst = csk->dst;
+	u16 advmss = dst_metric(dst, RTAX_ADVMSS);
+
+	if (advmss > pmtu - 40)
+		advmss = pmtu - 40;
+	if (advmss < cxgb4i_get_snic(csk->cdev)->lldi.mtus[0] - 40)
+		advmss = cxgb4i_get_snic(csk->cdev)->lldi.mtus[0] - 40;
+	idx = cxgb4i_find_best_mtu(cxgb4i_get_snic(csk->cdev), advmss + 40);
+
+	return idx;
+}
+
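+/*
+ * smallest TCP window-scale factor that makes a receive window of
+ * 'win' bytes representable in the 16-bit window field
+ */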
+static inline int cxgb4i_sock_compute_wscale(int win)
+{
+	int wscale = 0;
+
+	while (wscale < 14 && (65535 << wscale) < win)
+		wscale++;
+
+	return wscale;
+}
+
+static void cxgb4i_sock_make_act_open_req(struct cxgbi_sock *csk,
+					   struct sk_buff *skb,
+					   unsigned int qid_atid,
+					   struct l2t_entry *e)
+{
+	struct cpl_act_open_req *req;
+	unsigned long long opt0;
+	unsigned int opt2;
+	int wscale;
+
+	cxgbi_conn_debug("csk 0x%p, atid 0x%x\n", csk, qid_atid);
+
+	wscale = cxgb4i_sock_compute_wscale(cxgb4i_rcv_win);
+
+	opt0 = KEEP_ALIVE(1) |
+		WND_SCALE(wscale) |
+		MSS_IDX(csk->mss_idx) |
+		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
+		TX_CHAN(csk->tx_chan) |
+		SMAC_SEL(csk->smac_idx) |
+		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+
+	opt2 = RX_CHANNEL(0) |
+		RSS_QUEUE_VALID |
+		RSS_QUEUE(csk->rss_qid);
+
+	skb->queue_mapping = CPL_PRIORITY_SETUP;
+	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+					qid_atid));
+	req->local_port = csk->saddr.sin_port;
+	req->peer_port = csk->daddr.sin_port;
+	req->local_ip = csk->saddr.sin_addr.s_addr;
+	req->peer_ip = csk->daddr.sin_addr.s_addr;
+	req->opt0 = cpu_to_be64(opt0);
+	req->params = 0;
+	req->opt2 = cpu_to_be32(opt2);
+}
+
+static void cxgb4i_fail_act_open(struct cxgbi_sock *csk, int errno)
+{
+	cxgbi_conn_debug("csk 0%p, state %u, flag 0x%lx\n", csk,
+			csk->state, csk->flags);
+	csk->err = errno;
+	cxgb4i_sock_closed(csk);
+}
+
+static void cxgb4i_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+	if (csk->state == CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_fail_act_open(csk, -EHOSTUNREACH);
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+	__kfree_skb(skb);
+}
+
+static void cxgb4i_sock_skb_entail(struct cxgbi_sock *csk,
+					struct sk_buff *skb,
+					int flags)
+{
+	cxgb4i_skb_tcp_seq(skb) = csk->write_seq;
+	cxgb4i_skb_flags(skb) = flags;
+	__skb_queue_tail(&csk->write_queue, skb);
+}
+
+static void cxgb4i_sock_send_close_req(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb = csk->cpl_close;
+	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+	unsigned int tid = csk->hwtid;
+
+	csk->cpl_close = NULL;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	INIT_TP_WR(req, tid);
+
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+	req->rsvd = 0;
+
+	cxgb4i_sock_skb_entail(csk, skb, CXGB4I_SKCB_FLAG_NO_APPEND);
+	if (csk->state != CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_sock_push_tx_frames(csk, 1);
+}
+
+static void cxgb4i_sock_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req = cplhdr(skb);
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
+static inline void cxgb4i_sock_purge_write_queue(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&csk->write_queue)))
+		__kfree_skb(skb);
+}
+
+static void cxgb4i_sock_send_abort_req(struct cxgbi_sock *csk)
+{
+	struct cpl_abort_req *req;
+	struct sk_buff *skb = csk->cpl_abort_req;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_ABORTING) ||
+			!skb || !csk->cdev)
+		return;
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ABORTING);
+
+	cxgbi_conn_debug("csk 0x%p, flag ABORT_RPL + ABORT_SHUT\n", csk);
+
+	cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING);
+
+	cxgb4i_sock_purge_write_queue(csk);
+
+	csk->cpl_abort_req = NULL;
+	req = (struct cpl_abort_req *)skb->head;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	t4_set_arp_err_handler(skb, csk, cxgb4i_sock_abort_arp_failure);
+	INIT_TP_WR(req, csk->hwtid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->hwtid));
+	req->rsvd0 = htonl(csk->snd_nxt);
+	req->rsvd1 = !cxgbi_sock_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT);
+	req->cmd = CPL_ABORT_SEND_RST;
+
+	cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+}
+
+static void cxgb4i_sock_send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
+{
+	struct sk_buff *skb = csk->cpl_abort_rpl;
+	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	csk->cpl_abort_rpl = NULL;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	INIT_TP_WR(rpl, csk->hwtid);
+	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->hwtid));
+	rpl->cmd = rst_status;
+
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
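+/* return 'credits' bytes of rx window to the hardware via CPL_RX_DATA_ACK */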
+static u32 cxgb4i_csk_send_rx_credits(struct cxgbi_sock *csk, u32 credits)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_data_ack *req;
+	int wrlen = roundup(sizeof(*req), 16);
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	skb = alloc_skb(wrlen, GFP_ATOMIC);
+	if (!skb)
+		return 0;
+
+	req = (struct cpl_rx_data_ack *)__skb_put(skb, wrlen);
+	memset(req, 0, wrlen);
+	skb->queue_mapping = CPL_PRIORITY_ACK;
+	INIT_TP_WR(req, csk->hwtid);
+	OPCODE_TID(req) =
+		cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->hwtid));
+	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+	return credits;
+}
+
+
+#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
+
+static const unsigned int cxgb4i_ulp_extra_len[] = { 0, 4, 4, 8 };
+static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
+{
+	return cxgb4i_ulp_extra_len[cxgb4i_skb_ulp_mode(skb) & 3];
+}
+
+static inline void cxgb4i_sock_reset_wr_list(struct cxgbi_sock *csk)
+{
+	csk->wr_pending_head = csk->wr_pending_tail = NULL;
+}
+
+static inline void cxgb4i_sock_enqueue_wr(struct cxgbi_sock *csk,
+						struct sk_buff *skb)
+{
+	cxgb4i_skb_tx_wr_next(skb) = NULL;
+
+	/*
+	 * Take an extra reference since both we and the driver need to
+	 * free the packet before it's really freed.  We know there's just
+	 * one user currently so we use atomic_set rather than skb_get to
+	 * avoid the atomic op.
+	 */
+	atomic_set(&skb->users, 2);
+
+	if (!csk->wr_pending_head)
+		csk->wr_pending_head = skb;
+	else
+		cxgb4i_skb_tx_wr_next(csk->wr_pending_tail) = skb;
+
+	csk->wr_pending_tail = skb;
+}
+
+static int cxgb4i_sock_count_pending_wrs(const struct cxgbi_sock *csk)
+{
+	int n = 0;
+	const struct sk_buff *skb = csk->wr_pending_head;
+
+	while (skb) {
+		n += skb->csum;
+		skb = cxgb4i_skb_tx_wr_next(skb);
+	}
+	return n;
+}
+
+static inline struct sk_buff *cxgb4i_sock_peek_wr(const struct cxgbi_sock *csk)
+{
+	return csk->wr_pending_head;
+}
+
+static inline void cxgb4i_sock_free_wr_skb(struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static inline struct sk_buff *cxgb4i_sock_dequeue_wr(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb = csk->wr_pending_head;
+
+	if (likely(skb)) {
+		csk->wr_pending_head = cxgb4i_skb_tx_wr_next(skb);
+		cxgb4i_skb_tx_wr_next(skb) = NULL;
+	}
+	return skb;
+}
+
+static void cxgb4i_sock_purge_wr_queue(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = cxgb4i_sock_dequeue_wr(csk)) != NULL)
+		cxgb4i_sock_free_wr_skb(skb);
+}
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (is_ofld_imm(skb))
+		return DIV_ROUND_UP(skb->len, 8);
+
+	flits = skb_transport_offset(skb) / 8;
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb->tail != skb->transport_header)
+		cnt++;
+	return flits + sgl_len(cnt);
+}
+
+static inline void cxgb4i_sock_send_tx_flowc_wr(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+	struct fw_flowc_wr *flowc;
+	int flowclen, i;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
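+	/*
+	 * FW_FLOWC_WR carries the per-connection parameters (channel, port,
+	 * rss queue, initial send/receive sequence numbers, send buffer size
+	 * and MSS index) to the firmware; it is sent once, before the first
+	 * tx data WR on the connection.
+	 */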
+	flowclen = 80;
+	skb = alloc_skb(flowclen, GFP_ATOMIC);
+	if (!skb)
+		return;
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+
+	flowc->op_to_nparams =
+		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
+	flowc->flowid_len16 =
+		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
+				FW_WR_FLOWID(csk->hwtid));
+
+	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+	flowc->mnemval[0].val = htonl(0);
+	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+	flowc->mnemval[1].val = htonl(csk->tx_chan);
+	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+	flowc->mnemval[2].val = htonl(csk->tx_chan);
+	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+	flowc->mnemval[3].val = htonl(csk->rss_qid);
+	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+	flowc->mnemval[4].val = htonl(csk->snd_nxt);
+	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
+	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
+	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+	flowc->mnemval[7].val = htonl(csk->mss_idx);
+	flowc->mnemval[8].mnemonic = 0;
+	flowc->mnemval[8].val = 0;
+	for (i = 0; i < 9; i++) {
+		flowc->mnemval[i].r4[0] = 0;
+		flowc->mnemval[i].r4[1] = 0;
+		flowc->mnemval[i].r4[2] = 0;
+	}
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
+static inline void cxgb4i_sock_make_tx_data_wr(struct cxgbi_sock *csk,
+						struct sk_buff *skb, int dlen,
+						int len, u32 credits,
+						int req_completion)
+{
+	struct fw_ofld_tx_data_wr *req;
+	unsigned int wr_ulp_mode;
+
+	if (is_ofld_imm(skb)) {
+		req = (struct fw_ofld_tx_data_wr *)
+			__skb_push(skb, sizeof(*req));
+		req->op_to_immdlen =
+			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+					FW_WR_COMPL(req_completion) |
+					FW_WR_IMMDLEN(dlen));
+		req->flowid_len16 =
+			cpu_to_be32(FW_WR_FLOWID(csk->hwtid) |
+					FW_WR_LEN16(credits));
+	} else {
+		req = (struct fw_ofld_tx_data_wr *)
+			__skb_push(skb, sizeof(*req));
+		req->op_to_immdlen =
+			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+					FW_WR_COMPL(req_completion) |
+					FW_WR_IMMDLEN(0));
+		req->flowid_len16 =
+			cpu_to_be32(FW_WR_FLOWID(csk->hwtid) |
+					FW_WR_LEN16(credits));
+	}
+
+	wr_ulp_mode =
+		FW_OFLD_TX_DATA_WR_ULPMODE(cxgb4i_skb_ulp_mode(skb) >> 4) |
+		FW_OFLD_TX_DATA_WR_ULPSUBMODE(cxgb4i_skb_ulp_mode(skb) & 3);
+
+	req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode |
+		FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
+
+	req->plen = cpu_to_be32(len);
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT))
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT);
+}
+
+static void cxgb4i_sock_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static int cxgb4i_sock_push_tx_frames(struct cxgbi_sock *csk,
+						int req_completion)
+{
+	int total_size = 0;
+	struct sk_buff *skb;
+	struct cxgb4i_snic *snic;
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_CONNECTING ||
+				csk->state == CXGBI_CSK_ST_CLOSE_WAIT_1 ||
+				csk->state >= CXGBI_CSK_ST_ABORTING)) {
+		cxgbi_tx_debug("csk 0x%p, in closing state %u.\n",
+				csk, csk->state);
+		return 0;
+	}
+
+	snic = cxgb4i_get_snic(csk->cdev);
+
+	while (csk->wr_cred
+			&& (skb = skb_peek(&csk->write_queue)) != NULL) {
+		int dlen;
+		int len;
+		unsigned int credits_needed;
+
+		dlen = len = skb->len;
+		skb_reset_transport_header(skb);
+
+		if (is_ofld_imm(skb))
+			credits_needed = DIV_ROUND_UP(dlen +
+					sizeof(struct fw_ofld_tx_data_wr), 16);
+		else
+			credits_needed = DIV_ROUND_UP(8 *
+					calc_tx_flits_ofld(skb)+
+					sizeof(struct fw_ofld_tx_data_wr), 16);
+
+		if (csk->wr_cred < credits_needed) {
+			cxgbi_tx_debug("csk 0x%p, skb len %u/%u, "
+					"wr %d < %u.\n",
+					csk, skb->len, skb->data_len,
+					credits_needed, csk->wr_cred);
+			break;
+		}
+
+		__skb_unlink(skb, &csk->write_queue);
+		skb->queue_mapping = CPL_PRIORITY_DATA;
+		skb->csum = credits_needed; /* remember this until the WR_ACK */
+		csk->wr_cred -= credits_needed;
+		csk->wr_una_cred += credits_needed;
+		cxgb4i_sock_enqueue_wr(csk, skb);
+
+		cxgbi_tx_debug("csk 0x%p, enqueue, skb len %u/%u, "
+				"wr %d, left %u, unack %u.\n",
+				csk, skb->len, skb->data_len,
+				credits_needed, csk->wr_cred,
+				csk->wr_una_cred);
+
+
+		if (likely(cxgb4i_skb_flags(skb) &
+					CXGB4I_SKCB_FLAG_NEED_HDR)) {
+			len += ulp_extra_len(skb);
+			if (!cxgbi_sock_flag(csk,
+						CXGBI_CSK_FL_TX_DATA_SENT)) {
+				cxgb4i_sock_send_tx_flowc_wr(csk);
+				skb->csum += 5;
+				csk->wr_cred -= 5;
+				csk->wr_una_cred += 5;
+			}
+
+			if ((req_completion &&
+				csk->wr_una_cred == credits_needed) ||
+				(cxgb4i_skb_flags(skb) &
+				  CXGB4I_SKCB_FLAG_COMPL) ||
+				csk->wr_una_cred >= csk->wr_max_cred / 2) {
+				req_completion = 1;
+				csk->wr_una_cred = 0;
+			}
+			cxgb4i_sock_make_tx_data_wr(csk, skb, dlen, len,
+							credits_needed,
+							req_completion);
+			csk->snd_nxt += len;
+
+			if (req_completion)
+				cxgb4i_skb_flags(skb) &=
+					~CXGB4I_SKCB_FLAG_NEED_HDR;
+		}
+
+		total_size += skb->truesize;
+		t4_set_arp_err_handler(skb, csk,
+					cxgb4i_sock_arp_failure_discard);
+		cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+	}
+	return total_size;
+}
+
+static inline void cxgb4i_sock_free_atid(struct cxgbi_sock *csk)
+{
+	cxgb4_free_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids, csk->atid);
+	cxgbi_sock_put(csk);
+}
+
+static void cxgb4i_sock_established(struct cxgbi_sock *csk, u32 snd_isn,
+					unsigned int opt)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u.\n", csk, csk->state);
+
+	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
+
+	/*
+	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
+	 * pass through opt0.
+	 */
+	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
+		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
+
+	dst_confirm(csk->dst);
+
+	smp_mb();
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ESTABLISHED);
+}
+
+static int cxgb4i_cpl_act_establish(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	struct tid_info *t = snic->lldi.tids;
+	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+	csk = lookup_atid(t, atid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flag 0x%lx\n",
+				csk, csk->state, csk->flags);
+	csk->hwtid = hwtid;
+	cxgbi_sock_hold(csk);
+	cxgb4_insert_tid(snic->lldi.tids, csk, hwtid);
+	cxgb4_free_atid(snic->lldi.tids, atid);
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state != CXGBI_CSK_ST_CONNECTING))
+		cxgbi_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
+				csk->hwtid, csk->state);
+
+	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
+	cxgb4i_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+	__kfree_skb(skb);
+
+	if (unlikely(cxgbi_sock_flag(csk, CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED)))
+		cxgb4i_sock_send_abort_req(csk);
+	else {
+		if (skb_queue_len(&csk->write_queue))
+			cxgb4i_sock_push_tx_frames(csk, 1);
+
+		cxgbi_conn_tx_open(csk);
+	}
+
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
+static int act_open_rpl_status_to_errno(int status)
+{
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+		return -ECONNREFUSED;
+	case CPL_ERR_ARP_MISS:
+		return -EHOSTUNREACH;
+	case CPL_ERR_CONN_TIMEDOUT:
+		return -ETIMEDOUT;
+	case CPL_ERR_TCAM_FULL:
+		return -ENOMEM;
+	case CPL_ERR_CONN_EXIST:
+		cxgbi_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
+		return -EADDRINUSE;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+		status != CPL_ERR_ARP_MISS;
+}
+
+static void cxgb4i_sock_act_open_retry_timer(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	cxgbi_conn_debug("csk 0x%p, state %u.\n", csk, csk->state);
+
+	spin_lock_bh(&csk->lock);
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
+	if (!skb)
+		cxgb4i_fail_act_open(csk, -ENOMEM);
+	else {
+		unsigned int qid_atid  = csk->rss_qid << 14;
+		qid_atid |= (unsigned int)csk->atid;
+		skb->sk = (struct sock *)csk;
+		t4_set_arp_err_handler(skb, csk,
+					cxgb4i_act_open_req_arp_failure);
+		cxgb4i_sock_make_act_open_req(csk, skb, qid_atid, csk->l2t);
+		cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+	}
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+}
+
+static int cxgb4i_cpl_act_open_rpl(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+	unsigned int atid =
+		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
+	struct tid_info *t = snic->lldi.tids;
+	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+	csk = lookup_atid(t, atid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", atid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	cxgbi_conn_debug("rcv, status 0x%x, csk 0x%p, csk->state %u, "
+			"csk->flag 0x%lx, csk->atid %u.\n",
+			status, csk, csk->state, csk->flags, csk->hwtid);
+
+	if (status && act_open_has_tid(status))
+		cxgb4_remove_tid(snic->lldi.tids, csk->port_id, GET_TID(rpl));
+
+	if (status == CPL_ERR_CONN_EXIST &&
+			csk->retry_timer.function !=
+			cxgb4i_sock_act_open_retry_timer) {
+		csk->retry_timer.function = cxgb4i_sock_act_open_retry_timer;
+		if (!mod_timer(&csk->retry_timer, jiffies + HZ / 2))
+			cxgbi_sock_hold(csk);
+	} else
+		cxgb4i_fail_act_open(csk, act_open_rpl_status_to_errno(status));
+
+	__kfree_skb(skb);
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_peer_close(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_peer_close *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct tid_info *t = snic->lldi.tids;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_ESTABLISHED:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_PASSIVE_CLOSE);
+		break;
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSE_WAIT_2);
+		break;
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+		cxgb4i_sock_closed(csk);
+		break;
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+	default:
+		cxgbi_log_error("peer close, TID %u in bad state %u\n",
+				csk->hwtid, csk->state);
+	}
+
+	cxgbi_sock_conn_closing(csk);
+
+out:
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_close_con_rpl(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_close_con_rpl *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flag 0x%lx.\n",
+			csk, csk->state, csk->flags);
+
+	csk->snd_una = ntohl(rpl->snd_nxt) - 1;
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSE_WAIT_1);
+		break;
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+	case CXGBI_CSK_ST_CLOSE_WAIT_2:
+		cxgb4i_sock_closed(csk);
+		break;
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+	default:
+		cxgbi_log_error("close_rpl, TID %u in bad state %u\n",
+				csk->hwtid, csk->state);
+	}
+out:
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
+								int *need_rst)
+{
+	switch (abort_reason) {
+	case CPL_ERR_BAD_SYN: /* fall through */
+	case CPL_ERR_CONN_RESET:
+		return csk->state > CXGBI_CSK_ST_ESTABLISHED ?
+			-EPIPE : -ECONNRESET;
+	case CPL_ERR_XMIT_TIMEDOUT:
+	case CPL_ERR_PERSIST_TIMEDOUT:
+	case CPL_ERR_FINWAIT2_TIMEDOUT:
+	case CPL_ERR_KEEPALIVE_TIMEDOUT:
+		return -ETIMEDOUT;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is a negative advice.
+ */
+static inline int is_neg_adv_abort(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+		status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static int cxgb4i_cpl_abort_req_rss(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct tid_info *t = snic->lldi.tids;
+	int rst_status = CPL_ABORT_NO_RST;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (is_neg_adv_abort(req->status))
+		goto done;
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD)) {
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD);
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ABORTING);
+		goto done;
+	}
+
+	cxgbi_sock_clear_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD);
+	cxgb4i_sock_send_abort_rpl(csk, rst_status);
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING)) {
+		csk->err = abort_status_to_errno(csk, req->status,
+							&rst_status);
+		cxgb4i_sock_closed(csk);
+	}
+
+done:
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_abort_rpl_rss(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+
+	if (rpl->status == CPL_ERR_ABORT_FAILED)
+		goto out;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		goto out;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING)) {
+		if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_RCVD))
+			cxgbi_sock_set_flag(csk,
+						CXGBI_CSK_FL_ABORT_RPL_RCVD);
+		else {
+			cxgbi_sock_clear_flag(csk,
+						CXGBI_CSK_FL_ABORT_RPL_RCVD);
+			cxgbi_sock_clear_flag(csk,
+					CXGBI_CSK_FL_ABORT_RPL_PENDING);
+
+			if (cxgbi_sock_flag(csk,
+						CXGBI_CSK_FL_ABORT_REQ_RCVD))
+				cxgbi_log_error("tid %u, ABORT_RPL_RSS\n",
+						csk->hwtid);
+
+			cxgb4i_sock_closed(csk);
+		}
+	}
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+out:
+	__kfree_skb(skb);
+	return 0;
+}
+
+static int cxgb4i_cpl_iscsi_hdr(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_iscsi_hdr *cpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(cpl);
+	struct tid_info *t = snic->lldi.tids;
+	struct sk_buff *lskb;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CXGBI_CSK_ST_PASSIVE_CLOSE)) {
+		if (csk->state != CXGBI_CSK_ST_ABORTING)
+			goto abort_conn;
+	}
+
+	cxgb4i_skb_tcp_seq(skb) = ntohl(cpl->seq);
+	cxgb4i_skb_flags(skb) = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*cpl));
+	__pskb_trim(skb, ntohs(cpl->len));
+
+	if (!csk->skb_ulp_lhdr) {
+		unsigned char *byte;
+		csk->skb_ulp_lhdr = skb;
+		lskb = csk->skb_ulp_lhdr;
+
+		cxgb4i_skb_flags(lskb) = CXGB4I_SKCB_FLAG_HDR_RCVD;
+
+		if (cxgb4i_skb_tcp_seq(lskb) != csk->rcv_nxt) {
+			cxgbi_log_error("tid 0x%x, CPL_ISCSI_HDR, bad seq got "
+					"0x%x, exp 0x%x\n",
+					csk->hwtid,
+					cxgb4i_skb_tcp_seq(lskb),
+					csk->rcv_nxt);
+		}
+
+		byte = skb->data;
+		cxgb4i_skb_rx_pdulen(skb) = ntohs(cpl->pdu_len_ddp) - 40;
+		csk->rcv_nxt += cxgb4i_skb_rx_pdulen(lskb);
+	} else {
+		lskb = csk->skb_ulp_lhdr;
+		cxgb4i_skb_flags(lskb) |= CXGB4I_SKCB_FLAG_DATA_RCVD;
+		cxgb4i_skb_flags(skb) = CXGB4I_SKCB_FLAG_DATA_RCVD;
+		cxgbi_log_debug("csk 0x%p, tid 0x%x skb 0x%p, pdu data, "
+				" header 0x%p.\n",
+				csk, csk->hwtid, skb, lskb);
+	}
+
+	__skb_queue_tail(&csk->receive_queue, skb);
+
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+
+abort_conn:
+	cxgb4i_sock_send_abort_req(csk);
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+
+	return -EINVAL;
+}
+
+static int cxgb4i_cpl_rx_data_ddp(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct sk_buff *lskb;
+	struct cpl_rx_data_ddp *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	unsigned int status;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CXGBI_CSK_ST_PASSIVE_CLOSE)) {
+		if (csk->state != CXGBI_CSK_ST_ABORTING)
+			goto abort_conn;
+	}
+
+	if (!csk->skb_ulp_lhdr) {
+		cxgbi_log_error("tid 0x%x, rcv RX_DATA_DDP w/o pdu header\n",
+				csk->hwtid);
+		goto abort_conn;
+	}
+
+	lskb = csk->skb_ulp_lhdr;
+	cxgb4i_skb_flags(lskb) |= CXGB4I_SKCB_FLAG_STATUS_RCVD;
+
+	if (ntohs(rpl->len) != cxgb4i_skb_rx_pdulen(lskb)) {
+		cxgbi_log_error("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
+				csk->hwtid, ntohs(rpl->len),
+				cxgb4i_skb_rx_pdulen(lskb));
+	}
+
+	cxgb4i_skb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
+	status = ntohl(rpl->ddpvld);
+
+	/*
+	 * record the DDP status on the saved header skb; this CPL skb is
+	 * freed below and would otherwise lose the flags
+	 */
+	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
+		cxgb4i_skb_ulp_mode(lskb) |= ULP2_FLAG_HCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
+		cxgb4i_skb_ulp_mode(lskb) |= ULP2_FLAG_DCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
+		cxgb4i_skb_ulp_mode(lskb) |= ULP2_FLAG_PAD_ERROR;
+	if ((cxgb4i_skb_flags(lskb) & ULP2_FLAG_DATA_READY))
+		cxgb4i_skb_ulp_mode(lskb) |= ULP2_FLAG_DATA_DDPED;
+
+	csk->skb_ulp_lhdr = NULL;
+
+	__kfree_skb(skb);
+	cxgbi_conn_pdu_ready(csk);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+
+abort_conn:
+	cxgb4i_sock_send_abort_req(csk);
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+	return -EINVAL;
+}
+
+static void check_wr_invariants(const struct cxgbi_sock *csk)
+{
+	int pending = cxgb4i_sock_count_pending_wrs(csk);
+
+	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
+		printk(KERN_ERR "TID %u: credit imbalance: avail %u, "
+				"pending %u, total should be %u\n",
+				csk->hwtid,
+				csk->wr_cred,
+				pending,
+				csk->wr_max_cred);
+}
+
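+/*
+ * Process CPL_FW4_ACK: return the acknowledged credits to the connection,
+ * drop the completed skbs from the pending-WR list, update snd_una and
+ * restart transmission if data is waiting in the write queue.
+ */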
+static int cxgb4i_cpl_fw4_ack(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_fw4_ack *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	unsigned char credits;
+	unsigned int snd_una;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		kfree_skb(skb);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	credits = rpl->credits;
+	snd_una = be32_to_cpu(rpl->snd_una);
+
+	cxgbi_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u\n",
+				credits, csk->wr_cred, csk->wr_una_cred,
+						csk->hwtid, csk->state);
+
+	csk->wr_cred += credits;
+
+	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
+		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+	while (credits) {
+		struct sk_buff *p = cxgb4i_sock_peek_wr(csk);
+
+		if (unlikely(!p)) {
+			cxgbi_log_error("%u WR_ACK credits for TID %u with "
+					"nothing pending, state %u\n",
+					credits, csk->hwtid, csk->state);
+			break;
+		}
+
+		if (unlikely(credits < p->csum)) {
+			/* partially acked WR: remember the remainder and
+			 * stop, all returned credits are consumed */
+			p->csum -= credits;
+			break;
+		} else {
+			cxgb4i_sock_dequeue_wr(csk);
+			credits -= p->csum;
+			cxgb4i_sock_free_wr_skb(p);
+		}
+	}
+
+	check_wr_invariants(csk);
+
+	if (rpl->seq_vld) {
+		if (unlikely(before(snd_una, csk->snd_una))) {
+			cxgbi_log_error("TID %u, unexpected sequence # %u "
+					"in WR_ACK snd_una %u\n",
+					csk->hwtid, snd_una, csk->snd_una);
+			goto out_free;
+		}
+	}
+
+	if (csk->snd_una != snd_una) {
+		csk->snd_una = snd_una;
+		dst_confirm(csk->dst);
+	}
+
+	if (skb_queue_len(&csk->write_queue)) {
+		if (cxgb4i_sock_push_tx_frames(csk, 0))
+			cxgbi_conn_tx_open(csk);
+	} else
+		cxgbi_conn_tx_open(csk);
+
+	goto out;
+
+out_free:
+
+	__kfree_skb(skb);
+
+out:
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_set_tcb_rpl(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	struct cxgbi_sock *csk;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (!csk) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		__kfree_skb(skb);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (rpl->status != CPL_ERR_NONE) {
+		cxgbi_log_error("Unexpected SET_TCB_RPL status %u "
+				 "for tid %u\n", rpl->status, GET_TID(rpl));
+	}
+
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
+static void cxgb4i_sock_free_cpl_skbs(struct cxgbi_sock *csk)
+{
+	if (csk->cpl_close)
+		kfree_skb(csk->cpl_close);
+	if (csk->cpl_abort_req)
+		kfree_skb(csk->cpl_abort_req);
+	if (csk->cpl_abort_rpl)
+		kfree_skb(csk->cpl_abort_rpl);
+}
+
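+/*
+ * Pre-allocate the close/abort CPL skbs at connection-creation time so that
+ * tearing down a connection never has to allocate memory.
+ */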
+static int cxgb4i_alloc_cpl_skbs(struct cxgbi_sock *csk)
+{
+	csk->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
+					GFP_KERNEL);
+	if (!csk->cpl_close)
+		return -ENOMEM;
+	skb_put(csk->cpl_close, sizeof(struct cpl_close_con_req));
+
+	csk->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
+					GFP_KERNEL);
+	if (!csk->cpl_abort_req)
+		goto free_cpl_skbs;
+	skb_put(csk->cpl_abort_req, sizeof(struct cpl_abort_req));
+
+	csk->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
+					GFP_KERNEL);
+	if (!csk->cpl_abort_rpl)
+		goto free_cpl_skbs;
+	skb_put(csk->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
+
+	return 0;
+
+free_cpl_skbs:
+	cxgb4i_sock_free_cpl_skbs(csk);
+	return -ENOMEM;
+}
+
+static void cxgb4i_sock_release_offload_resources(struct cxgbi_sock *csk)
+{
+
+	cxgb4i_sock_free_cpl_skbs(csk);
+
+	if (csk->wr_cred != csk->wr_max_cred) {
+		cxgb4i_sock_purge_wr_queue(csk);
+		cxgb4i_sock_reset_wr_list(csk);
+	}
+
+	if (csk->l2t) {
+		cxgb4_l2t_release(csk->l2t);
+		csk->l2t = NULL;
+	}
+
+	if (csk->state == CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_sock_free_atid(csk);
+	else {
+		cxgb4_remove_tid(cxgb4i_get_snic(csk->cdev)->lldi.tids, 0,
+				csk->hwtid);
+		cxgbi_sock_put(csk);
+	}
+
+	csk->dst = NULL;
+	csk->cdev = NULL;
+}
+
+struct cxgbi_sock *cxgb4i_sock_create(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_sock *csk = NULL;
+
+	csk = kzalloc(sizeof(*csk), GFP_KERNEL);
+	if (!csk)
+		return NULL;
+
+	if (cxgb4i_alloc_cpl_skbs(csk) < 0)
+		goto free_csk;
+
+	cxgbi_conn_debug("alloc csk: 0x%p\n", csk);
+
+	csk->flags = 0;
+	spin_lock_init(&csk->lock);
+	atomic_set(&csk->refcnt, 1);
+	skb_queue_head_init(&csk->receive_queue);
+	skb_queue_head_init(&csk->write_queue);
+	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
+	rwlock_init(&csk->callback_lock);
+	csk->cdev = &snic->cdev;
+
+	return csk;
+
+free_csk:
+	cxgbi_api_debug("csk 0x%p, cpl skb alloc failed, bailing out\n", csk);
+	kfree(csk);
+	return NULL;
+}
+
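+/*
+ * Start an active close: drop any unread receive data and, if the
+ * connection is established, send either an abort (data was dropped) or a
+ * graceful close request to the hardware.
+ */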
+static void cxgb4i_sock_active_close(struct cxgbi_sock *csk)
+{
+	int data_lost;
+	int close_req = 0;
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flags %lu\n",
+			csk, csk->state, csk->flags);
+
+	dst_confirm(csk->dst);
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	data_lost = skb_queue_len(&csk->receive_queue);
+	__skb_queue_purge(&csk->receive_queue);
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_CLOSED:
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+	case CXGBI_CSK_ST_CLOSE_WAIT_2:
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+
+	case CXGBI_CSK_ST_CONNECTING:
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED);
+		break;
+	case CXGBI_CSK_ST_ESTABLISHED:
+		close_req = 1;
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ACTIVE_CLOSE);
+		break;
+	}
+
+	if (close_req) {
+		if (data_lost)
+			cxgb4i_sock_send_abort_req(csk);
+		else
+			cxgb4i_sock_send_close_req(csk);
+	}
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+}
+
+void cxgb4i_sock_release(struct cxgbi_sock *csk)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u, flags %lu\n",
+			csk, csk->state, csk->flags);
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_CONNECTING))
+		cxgbi_sock_set_flag(csk,
+				CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED);
+	else if (likely(csk->state != CXGBI_CSK_ST_CLOSED))
+		cxgb4i_sock_active_close(csk);
+
+	cxgbi_sock_put(csk);
+}
+
+static int is_cxgb4_dev(struct net_device *dev, struct cxgb4i_snic *snic)
+{
+	struct net_device *ndev = dev;
+	int i;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN)
+		ndev = vlan_dev_real_dev(dev);
+
+	for (i = 0; i < snic->lldi.nports; i++) {
+		if (ndev == snic->lldi.ports[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+static struct net_device *cxgb4i_find_egress_dev(struct net_device *root_dev,
+						struct cxgb4i_snic *snic)
+{
+	while (root_dev) {
+		if (root_dev->priv_flags & IFF_802_1Q_VLAN)
+			root_dev = vlan_dev_real_dev(root_dev);
+		else if (is_cxgb4_dev(root_dev, snic))
+			return root_dev;
+		else
+			return NULL;
+	}
+
+	return NULL;
+}
+
+static struct rtable *find_route(struct net_device *dev,
+				__be32 saddr, __be32 daddr,
+				__be16 sport, __be16 dport,
+				u8 tos)
+{
+	struct rtable *rt;
+	struct flowi fl = {
+		.oif = dev ? dev->ifindex : 0,
+		.nl_u = {
+			.ip4_u = {
+				.daddr = daddr,
+				.saddr = saddr,
+				.tos = tos }
+			},
+		.proto = IPPROTO_TCP,
+		.uli_u = {
+			.ports = {
+				.sport = sport,
+				.dport = dport }
+			}
+	};
+
+	if (ip_route_output_flow(dev ? dev_net(dev) : &init_net,
+					&rt, &fl, NULL, 0))
+		return NULL;
+
+	return rt;
+}
+
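+/*
+ * Allocate an atid and an L2T entry for the connection, then build and send
+ * the CPL_ACT_OPEN_REQ that asks the hardware to perform the TCP handshake.
+ */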
+static int cxgb4i_init_act_open(struct cxgbi_sock *csk,
+					struct net_device *dev)
+{
+	struct dst_entry *dst = csk->dst;
+	struct sk_buff *skb;
+	struct port_info *pi = netdev_priv(dev);
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flags 0x%lx\n",
+			csk, csk->state, csk->flags);
+
+	csk->atid = cxgb4_alloc_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids,
+					csk);
+	if (csk->atid == -1) {
+		cxgbi_log_error("cannot alloc atid\n");
+		goto out_err;
+	}
+
+	csk->l2t = cxgb4_l2t_get(cxgb4i_get_snic(csk->cdev)->lldi.l2t,
+				csk->dst->neighbour, dev, 0);
+	if (!csk->l2t) {
+		cxgbi_log_error("cannot alloc l2t\n");
+		goto free_atid;
+	}
+
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
+	if (!skb)
+		goto free_l2t;
+
+	skb->sk = (struct sock *)csk;
+	t4_set_arp_err_handler(skb, csk, cxgb4i_act_open_req_arp_failure);
+
+	cxgbi_sock_hold(csk);
+
+	csk->wr_max_cred = csk->wr_cred =
+		cxgb4i_get_snic(csk->cdev)->lldi.wr_cred;
+	csk->port_id = pi->port_id;
+	csk->rss_qid = cxgb4i_get_snic(csk->cdev)->lldi.rxq_ids[csk->port_id];
+	csk->tx_chan = pi->tx_chan;
+	csk->smac_idx = csk->tx_chan << 1;
+	csk->wr_una_cred = 0;
+	csk->mss_idx = cxgb4i_select_mss(csk, dst_mtu(dst));
+	csk->err = 0;
+
+	cxgb4i_sock_reset_wr_list(csk);
+
+	cxgb4i_sock_make_act_open_req(csk, skb,
+					((csk->rss_qid << 14) |
+					 (csk->atid)), csk->l2t);
+	cxgb4_l2t_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[csk->port_id],
+					skb, csk->l2t);
+	return 0;
+
+free_l2t:
+	cxgb4_l2t_release(csk->l2t);
+
+free_atid:
+	cxgb4_free_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids, csk->atid);
+
+out_err:
+
+	return -EINVAL;
+}
+
+static struct net_device *cxgb4i_find_dev(struct net_device *dev,
+							__be32 ipaddr)
+{
+	struct flowi fl;
+	struct rtable *rt;
+	int err;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = ipaddr;
+
+	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+	if (!err)
+		return (&rt->u.dst)->dev;
+
+	return NULL;
+}
+
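+/*
+ * Offloaded connect: route the destination to a cxgb4 port, reserve a local
+ * source port, pick the source address advertised on the iSCSI interface
+ * and kick off the active open.
+ */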
+int cxgb4i_sock_connect(struct net_device *dev, struct cxgbi_sock *csk,
+						struct sockaddr_in *sin)
+{
+	struct rtable *rt;
+	__be32 sipv4 = 0;
+	struct net_device *dstdev;
+	struct cxgbi_hba *chba = NULL;
+	int err;
+
+	cxgbi_conn_debug("csk 0x%p, dev 0x%p\n", csk, dev);
+
+	if (sin->sin_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	csk->daddr.sin_port = sin->sin_port;
+	csk->daddr.sin_addr.s_addr = sin->sin_addr.s_addr;
+
+	dstdev = cxgb4i_find_dev(dev, sin->sin_addr.s_addr);
+	if (!dstdev || !is_cxgb4_dev(dstdev, cxgb4i_get_snic(csk->cdev)))
+		return -ENETUNREACH;
+
+	if (dstdev->priv_flags & IFF_802_1Q_VLAN)
+		dev = dstdev;
+
+	rt = find_route(dev, csk->saddr.sin_addr.s_addr,
+			csk->daddr.sin_addr.s_addr,
+			csk->saddr.sin_port,
+			csk->daddr.sin_port,
+			0);
+	if (rt == NULL) {
+		cxgbi_conn_debug("no route to %pI4, port %u, dev %s, "
+					"snic 0x%p\n",
+					&csk->daddr.sin_addr.s_addr,
+					ntohs(csk->daddr.sin_port),
+					dev ? dev->name : "any",
+					csk->snic);
+		return -ENETUNREACH;
+	}
+
+	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+		cxgbi_conn_debug("multi-cast route to %pI4, port %u, "
+					"dev %s, snic 0x%p\n",
+					&csk->daddr.sin_addr.s_addr,
+					ntohs(csk->daddr.sin_port),
+					dev ? dev->name : "any",
+					csk->snic);
+		ip_rt_put(rt);
+		return -ENETUNREACH;
+	}
+
+	if (!csk->saddr.sin_addr.s_addr)
+		csk->saddr.sin_addr.s_addr = rt->rt_src;
+
+	csk->dst = &rt->u.dst;
+
+	dev = cxgb4i_find_egress_dev(csk->dst->dev,
+					cxgb4i_get_snic(csk->cdev));
+	if (dev == NULL) {
+		cxgbi_conn_debug("csk: 0x%p, egress dev NULL\n", csk);
+		return -ENETUNREACH;
+	}
+
+	err = cxgbi_sock_get_port(csk);
+	if (err)
+		return err;
+
+	cxgbi_conn_debug("csk: 0x%p get port: %u\n",
+			csk, ntohs(csk->saddr.sin_port));
+
+	chba = cxgb4i_hba_find_by_netdev(csk->dst->dev);
+
+	sipv4 = cxgb4i_get_iscsi_ipv4(chba);
+	if (!sipv4) {
+		cxgbi_conn_debug("csk: 0x%p, iscsi is not configured\n", csk);
+		sipv4 = csk->saddr.sin_addr.s_addr;
+		cxgb4i_set_iscsi_ipv4(chba, sipv4);
+	} else
+		csk->saddr.sin_addr.s_addr = sipv4;
+
+	cxgbi_conn_debug("csk: 0x%p, %pI4:[%u], %pI4:[%u] SYN_SENT\n",
+				csk,
+				&csk->saddr.sin_addr.s_addr,
+				ntohs(csk->saddr.sin_port),
+				&csk->daddr.sin_addr.s_addr,
+				ntohs(csk->daddr.sin_port));
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CONNECTING);
+
+	if (!cxgb4i_init_act_open(csk, dev))
+		return 0;
+
+	err = -ENOTSUPP;
+
+	cxgbi_conn_debug("csk 0x%p -> closed\n", csk);
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSED);
+	ip_rt_put(rt);
+	cxgbi_sock_put_port(csk);
+
+	return err;
+}
+
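+/*
+ * Return RX credits to the hardware once the data consumed by the iSCSI
+ * layer crosses the configured threshold (or the receive window is nearly
+ * exhausted), so the adapter can reopen the TCP receive window.
+ */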
+void cxgb4i_sock_rx_credits(struct cxgbi_sock *csk, int copied)
+{
+	int must_send;
+	u32 credits;
+
+	if (csk->state != CXGBI_CSK_ST_ESTABLISHED)
+		return;
+
+	credits = csk->copied_seq - csk->rcv_wup;
+	if (unlikely(!credits))
+		return;
+
+	if (unlikely(cxgb4i_rx_credit_thres == 0))
+		return;
+
+	must_send = credits + 16384 >= cxgb4i_rcv_win;
+
+	if (must_send || credits >= cxgb4i_rx_credit_thres)
+		csk->rcv_wup += cxgb4i_csk_send_rx_credits(csk, credits);
+}
+
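+/*
+ * Queue a chain of PDU skbs for transmission: check headroom and fragment
+ * limits, account the bytes against the send window and push the frames out
+ * to the hardware.
+ */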
+int cxgb4i_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+	struct sk_buff *next;
+	int err, copied = 0;
+
+	spin_lock_bh(&csk->lock);
+
+	if (csk->state != CXGBI_CSK_ST_ESTABLISHED) {
+		cxgbi_tx_debug("csk 0x%p, not in est. state %u.\n",
+			      csk, csk->state);
+		err = -EAGAIN;
+		goto out_err;
+	}
+
+	if (csk->err) {
+		cxgbi_tx_debug("csk 0x%p, err %d.\n", csk, csk->err);
+		err = -EPIPE;
+		goto out_err;
+	}
+
+	if (csk->write_seq - csk->snd_una >= cxgb4i_snd_win) {
+		cxgbi_tx_debug("csk 0x%p, snd %u - %u > %u.\n",
+				csk, csk->write_seq, csk->snd_una,
+				cxgb4i_snd_win);
+		err = -ENOBUFS;
+		goto out_err;
+	}
+
+	while (skb) {
+		int frags = skb_shinfo(skb)->nr_frags +
+				(skb->len != skb->data_len);
+
+		if (unlikely(skb_headroom(skb) < CXGB4I_TX_HEADER_LEN)) {
+			cxgbi_tx_debug("csk 0x%p, skb head.\n", csk);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		if (frags >= SKB_WR_LIST_SIZE) {
+			cxgbi_log_error("csk 0x%p, tx frags %d, len %u,%u.\n",
+					 csk, skb_shinfo(skb)->nr_frags,
+					 skb->len, skb->data_len);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		next = skb->next;
+		skb->next = NULL;
+		cxgb4i_sock_skb_entail(csk, skb,
+				CXGB4I_SKCB_FLAG_NO_APPEND |
+				CXGB4I_SKCB_FLAG_NEED_HDR);
+		copied += skb->len;
+		csk->write_seq += skb->len + ulp_extra_len(skb);
+		skb = next;
+	}
+done:
+	if (likely(skb_queue_len(&csk->write_queue)))
+		cxgb4i_sock_push_tx_frames(csk, 1);
+	spin_unlock_bh(&csk->lock);
+	return copied;
+
+out_err:
+	if (copied == 0 && err == -EPIPE)
+		copied = csk->err ? csk->err : -EPIPE;
+	else
+		copied = err;
+	goto done;
+}
+
+static void cxgbi_sock_conn_closing(struct cxgbi_sock *csk)
+{
+	struct iscsi_conn *conn = csk->user_data;
+
+	read_lock(&csk->callback_lock);
+	if (conn && csk->state != CXGBI_CSK_ST_ESTABLISHED)
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	read_unlock(&csk->callback_lock);
+}
+
+static void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
+{
+	u8 submode = 0;
+
+	if (hcrc)
+		submode |= 1;
+	if (dcrc)
+		submode |= 2;
+	cxgb4i_skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
+}
+
+static inline __u16 get_skb_ulp_mode(struct sk_buff *skb)
+{
+	return cxgb4i_skb_ulp_mode(skb);
+}
+
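+/* CPL message dispatch table, indexed by CPL opcode. */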
+static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = cxgb4i_cpl_act_establish,
+	[CPL_ACT_OPEN_RPL] = cxgb4i_cpl_act_open_rpl,
+	[CPL_PEER_CLOSE] = cxgb4i_cpl_peer_close,
+	[CPL_ABORT_REQ_RSS] = cxgb4i_cpl_abort_req_rss,
+	[CPL_ABORT_RPL_RSS] = cxgb4i_cpl_abort_rpl_rss,
+	[CPL_CLOSE_CON_RPL] = cxgb4i_cpl_close_con_rpl,
+	[CPL_FW4_ACK] = cxgb4i_cpl_fw4_ack,
+	[CPL_ISCSI_HDR] = cxgb4i_cpl_iscsi_hdr,
+	[CPL_SET_TCB_RPL] = cxgb4i_cpl_set_tcb_rpl,
+	[CPL_RX_DATA_DDP] = cxgb4i_cpl_rx_data_ddp
+};
+
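+/*
+ * Per-adapter offload setup: allocate the source-port map and hook the
+ * transmit callbacks and the CPL dispatch table into the cxgbi device.
+ */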
+int cxgb4i_ofld_init(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_ports_map *ports;
+	int mapsize;
+
+	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
+		cxgb4i_max_connect = CXGB4I_MAX_CONN;
+
+	mapsize = cxgb4i_max_connect * sizeof(struct cxgbi_sock *);
+	ports = cxgbi_alloc_big_mem(sizeof(*ports) + mapsize, GFP_KERNEL);
+	if (!ports)
+		return -ENOMEM;
+
+	spin_lock_init(&ports->lock);
+	snic->cdev.pmap = ports;
+	snic->cdev.pmap->max_connect = cxgb4i_max_connect;
+	snic->cdev.pmap->sport_base = cxgb4i_sport_base;
+
+	snic->cdev.tx_skb_setmode = tx_skb_setmode;
+	snic->cdev.sock_send_pdus = cxgb4i_sock_send_pdus;
+	snic->cdev.get_skb_ulp_mode = get_skb_ulp_mode;
+
+	snic->handlers = cxgb4i_cplhandlers;
+
+	return 0;
+}
+
+void cxgb4i_ofld_cleanup(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_sock *csk;
+	int i;
+
+	for (i = 0; i < snic->cdev.pmap->max_connect; i++) {
+		if (snic->cdev.pmap->port_csk[i]) {
+			csk = snic->cdev.pmap->port_csk[i];
+			snic->cdev.pmap->port_csk[i] = NULL;
+
+			spin_lock_bh(&csk->lock);
+			cxgb4i_sock_closed(csk);
+			spin_unlock_bh(&csk->lock);
+		}
+	}
+	cxgbi_free_big_mem(snic->cdev.pmap);
+}
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_offload.h b/drivers/scsi/cxgb4i/cxgb4i_offload.h
new file mode 100644
index 0000000..afd50d9
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_offload.h
@@ -0,0 +1,91 @@
+/*
+ * cxgb4i_offload.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_OFFLOAD_H__
+#define	__CXGB4I_OFFLOAD_H__
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+
+#include "libcxgbi.h"
+
+#define	CXGB4I_MAX_CONN	16384
+
+enum {
+	CPL_RET_BUF_DONE = 1,
+	CPL_RET_BAD_MSG = 2,
+	CPL_RET_UNKNOWN_TID = 4
+};
+
+struct cxgbi_sock *cxgb4i_sock_create(struct cxgb4i_snic *);
+void cxgb4i_sock_release(struct cxgbi_sock *);
+int cxgb4i_sock_connect(struct net_device *, struct cxgbi_sock *,
+			struct sockaddr_in *);
+void cxgb4i_sock_rx_credits(struct cxgbi_sock *, int);
+int cxgb4i_sock_send_pdus(struct cxgbi_sock *, struct sk_buff *);
+
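+/*
+ * Per-skb driver state kept in skb->cb: TX skbs link into the pending-WR
+ * list, RX skbs carry the data digest and PDU length reported by the
+ * hardware.
+ */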
+struct cxgb4i_skb_rx_cb {
+	__u32 ddigest;
+	__u32 pdulen;
+};
+
+struct cxgb4i_skb_tx_cb {
+	struct l2t_skb_cb l2t;
+	struct sk_buff *wr_next;
+};
+
+struct cxgb4i_skb_cb {
+	__u16 flags;
+	__u16 ulp_mode;
+	__u32 seq;
+
+	union {
+		struct cxgb4i_skb_rx_cb rx;
+		struct cxgb4i_skb_tx_cb tx;
+	};
+};
+
+#define CXGB4I_SKB_CB(skb)	((struct cxgb4i_skb_cb *)&((skb)->cb[0]))
+#define cxgb4i_skb_flags(skb)	(CXGB4I_SKB_CB(skb)->flags)
+#define cxgb4i_skb_ulp_mode(skb)	(CXGB4I_SKB_CB(skb)->ulp_mode)
+#define cxgb4i_skb_tcp_seq(skb)		(CXGB4I_SKB_CB(skb)->seq)
+#define cxgb4i_skb_rx_ddigest(skb)	(CXGB4I_SKB_CB(skb)->rx.ddigest)
+#define cxgb4i_skb_rx_pdulen(skb)	(CXGB4I_SKB_CB(skb)->rx.pdulen)
+#define cxgb4i_skb_tx_wr_next(skb)	(CXGB4I_SKB_CB(skb)->tx.wr_next)
+
+enum cxgb4i_skcb_flags {
+	CXGB4I_SKCB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a header */
+	CXGB4I_SKCB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
+	CXGB4I_SKCB_FLAG_COMPL = 1 << 2,	/* request WR completion */
+	CXGB4I_SKCB_FLAG_HDR_RCVD = 1 << 3,	/* received header pdu */
+	CXGB4I_SKCB_FLAG_DATA_RCVD = 1 << 4,	/* received data pdu */
+	CXGB4I_SKCB_FLAG_STATUS_RCVD = 1 << 5,	/* received ddp status */
+};
+
+/*
+ * sge_opaque_hdr -
+ * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
+ * and for which we must reserve space.
+ */
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define CXGB4I_TX_HEADER_LEN \
+	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define SKB_TX_HEADROOM	SKB_MAX_HEAD(CXGB4I_TX_HEADER_LEN)
+
+#endif	/* __CXGB4I_OFFLOAD_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_snic.c b/drivers/scsi/cxgb4i/cxgb4i_snic.c
new file mode 100644
index 0000000..68cdae5
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_snic.c
@@ -0,0 +1,260 @@
+/*
+ * cxgb4i_snic.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <net/route.h>
+
+#include "cxgb4i.h"
+
+#define	DRV_MODULE_NAME		"cxgb4i"
+#define	DRV_MODULE_VERSION	"0.90"
+#define	DRV_MODULE_RELDATE	"04/08/2010"
+
+static char version[] =
+	"Chelsio T4 iSCSI driver " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_DESCRIPTION("Chelsio T4 iSCSI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static LIST_HEAD(snic_list);
+static DEFINE_MUTEX(snic_rwlock);
+
+static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo);
+static int cxgb4i_uld_rx_handler(void *handle, const __be64 *rsp,
+				const struct pkt_gl *pgl);
+static int cxgb4i_uld_state_change(void *handle, enum cxgb4_state state);
+
+static struct cxgb4i_snic *cxgb4i_snic_init(const struct cxgb4_lld_info *);
+static void cxgb4i_snic_cleanup(void);
+
+
+static struct cxgb4_uld_info cxgb4i_uld_info = {
+	.name = "cxgb4i",
+	.add = cxgb4i_uld_add,
+	.rx_handler = cxgb4i_uld_rx_handler,
+	.state_change = cxgb4i_uld_state_change,
+};
+
+
+struct cxgbi_hba *cxgb4i_hba_find_by_netdev(struct net_device *dev)
+{
+	int i;
+	struct cxgb4i_snic *snic = NULL;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN)
+		dev = vlan_dev_real_dev(dev);
+
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry(snic, &snic_list, list_head) {
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]->ndev == dev) {
+				mutex_unlock(&snic_rwlock);
+				return snic->hba[i];
+			}
+		}
+	}
+	mutex_unlock(&snic_rwlock);
+	return NULL;
+}
+
+struct cxgb4i_snic *cxgb4i_find_snic(struct net_device *dev, __be32 ipaddr)
+{
+	struct flowi fl;
+	struct rtable *rt;
+	struct net_device *sdev = NULL;
+	struct cxgb4i_snic *snic = NULL, *tmp;
+	int err, i;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = ipaddr;
+
+	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+	if (err)
+		goto out;
+
+	sdev = (&rt->u.dst)->dev;
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry_safe(snic, tmp, &snic_list, list_head) {
+		if (snic) {
+			for (i = 0; i < snic->lldi.nports; i++) {
+				if (sdev == snic->lldi.ports[i]) {
+					mutex_unlock(&snic_rwlock);
+					return snic;
+				}
+			}
+		}
+	}
+	mutex_unlock(&snic_rwlock);
+
+out:
+	snic = NULL;
+	return snic;
+}
+
+static void cxgb4i_snic_add(struct list_head *list_head)
+{
+	mutex_lock(&snic_rwlock);
+	list_add_tail(list_head, &snic_list);
+	mutex_unlock(&snic_rwlock);
+}
+
+static struct cxgb4i_snic *cxgb4i_snic_init(const struct cxgb4_lld_info *linfo)
+{
+	struct cxgb4i_snic *snic;
+	int i;
+
+	snic = kzalloc(sizeof(*snic), GFP_KERNEL);
+	if (snic) {
+
+		spin_lock_init(&snic->lock);
+		snic->lldi = *linfo;
+		snic->hba_cnt = snic->lldi.nports;
+		snic->cdev.dd_data = snic;
+		snic->cdev.pdev = snic->lldi.pdev;
+		snic->cdev.skb_tx_headroom = SKB_MAX_HEAD(CXGB4I_TX_HEADER_LEN);
+
+		cxgb4i_iscsi_init();
+		cxgbi_pdu_init(&snic->cdev);
+		cxgb4i_ddp_init(snic);
+		cxgb4i_ofld_init(snic);
+
+		for (i = 0; i < snic->hba_cnt; i++) {
+			snic->hba[i] = cxgb4i_hba_add(snic,
+						snic->lldi.ports[i]);
+			if (!snic->hba[i]) {
+				kfree(snic);
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+		cxgb4i_snic_add(&snic->list_head);
+		return snic;
+	}
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static void cxgb4i_snic_cleanup(void)
+{
+	struct cxgb4i_snic *snic, *tmp;
+	int i;
+
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry_safe(snic, tmp, &snic_list, list_head) {
+		list_del(&snic->list_head);
+
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]) {
+				cxgb4i_hba_remove(snic->hba[i]);
+				snic->hba[i] = NULL;
+			}
+		}
+		cxgb4i_ofld_cleanup(snic);
+		cxgb4i_ddp_cleanup(snic);
+		cxgbi_pdu_cleanup(&snic->cdev);
+		cxgbi_log_info("snic 0x%p, %u scsi hosts removed.\n",
+				snic, snic->hba_cnt);
+
+		kfree(snic);
+	}
+	mutex_unlock(&snic_rwlock);
+	cxgb4i_iscsi_cleanup();
+}
+
+static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo)
+{
+	struct cxgb4i_snic *snic;
+
+	cxgbi_log_info("%s", version);
+
+	snic = cxgb4i_snic_init(linfo);
+
+	return snic;
+}
+
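+/*
+ * ULD receive callback: build an skb from the response descriptor (or the
+ * packet gather list) and dispatch it to the CPL handler for its opcode.
+ */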
+static int cxgb4i_uld_rx_handler(void *handle, const __be64 *rsp,
+				const struct pkt_gl *pgl)
+{
+	struct cxgb4i_snic *snic = handle;
+	struct sk_buff *skb;
+	const struct cpl_act_establish *rpl;
+	unsigned int opcode;
+
+	if (pgl == NULL) {
+		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+		skb = alloc_skb(256, GFP_ATOMIC);
+		if (!skb)
+			goto nomem;
+		__skb_put(skb, len);
+		skb_copy_to_linear_data(skb, &rsp[1], len);
+
+	} else if (pgl == CXGB4_MSG_AN) {
+
+		return 0;
+
+	} else {
+
+		skb = cxgb4_pktgl_to_skb(pgl, 256, 256);
+		if (unlikely(!skb))
+			goto nomem;
+	}
+
+	rpl = cplhdr(skb);
+	opcode = rpl->ot.opcode;
+
+	cxgbi_api_debug("snic %p, opcode 0x%x, skb %p\n",
+			 snic, opcode, skb);
+
+	if (snic->handlers[opcode]) {
+		snic->handlers[opcode](snic, skb);
+	} else {
+		cxgbi_log_error("No handler for opcode 0x%x\n", opcode);
+		__kfree_skb(skb);
+	}
+
+	return 0;
+
+nomem:
+	cxgbi_api_debug("OOM bailing out\n");
+	return 1;
+}
+
+static int cxgb4i_uld_state_change(void *handle, enum cxgb4_state state)
+{
+	return 0;
+}
+
+static int __init cxgb4i_init_module(void)
+{
+	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+}
+
+static void __exit cxgb4i_exit_module(void)
+{
+
+	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
+	cxgb4i_snic_cleanup();
+}
+
+module_init(cxgb4i_init_module);
+module_exit(cxgb4i_exit_module);
+
-- 
1.6.6.1


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* cxgb4i_v3.1 submission
@ 2010-05-15 17:24 Rakesh Ranjan
       [not found] ` <1273944249-311-1-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Rakesh Ranjan @ 2010-05-15 17:24 UTC (permalink / raw)
  To: NETDEVML, SCSIDEVML, OISCSIML
  Cc: LKML, Karen Xie, David Miller, James Bottomley, Mike Christie,
	Anish Bhatt, Rakesh Ranjan


The following 3 patches add a new iSCSI LLD driver, cxgb4i, to enable iSCSI offload
support on Chelsio's new 1G and 10G cards. This is an updated version of the previous
cxgb4i patch series. Please share your comments after review.

Changes since cxgb4i_v3:
1. Abstracted the common code into the libcxgbi library more cleanly.
2. Fixed a few packet sequence calculation bugs.
3. Switched to compile-time initialization of the CPL handlers.
4. Removed the private mail address from the patches' From: field.


[PATCH 1/3] cxgb4i_v3: add build support
[PATCH 2/3] cxgb4i_v3: main driver files
[PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part

Regards
Rakesh Ranjan


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 1/3] cxgb4i_v3: add build support
       [not found] ` <1273944249-311-1-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2010-05-15 17:24   ` Rakesh Ranjan
       [not found]     ` <1273944249-311-2-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Rakesh Ranjan @ 2010-05-15 17:24 UTC (permalink / raw)
  To: NETDEVML, SCSIDEVML, OISCSIML
  Cc: LKML, Karen Xie, David Miller, James Bottomley, Mike Christie,
	Anish Bhatt, Rakesh Ranjan, Rakesh Ranjan

From: Rakesh Ranjan <rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>


Signed-off-by: Rakesh Ranjan <rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
---
 drivers/scsi/Kconfig        |    1 +
 drivers/scsi/Makefile       |    1 +
 drivers/scsi/cxgb4i/Kbuild  |    4 ++++
 drivers/scsi/cxgb4i/Kconfig |    7 +++++++
 4 files changed, 13 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/cxgb4i/Kbuild
 create mode 100644 drivers/scsi/cxgb4i/Kconfig

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 75f2336..fc3810a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -371,6 +371,7 @@ config ISCSI_TCP
 	 http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/cxgb4i/Kconfig"
 source "drivers/scsi/bnx2i/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
 
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1c7ac49..46dcdc8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -133,6 +133,7 @@ obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb4i/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
diff --git a/drivers/scsi/cxgb4i/Kbuild b/drivers/scsi/cxgb4i/Kbuild
new file mode 100644
index 0000000..1cb87b9
--- /dev/null
+++ b/drivers/scsi/cxgb4i/Kbuild
@@ -0,0 +1,4 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4
+
+cxgb4i-y := libcxgbi.o cxgb4i_snic.o cxgb4i_iscsi.o cxgb4i_offload.o cxgb4i_ddp.o
+obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgb4i/Kconfig b/drivers/scsi/cxgb4i/Kconfig
new file mode 100644
index 0000000..3f33dc2
--- /dev/null
+++ b/drivers/scsi/cxgb4i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_CXGB4_ISCSI
+	tristate "Chelsio T4 iSCSI support"
+	depends on CHELSIO_T4_DEPENDS
+	select CHELSIO_T4
+	select SCSI_ISCSI_ATTRS
+	---help---
+	This driver supports iSCSI offload for the Chelsio T4 series devices.
-- 
1.6.6.1


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/3] cxgb4i_v3: main driver files
       [not found]     ` <1273944249-311-2-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2010-05-15 17:24       ` Rakesh Ranjan
       [not found]         ` <1273944249-311-3-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Rakesh Ranjan @ 2010-05-15 17:24 UTC (permalink / raw)
  To: NETDEVML, SCSIDEVML, OISCSIML
  Cc: LKML, Karen Xie, David Miller, James Bottomley, Mike Christie,
	Anish Bhatt, Rakesh Ranjan, Rakesh Ranjan

From: Rakesh Ranjan <rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>


Signed-off-by: Rakesh Ranjan <rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
---
 drivers/scsi/cxgb4i/cxgb4i.h         |  101 ++
 drivers/scsi/cxgb4i/cxgb4i_ddp.c     |  678 +++++++++++++
 drivers/scsi/cxgb4i/cxgb4i_ddp.h     |  118 +++
 drivers/scsi/cxgb4i/cxgb4i_offload.c | 1846 ++++++++++++++++++++++++++++++++++
 drivers/scsi/cxgb4i/cxgb4i_offload.h |   91 ++
 drivers/scsi/cxgb4i/cxgb4i_snic.c    |  260 +++++
 6 files changed, 3094 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.c
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.c
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.h
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_snic.c

diff --git a/drivers/scsi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgb4i/cxgb4i.h
new file mode 100644
index 0000000..fbf7699
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i.h
@@ -0,0 +1,101 @@
+/*
+ * cxgb4i.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_H__
+#define	__CXGB4I_H__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <scsi/libiscsi_tcp.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "l2t.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+
+#include "libcxgbi.h"
+#include "cxgb4i_ddp.h"
+#include "cxgb4i_offload.h"
+
+#define	CXGB4I_SCSI_HOST_QDEPTH	1024
+#define	CXGB4I_MAX_TARGET	CXGB4I_MAX_CONN
+#define	CXGB4I_MAX_LUN		512
+#define	ISCSI_PDU_NONPAYLOAD_MAX \
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + \
+	 (2 * ISCSI_DIGEST_SIZE))
+
+struct cxgb4i_snic;
+struct cxgb4i_host;
+struct cxgb4i_endpoint;
+typedef int (*cxgb4i_cplhandler_func)(struct cxgb4i_snic *, struct sk_buff *);
+
+struct cxgb4i_snic {
+	struct list_head list_head;
+	spinlock_t lock;
+	struct cxgbi_device cdev;
+	struct cxgbi_hba *hba[MAX_NPORTS];
+	unsigned char hba_cnt;
+	unsigned int flags;
+	unsigned int tx_max_size;
+	unsigned int rx_max_size;
+	struct cxgb4_lld_info lldi;
+	struct cxgb4i_ddp_info *ddp;
+	cxgb4i_cplhandler_func *handlers;
+};
+
+int cxgb4i_ofld_init(struct cxgb4i_snic *);
+void cxgb4i_ofld_cleanup(struct cxgb4i_snic *);
+struct cxgb4i_snic *cxgb4i_find_snic(struct net_device *, __be32);
+struct cxgbi_hba *cxgb4i_hba_find_by_netdev(struct net_device *);
+struct cxgbi_hba *cxgb4i_hba_add(struct cxgb4i_snic *, struct net_device *);
+void cxgb4i_hba_remove(struct cxgbi_hba *);
+int cxgb4i_iscsi_init(void);
+void cxgb4i_iscsi_cleanup(void);
+
+static inline void cxgb4i_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
+{
+	chba->ipv4addr = ipaddr;
+}
+
+static inline __be32 cxgb4i_get_iscsi_ipv4(struct cxgbi_hba *chba)
+{
+	return chba->ipv4addr;
+}
+
+static inline struct cxgb4i_snic *cxgb4i_get_snic(struct cxgbi_device *cdev)
+{
+	return (struct cxgb4i_snic *)cdev->dd_data;
+}
+
+
+#define W_TCB_ULP_TYPE          0
+#define TCB_ULP_TYPE_SHIFT      0
+#define TCB_ULP_TYPE_MASK       0xfULL
+#define TCB_ULP_TYPE(x)         ((x) << TCB_ULP_TYPE_SHIFT)
+
+#define W_TCB_ULP_RAW           0
+#define TCB_ULP_RAW_SHIFT       4
+#define TCB_ULP_RAW_MASK        0xffULL
+#define TCB_ULP_RAW(x)          ((x) << TCB_ULP_RAW_SHIFT)
+
+
+#endif	/* __CXGB4I_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_ddp.c b/drivers/scsi/cxgb4i/cxgb4i_ddp.c
new file mode 100644
index 0000000..1e53c0e
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_ddp.c
@@ -0,0 +1,678 @@
+/*
+ * cxgb4i_ddp.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/skbuff.h>
+#include <linux/scatterlist.h>
+
+
+#include "libcxgbi.h"
+#include "cxgb4i.h"
+#include "cxgb4i_ddp.h"
+
+#define DDP_PGIDX_MAX	4
+#define DDP_THRESHOLD	2048
+
+static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
+static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
+static unsigned char page_idx = DDP_PGIDX_MAX;
+
+static unsigned char sw_tag_idx_bits;
+static unsigned char sw_tag_age_bits;
+
+
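+/*
+ * Fill one page pod with the given header and the next PPOD_PAGES_MAX + 1
+ * page addresses from the gather list; entries past the end are zeroed.
+ */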
+static inline void cxgb4i_ddp_ppod_set(struct pagepod *ppod,
+					struct pagepod_hdr *hdr,
+					struct cxgbi_gather_list *gl,
+					unsigned int pidx)
+{
+	int i;
+
+	memcpy(ppod, hdr, sizeof(*hdr));
+	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, pidx++) {
+		ppod->addr[i] = pidx < gl->nelem ?
+			cpu_to_be64(gl->phys_addr[pidx]) : 0ULL;
+	}
+}
+
+static inline void cxgb4i_ddp_ppod_clear(struct pagepod *ppod)
+{
+	memset(ppod, 0, sizeof(*ppod));
+}
+
+static inline void cxgb4i_ddp_ulp_mem_io_set_hdr(struct ulp_mem_io *req,
+					unsigned int wr_len, unsigned int dlen,
+					unsigned int pm_addr)
+{
+	struct ulptx_sgl *sgl;
+
+	INIT_ULPTX_WR(req, wr_len, 0, 0);
+	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE));
+	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
+	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
+
+	sgl = (struct ulptx_sgl *)(req + 1);
+	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1));
+	sgl->len0 = htonl(dlen);
+}
+
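+/*
+ * Write a group of page pods into adapter memory using a single ULP_TX
+ * memory-write work request sent on the control queue.
+ */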
+static int cxgb4i_ddp_ppod_write_sgl(struct cxgb4i_ddp_info *ddp,
+					struct pagepod_hdr *hdr,
+					unsigned int idx,
+					unsigned int npods,
+					struct cxgbi_gather_list *gl,
+					unsigned int gl_pidx)
+{
+	unsigned int dlen = PPOD_SIZE * npods;
+	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
+	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+					sizeof(struct ulptx_sgl), 16);
+	struct sk_buff *skb = alloc_skb(wr_len + dlen, GFP_ATOMIC);
+	struct ulp_mem_io *req;
+	struct ulptx_sgl *sgl;
+	struct pagepod *ppod;
+	unsigned int i;
+
+	if (!skb) {
+		cxgbi_log_error("snic 0x%p, idx %u, npods %u, OOM\n",
+				ddp->snic, idx, npods);
+		return -ENOMEM;
+	}
+
+	memset(skb->data, 0, wr_len + dlen);
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+	cxgb4i_ddp_ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
+	sgl = (struct ulptx_sgl *)(req + 1);
+	ppod = (struct pagepod *)(sgl + 1);
+	sgl->addr0 = cpu_to_be64(virt_to_phys(ppod));
+
+	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
+		if (!hdr && !gl)
+			cxgb4i_ddp_ppod_clear(ppod);
+		else
+			cxgb4i_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
+
+	}
+
+	cxgb4_ofld_send(ddp->snic->lldi.ports[0], skb);
+
+	return 0;
+}
+
+static int cxgb4i_ddp_set_map(struct cxgb4i_ddp_info *ddp,
+					struct pagepod_hdr *hdr,
+					unsigned int idx,
+					unsigned int npods,
+					struct cxgbi_gather_list *gl)
+{
+	unsigned int pidx = 0;
+	unsigned int w_npods = 0;
+	unsigned int cnt;
+	int err = 0;
+
+	for (; w_npods < npods; idx += cnt, w_npods += cnt,
+					pidx += PPOD_PAGES_MAX) {
+		cnt = npods - w_npods;
+		if (cnt > ULPMEM_DSGL_MAX_NPPODS)
+			cnt = ULPMEM_DSGL_MAX_NPPODS;
+		err = cxgb4i_ddp_ppod_write_sgl(ddp, hdr, idx, cnt, gl, pidx);
+
+		if (err < 0)
+			break;
+	}
+
+	return err;
+}
+
+static void cxgb4i_ddp_clear_map(struct cxgb4i_ddp_info *ddp,
+						unsigned int tag,
+						unsigned int idx,
+						unsigned int npods)
+{
+	int err;
+	unsigned int w_npods = 0;
+	unsigned int cnt;
+
+	for (; w_npods < npods; idx += cnt, w_npods += cnt) {
+		cnt = npods - w_npods;
+
+		if (cnt > ULPMEM_DSGL_MAX_NPPODS)
+			cnt = ULPMEM_DSGL_MAX_NPPODS;
+		err = cxgb4i_ddp_ppod_write_sgl(ddp, NULL, idx, cnt, NULL, 0);
+
+		if (err < 0)
+			break;
+	}
+}
+
+static inline int cxgb4i_ddp_find_unused_entries(struct cxgb4i_ddp_info *ddp,
+					unsigned int start, unsigned int max,
+					unsigned int count,
+					struct cxgbi_gather_list *gl)
+{
+	unsigned int i, j, k;
+
+	/*  not enough entries */
+	if ((max - start) < count)
+		return -EBUSY;
+
+	max -= count;
+	spin_lock(&ddp->map_lock);
+	for (i = start; i < max;) {
+		for (j = 0, k = i; j < count; j++, k++) {
+			if (ddp->gl_map[k])
+				break;
+		}
+		if (j == count) {
+			for (j = 0, k = i; j < count; j++, k++)
+				ddp->gl_map[k] = gl;
+			spin_unlock(&ddp->map_lock);
+			return i;
+		}
+		i += j + 1;
+	}
+	spin_unlock(&ddp->map_lock);
+	return -EBUSY;
+}
+
+static inline void cxgb4i_ddp_unmark_entries(struct cxgb4i_ddp_info *ddp,
+							int start, int count)
+{
+	spin_lock(&ddp->map_lock);
+	memset(&ddp->gl_map[start], 0,
+			count * sizeof(struct cxgbi_gather_list *));
+	spin_unlock(&ddp->map_lock);
+}
+
+static int cxgb4i_ddp_find_page_index(unsigned long pgsz)
+{
+	int i;
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		if (pgsz == (1UL << ddp_page_shift[i]))
+			return i;
+	}
+	cxgbi_log_debug("ddp page size 0x%lx not supported\n", pgsz);
+
+	return DDP_PGIDX_MAX;
+}
+
+static int cxgb4i_ddp_adjust_page_table(void)
+{
+	int i;
+	unsigned int base_order, order;
+
+	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
+		cxgbi_log_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
+				PAGE_SIZE, 1UL << ddp_page_shift[0]);
+		return -EINVAL;
+	}
+
+	base_order = get_order(1UL << ddp_page_shift[0]);
+	order = get_order(1UL << PAGE_SHIFT);
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		/* first is the kernel page size, then just doubling the size */
+		ddp_page_order[i] = order - base_order + i;
+		ddp_page_shift[i] = PAGE_SHIFT + i;
+	}
+
+	return 0;
+}
+
+static inline void cxgb4i_ddp_gl_unmap(struct pci_dev *pdev,
+					struct cxgbi_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++)
+		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
+				PCI_DMA_FROMDEVICE);
+}
+
+static inline int cxgb4i_ddp_gl_map(struct pci_dev *pdev,
+				struct cxgbi_gather_list *gl)
+{
+	int i;
+
+	for (i = 0; i < gl->nelem; i++) {
+		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
+						PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i])))
+			goto unmap;
+	}
+
+	return i;
+
+unmap:
+	if (i) {
+		unsigned int nelem = gl->nelem;
+
+		gl->nelem = i;
+		cxgb4i_ddp_gl_unmap(pdev, gl);
+		gl->nelem = nelem;
+	}
+	return -ENOMEM;
+}
+
+
+void cxgb4i_ddp_release_gl(struct cxgbi_gather_list *gl,
+				struct pci_dev *pdev)
+{
+	cxgb4i_ddp_gl_unmap(pdev, gl);
+	kfree(gl);
+}
+
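+/*
+ * Build a DDP gather list from a scatterlist: coalesce entries that share a
+ * page, check that the layout is usable for DDP and DMA-map the pages.
+ * Returns NULL if the transfer is too small or not DDP-able.
+ */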
+struct cxgbi_gather_list *cxgb4i_ddp_make_gl(unsigned int xferlen,
+						struct scatterlist *sgl,
+						unsigned int sgcnt,
+						struct pci_dev *pdev,
+						gfp_t gfp)
+{
+	struct cxgbi_gather_list *gl;
+	struct scatterlist *sg = sgl;
+	struct page *sgpage = sg_page(sg);
+	unsigned int sglen = sg->length;
+	unsigned int sgoffset = sg->offset;
+	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
+				PAGE_SHIFT;
+	int i = 1, j = 0;
+
+	if (xferlen < DDP_THRESHOLD) {
+		cxgbi_log_debug("xfer %u < threshold %u, no ddp.\n",
+				xferlen, DDP_THRESHOLD);
+		return NULL;
+	}
+
+	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
+			npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
+			gfp);
+	if (!gl)
+		return NULL;
+
+	gl->pages = (struct page **)&gl->phys_addr[npages];
+	gl->length = xferlen;
+	gl->offset = sgoffset;
+	gl->pages[0] = sgpage;
+
+	sg = sg_next(sg);
+	while (sg) {
+		struct page *page = sg_page(sg);
+
+		if (sgpage == page && sg->offset == sgoffset + sglen)
+			sglen += sg->length;
+		else {
+			/*  make sure the sgl is fit for ddp:
+			 *  each has the same page size, and
+			 *  all of the middle pages are used completely
+			 */
+			if ((j && sgoffset) || ((i != sgcnt - 1) &&
+					 ((sglen + sgoffset) & ~PAGE_MASK)))
+				goto error_out;
+
+			j++;
+			if (j == gl->nelem || sg->offset)
+				goto error_out;
+			gl->pages[j] = page;
+			sglen = sg->length;
+			sgoffset = sg->offset;
+			sgpage = page;
+		}
+		i++;
+		sg = sg_next(sg);
+	}
+	gl->nelem = ++j;
+
+	if (cxgb4i_ddp_gl_map(pdev, gl) < 0)
+		goto error_out;
+
+	return gl;
+
+error_out:
+	kfree(gl);
+	return NULL;
+}
+
+
+static void cxgb4i_ddp_tag_release(struct cxgbi_device *cdev, u32 tag)
+{
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(cdev);
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	u32 idx;
+
+	if (!ddp) {
+		cxgbi_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
+		return;
+	}
+
+	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+	if (idx < ddp->nppods) {
+		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
+		unsigned int npods;
+
+		if (!gl || !gl->nelem) {
+			cxgbi_log_error("rel 0x%x, idx 0x%x, gl 0x%p, %u\n",
+					tag, idx, gl, gl ? gl->nelem : 0);
+			return;
+		}
+		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+		cxgbi_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
+				tag, idx, npods);
+		cxgb4i_ddp_clear_map(ddp, tag, idx, npods);
+		cxgb4i_ddp_unmark_entries(ddp, idx, npods);
+		cxgb4i_ddp_release_gl(gl, ddp->pdev);
+	} else
+		cxgbi_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
+				tag, idx, ddp->nppods);
+}
+
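+/*
+ * Reserve a DDP tag: find a free run of page pods, program them with the
+ * gather list and fold the pod index into the tag handed back to the
+ * initiator.
+ */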
+static int cxgb4i_ddp_tag_reserve(struct cxgbi_device *cdev, unsigned int tid,
+				struct cxgbi_tag_format *tformat, u32 *tagp,
+				struct cxgbi_gather_list *gl, gfp_t gfp)
+{
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(cdev);
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	struct pagepod_hdr hdr;
+	unsigned int npods;
+	int idx = -1;
+	int err = -ENOMEM;
+	u32 sw_tag = *tagp;
+	u32 tag;
+
+	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
+			gl->length < DDP_THRESHOLD) {
+		cxgbi_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
+				page_idx, gl->length, DDP_THRESHOLD);
+		return -EINVAL;
+	}
+
+	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+
+	if (ddp->idx_last == ddp->nppods)
+		idx = cxgb4i_ddp_find_unused_entries(ddp, 0, ddp->nppods,
+							npods, gl);
+	else {
+		idx = cxgb4i_ddp_find_unused_entries(ddp, ddp->idx_last + 1,
+							ddp->nppods, npods,
+							gl);
+		if (idx < 0 && ddp->idx_last >= npods) {
+			idx = cxgb4i_ddp_find_unused_entries(ddp, 0,
+				min(ddp->idx_last + npods, ddp->nppods),
+							npods, gl);
+		}
+	}
+	if (idx < 0) {
+		cxgbi_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
+				gl->length, gl->nelem, npods);
+		return idx;
+	}
+
+	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
+	tag |= idx << PPOD_IDX_SHIFT;
+
+	hdr.rsvd = 0;
+	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
+	hdr.max_offset = htonl(gl->length);
+	hdr.page_offset = htonl(gl->offset);
+
+	err = cxgb4i_ddp_set_map(ddp, &hdr, idx, npods, gl);
+	if (err < 0)
+		goto unmark_entries;
+
+	ddp->idx_last = idx;
+	cxgbi_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
+			gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
+			idx, npods);
+	*tagp = tag;
+	return 0;
+
+unmark_entries:
+	cxgb4i_ddp_unmark_entries(ddp, idx, npods);
+	return err;
+}
+
+
+static int cxgb4i_ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
+					unsigned int tid, int pg_idx,
+					bool reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
+
+	if (!skb)
+		return -ENOMEM;
+
+	/*  set up ulp submode and page size */
+	val = (val & 0x03) << 2;
+	val |= TCB_ULP_TYPE(ULP_MODE_ISCSI);
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->hwtid));
+	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
+	req->mask = cpu_to_be64(TCB_ULP_TYPE(TCB_ULP_TYPE_MASK));
+	req->val = cpu_to_be64(val);
+
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	cxgb4_ofld_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[0], skb);
+	return 0;
+}
+
+int cxgb4i_ddp_setup_conn_host_pagesize(struct cxgbi_sock *csk,
+						unsigned int tid,
+						int reply)
+{
+	return cxgb4i_ddp_setup_conn_pgidx(csk, tid, page_idx, reply);
+}
+
+int cxgb4i_ddp_setup_conn_pagesize(struct cxgbi_sock *csk, unsigned int tid,
+					int reply, unsigned long pgsz)
+{
+	int pgidx = cxgb4i_ddp_find_page_index(pgsz);
+
+	return cxgb4i_ddp_setup_conn_pgidx(csk, tid, pgidx, reply);
+}
+
+int cxgb4i_ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
+				int hcrc, int dcrc, int reply)
+{
+	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+					GFP_KERNEL);
+	struct cpl_set_tcb_field *req;
+	u64 val = (hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? ULP_CRC_DATA : 0);
+	val = TCB_ULP_RAW(val);
+	val |= TCB_ULP_TYPE(ULP_MODE_ISCSI);
+
+	if (!skb)
+		return -ENOMEM;
+
+	/*  set up ulp submode: header/data digest */
+	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW));
+	req->mask = cpu_to_be64(TCB_ULP_RAW(TCB_ULP_RAW_MASK));
+	req->val = cpu_to_be64(val);
+
+	skb->queue_mapping = CPL_PRIORITY_CONTROL;
+
+	cxgb4_ofld_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[0], skb);
+	return 0;
+}
+
+static void __cxgb4i_ddp_cleanup(struct kref *kref)
+{
+	int i = 0;
+	struct cxgb4i_ddp_info *ddp = container_of(kref,
+						struct cxgb4i_ddp_info,
+						refcnt);
+
+	cxgbi_log_info("kref release ddp 0x%p, snic 0x%p\n", ddp, ddp->snic);
+
+	ddp->snic->ddp = NULL;
+
+	while (i < ddp->nppods) {
+		struct cxgbi_gather_list *gl = ddp->gl_map[i];
+
+		if (gl) {
+			int npods = (gl->nelem + PPOD_PAGES_MAX - 1) >>
+							PPOD_PAGES_SHIFT;
+			cxgbi_log_info("snic 0x%p, ddp %d + %d\n",
+						ddp->snic, i, npods);
+			kfree(gl);
+			i += npods;
+		} else
+			i++;
+	}
+	cxgbi_free_big_mem(ddp);
+}
+
+
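+/*
+ * Size the page-pod region from the adapter's iSCSI memory window and
+ * derive the reserved-bit tag format that libcxgbi will use for DDP tags.
+ */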
+static void __cxgb4i_ddp_init(struct cxgb4i_snic *snic)
+{
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+	unsigned int ppmax, bits, tagmask, pgsz_factor[4];
+	int i;
+
+	if (ddp) {
+		kref_get(&ddp->refcnt);
+		cxgbi_log_warn("snic 0x%p, ddp 0x%p already set up\n",
+				snic, snic->ddp);
+		return;
+	}
+
+	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
+	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+	snic->cdev.tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
+
+	cxgbi_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits\n",
+			ISCSI_ITT_MASK, sw_tag_idx_bits,
+			ISCSI_AGE_MASK, sw_tag_age_bits);
+
+	ppmax = (snic->lldi.vr->iscsi.size >> PPOD_SIZE_SHIFT);
+	bits = __ilog2_u32(ppmax) + 1;
+	if (bits > PPOD_IDX_MAX_SIZE)
+		bits = PPOD_IDX_MAX_SIZE;
+	ppmax = (1 << (bits - 1)) - 1;
+
+	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgb4i_ddp_info) +
+			ppmax * (sizeof(struct cxgbi_gather_list *) +
+				sizeof(struct sk_buff *)),
+				GFP_KERNEL);
+	if (!ddp) {
+		cxgbi_log_warn("snic 0x%p unable to alloc ddp for %u pagepods, "
+			       "ddp disabled\n", snic, ppmax);
+		return;
+	}
+
+	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
+	spin_lock_init(&ddp->map_lock);
+	kref_init(&ddp->refcnt);
+
+	ddp->snic = snic;
+	ddp->pdev = snic->lldi.pdev;
+	ddp->max_txsz = min_t(unsigned int,
+				snic->lldi.iscsi_iolen,
+				ULP2_MAX_PKT_SIZE);
+	ddp->max_rxsz = min_t(unsigned int,
+				snic->lldi.iscsi_iolen,
+				ULP2_MAX_PKT_SIZE);
+	ddp->llimit = snic->lldi.vr->iscsi.start;
+	ddp->ulimit = ddp->llimit + snic->lldi.vr->iscsi.size;
+	ddp->nppods = ppmax;
+	ddp->idx_last = ppmax;
+	ddp->idx_bits = bits;
+	ddp->idx_mask = (1 << bits) - 1;
+	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
+
+	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+	for (i = 0; i < DDP_PGIDX_MAX; i++)
+		pgsz_factor[i] = ddp_page_order[i];
+
+	cxgb4_iscsi_init(snic->lldi.ports[0], tagmask, pgsz_factor);
+	snic->ddp = ddp;
+
+	snic->cdev.tag_format.rsvd_bits = ddp->idx_bits;
+	snic->cdev.tag_format.rsvd_shift = PPOD_IDX_SHIFT;
+	snic->cdev.tag_format.rsvd_mask =
+		((1 << snic->cdev.tag_format.rsvd_bits) - 1);
+
+	cxgbi_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
+			snic->cdev.tag_format.sw_bits,
+			snic->cdev.tag_format.rsvd_bits,
+			snic->cdev.tag_format.rsvd_shift,
+			snic->cdev.tag_format.rsvd_mask);
+
+	snic->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	snic->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+
+	cxgbi_log_info("max payload size: %u/%u, %u/%u.\n",
+			snic->tx_max_size, ddp->max_txsz,
+			snic->rx_max_size, ddp->max_rxsz);
+
+	cxgbi_log_info("snic 0x%p, nppods %u, bits %u, mask 0x%x,0x%x "
+			"pkt %u/%u, %u/%u\n",
+			snic, ppmax, ddp->idx_bits, ddp->idx_mask,
+			ddp->rsvd_tag_mask, ddp->max_txsz,
+			snic->lldi.iscsi_iolen,
+			ddp->max_rxsz, snic->lldi.iscsi_iolen);
+
+	return;
+}
+
+void cxgb4i_ddp_init(struct cxgb4i_snic *snic)
+{
+	if (page_idx == DDP_PGIDX_MAX) {
+		page_idx = cxgb4i_ddp_find_page_index(PAGE_SIZE);
+
+		if (page_idx == DDP_PGIDX_MAX) {
+			cxgbi_log_info("system PAGE_SIZE %lu, update hw\n",
+					PAGE_SIZE);
+
+			if (cxgb4i_ddp_adjust_page_table()) {
+				cxgbi_log_info("PAGE_SIZE %lu, ddp disabled\n",
+						PAGE_SIZE);
+				return;
+			}
+			page_idx = cxgb4i_ddp_find_page_index(PAGE_SIZE);
+		}
+		cxgbi_log_info("system PAGE_SIZE %lu, ddp idx %u\n",
+				PAGE_SIZE, page_idx);
+	}
+
+	__cxgb4i_ddp_init(snic);
+	snic->cdev.ddp_make_gl = cxgb4i_ddp_make_gl;
+	snic->cdev.ddp_release_gl = cxgb4i_ddp_release_gl;
+	snic->cdev.ddp_tag_reserve = cxgb4i_ddp_tag_reserve;
+	snic->cdev.ddp_tag_release = cxgb4i_ddp_tag_release;
+}
+
+void cxgb4i_ddp_cleanup(struct cxgb4i_snic *snic)
+{
+	struct cxgb4i_ddp_info *ddp = snic->ddp;
+
+	cxgbi_log_info("snic 0x%p, release ddp 0x%p\n", snic, ddp);
+	if (ddp)
+		kref_put(&ddp->refcnt, __cxgb4i_ddp_cleanup);
+}
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_ddp.h b/drivers/scsi/cxgb4i/cxgb4i_ddp.h
new file mode 100644
index 0000000..f51cb37
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_ddp.h
@@ -0,0 +1,118 @@
+/*
+ * cxgb4i_ddp.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_DDP_H__
+#define	__CXGB4I_DDP_H__
+
+#include <linux/vmalloc.h>
+
+#include "libcxgbi.h"
+
+struct cxgbi_sock;
+
+struct cxgb4i_ddp_info {
+	struct list_head list;
+	struct kref refcnt;
+	struct cxgb4i_snic *snic;
+	struct pci_dev *pdev;
+	unsigned int max_txsz;
+	unsigned int max_rxsz;
+	unsigned int llimit;
+	unsigned int ulimit;
+	unsigned int nppods;
+	unsigned int idx_last;
+	unsigned char idx_bits;
+	unsigned char filler[3];
+	unsigned int idx_mask;
+	unsigned int rsvd_tag_mask;
+	spinlock_t map_lock;
+	struct cxgbi_gather_list **gl_map;
+};
+
+struct pagepod_hdr {
+	unsigned int vld_tid;
+	unsigned int pgsz_tag_clr;
+	unsigned int max_offset;
+	unsigned int page_offset;
+	unsigned long long rsvd;
+};
+
+struct pagepod {
+	struct pagepod_hdr hdr;
+	unsigned long long addr[PPOD_PAGES_MAX + 1];
+};
+
+struct cpl_rx_data_ddp {
+	union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	};
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
+#define PPOD_SIZE               sizeof(struct pagepod)  /*  64 */
+#define PPOD_SIZE_SHIFT         6
+
+#define ULPMEM_DSGL_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS	4	/*  256/PPOD_SIZE */
+#define PCIE_MEMWIN_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT	0
+#define PPOD_COLOR_MASK		0x3F
+#define PPOD_COLOR_SIZE         6
+#define PPOD_COLOR(x)		((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_TAG_SHIFT	6
+#define PPOD_TAG_MASK	0xFFFFFF
+#define PPOD_TAG(x)	((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_PGSZ_SHIFT	30
+#define PPOD_PGSZ_MASK	0x3
+#define PPOD_PGSZ(x)	((x) << PPOD_PGSZ_SHIFT)
+
+#define PPOD_TID_SHIFT	32
+#define PPOD_TID_MASK	0xFFFFFF
+#define PPOD_TID(x)	((__u64)(x) << PPOD_TID_SHIFT)
+
+#define PPOD_VALID_SHIFT	56
+#define PPOD_VALID(x)	((__u64)(x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG	PPOD_VALID(1ULL)
+
+#define PPOD_LEN_SHIFT	32
+#define PPOD_LEN_MASK	0xFFFFFFFF
+#define PPOD_LEN(x)	((__u64)(x) << PPOD_LEN_SHIFT)
+
+#define PPOD_OFST_SHIFT	0
+#define PPOD_OFST_MASK	0xFFFFFFFF
+#define PPOD_OFST(x)	((x) << PPOD_OFST_SHIFT)
+
+#define PPOD_IDX_SHIFT          PPOD_COLOR_SIZE
+#define PPOD_IDX_MAX_SIZE       24
+
+int cxgb4i_ddp_setup_conn_host_pagesize(struct cxgbi_sock*, unsigned int,
+					int);
+int cxgb4i_ddp_setup_conn_digest(struct cxgbi_sock *, unsigned int,
+				int, int, int);
+int cxgb4i_snic_ddp_info(struct cxgb4i_snic *, struct cxgbi_tag_format *,
+			unsigned int *, unsigned int *);
+
+void cxgb4i_ddp_init(struct cxgb4i_snic *);
+void cxgb4i_ddp_cleanup(struct cxgb4i_snic *);
+
+#endif	/* __CXGB4I_DDP_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_offload.c b/drivers/scsi/cxgb4i/cxgb4i_offload.c
new file mode 100644
index 0000000..87edb14
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_offload.c
@@ -0,0 +1,1846 @@
+/*
+ * cxgb4i_offload.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/if_vlan.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <net/tcp.h>
+
+#include "libcxgbi.h"
+#include "cxgb4i.h"
+#include "cxgb4i_offload.h"
+
+static int cxgb4i_rcv_win = 256 * 1024;
+module_param(cxgb4i_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
+
+static int cxgb4i_snd_win = 128 * 1024;
+module_param(cxgb4i_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
+
+static int cxgb4i_rx_credit_thres = 10 * 1024;
+module_param(cxgb4i_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
+		"RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb4i_max_connect = (8 * 1024);
+module_param(cxgb4i_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
+
+static unsigned short cxgb4i_sport_base = 20000;
+module_param(cxgb4i_sport_base, ushort, 0644);
+MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define RCV_BUFSIZ_MASK	0x3FFU
+
+static void cxgb4i_sock_release_offload_resources(struct cxgbi_sock *);
+static void cxgbi_sock_conn_closing(struct cxgbi_sock *);
+static int cxgb4i_sock_push_tx_frames(struct cxgbi_sock *, int);
+
+
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data.  We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+	return skb->len <= (MAX_IMM_TX_PKT_LEN -
+			sizeof(struct fw_ofld_tx_data_wr));
+}
+
+static void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+	unsigned int read = 0;
+	struct iscsi_conn *conn = csk->user_data;
+	int err = 0;
+
+	cxgbi_rx_debug("csk 0x%p.\n", csk);
+
+	read_lock(&csk->callback_lock);
+	if (unlikely(!conn || conn->suspend_rx)) {
+		cxgbi_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
+				conn, conn ? conn->id : 0xFF,
+				conn ? conn->suspend_rx : 0xFF);
+		read_unlock(&csk->callback_lock);
+		return;
+	}
+	skb = skb_peek(&csk->receive_queue);
+	while (!err && skb) {
+		__skb_unlink(skb, &csk->receive_queue);
+		read += cxgb4i_skb_rx_pdulen(skb);
+		cxgbi_rx_debug("conn 0x%p, csk 0x%p, rx skb 0x%p, pdulen %u\n",
+				conn, csk, skb, cxgb4i_skb_rx_pdulen(skb));
+		if (cxgb4i_skb_flags(skb) & CXGB4I_SKCB_FLAG_HDR_RCVD)
+			err = cxgbi_conn_read_bhs_pdu_skb(conn, skb);
+		else if (cxgb4i_skb_flags(skb) == CXGB4I_SKCB_FLAG_DATA_RCVD)
+			err = cxgbi_conn_read_data_pdu_skb(conn, skb);
+		__kfree_skb(skb);
+		skb = skb_peek(&csk->receive_queue);
+	}
+	read_unlock(&csk->callback_lock);
+	csk->copied_seq += read;
+	cxgb4i_sock_rx_credits(csk, read);
+	conn->rxdata_octets += read;
+
+	if (err) {
+		cxgbi_log_info("conn 0x%p rx failed err %d.\n", conn, err);
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	}
+}
+
+static void cxgb4i_sock_closed(struct cxgbi_sock *csk)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u, flags 0x%lx\n",
+			csk, csk->state, csk->flags);
+
+	cxgbi_sock_put_port(csk);
+	cxgb4i_sock_release_offload_resources(csk);
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSED);
+	cxgbi_sock_conn_closing(csk);
+}
+
+static unsigned int cxgb4i_find_best_mtu(struct cxgb4i_snic *snic,
+						unsigned short mtu)
+{
+	int i = 0;
+
+	while (i < NMTUS - 1 && snic->lldi.mtus[i + 1] <= mtu)
+		++i;
+
+	return i;
+}
+
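+/*
+ * Pick an entry from the adapter MTU table: the value returned is the
+ * table index (later programmed into the TCB via MSS_IDX in opt0), not
+ * the MSS itself; 40 bytes of IP + TCP header are assumed when
+ * converting between path MTU and advertised MSS.
+ */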
+static unsigned int cxgb4i_select_mss(struct cxgbi_sock *csk,
+						unsigned int pmtu)
+{
+	unsigned int idx;
+	struct dst_entry *dst = csk->dst;
+	u16 advmss = dst_metric(dst, RTAX_ADVMSS);
+
+	if (advmss > pmtu - 40)
+		advmss = pmtu - 40;
+	if (advmss < cxgb4i_get_snic(csk->cdev)->lldi.mtus[0] - 40)
+		advmss = cxgb4i_get_snic(csk->cdev)->lldi.mtus[0] - 40;
+	idx = cxgb4i_find_best_mtu(cxgb4i_get_snic(csk->cdev), advmss + 40);
+
+	return idx;
+}
+
+static inline int cxgb4i_sock_compute_wscale(int win)
+{
+	int wscale = 0;
+
+	while (wscale < 14 && (65535 << wscale) < win)
+		wscale++;
+
+	return wscale;
+}
+
+static void cxgb4i_sock_make_act_open_req(struct cxgbi_sock *csk,
+					   struct sk_buff *skb,
+					   unsigned int qid_atid,
+					   struct l2t_entry *e)
+{
+	struct cpl_act_open_req *req;
+	unsigned long long opt0;
+	unsigned int opt2;
+	int wscale;
+
+	cxgbi_conn_debug("csk 0x%p, atid 0x%x\n", csk, qid_atid);
+
+	wscale = cxgb4i_sock_compute_wscale(csk->mss_idx);
+
+	opt0 = KEEP_ALIVE(1) |
+		WND_SCALE(wscale) |
+		MSS_IDX(csk->mss_idx) |
+		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
+		TX_CHAN(csk->tx_chan) |
+		SMAC_SEL(csk->smac_idx) |
+		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+
+	opt2 = RX_CHANNEL(0) |
+		RSS_QUEUE_VALID |
+		RSS_QUEUE(csk->rss_qid);
+
+	skb->queue_mapping = CPL_PRIORITY_SETUP;
+	req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+					qid_atid));
+	req->local_port = csk->saddr.sin_port;
+	req->peer_port = csk->daddr.sin_port;
+	req->local_ip = csk->saddr.sin_addr.s_addr;
+	req->peer_ip = csk->daddr.sin_addr.s_addr;
+	req->opt0 = cpu_to_be64(opt0);
+	req->params = 0;
+	req->opt2 = cpu_to_be32(opt2);
+}
+
+static void cxgb4i_fail_act_open(struct cxgbi_sock *csk, int errno)
+{
+	cxgbi_conn_debug("csk 0%p, state %u, flag 0x%lx\n", csk,
+			csk->state, csk->flags);
+	csk->err = errno;
+	cxgb4i_sock_closed(csk);
+}
+
+static void cxgb4i_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+	if (csk->state == CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_fail_act_open(csk, -EHOSTUNREACH);
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+	__kfree_skb(skb);
+}
+
+static void cxgb4i_sock_skb_entail(struct cxgbi_sock *csk,
+					struct sk_buff *skb,
+					int flags)
+{
+	cxgb4i_skb_tcp_seq(skb) = csk->write_seq;
+	cxgb4i_skb_flags(skb) = flags;
+	__skb_queue_tail(&csk->write_queue, skb);
+}
+
+static void cxgb4i_sock_send_close_req(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb = csk->cpl_close;
+	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+	unsigned int tid = csk->hwtid;
+
+	csk->cpl_close = NULL;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	INIT_TP_WR(req, tid);
+
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+	req->rsvd = 0;
+
+	cxgb4i_sock_skb_entail(csk, skb, CXGB4I_SKCB_FLAG_NO_APPEND);
+	if (csk->state != CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_sock_push_tx_frames(csk, 1);
+}
+
+static void cxgb4i_sock_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req = cplhdr(skb);
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
+static inline void cxgb4i_sock_purge_write_queue(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&csk->write_queue)))
+		__kfree_skb(skb);
+}
+
+static void cxgb4i_sock_send_abort_req(struct cxgbi_sock *csk)
+{
+	struct cpl_abort_req *req;
+	struct sk_buff *skb = csk->cpl_abort_req;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_ABORTING) ||
+			!skb || !csk->cdev)
+		return;
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ABORTING);
+
+	cxgbi_conn_debug("csk 0x%p, flag ABORT_RPL + ABORT_SHUT\n", csk);
+
+	cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING);
+
+	cxgb4i_sock_purge_write_queue(csk);
+
+	csk->cpl_abort_req = NULL;
+	req = (struct cpl_abort_req *)skb->head;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	t4_set_arp_err_handler(skb, csk, cxgb4i_sock_abort_arp_failure);
+	INIT_TP_WR(req, csk->hwtid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->hwtid));
+	req->rsvd0 = htonl(csk->snd_nxt);
+	req->rsvd1 = !cxgbi_sock_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT);
+	req->cmd = CPL_ABORT_SEND_RST;
+
+	cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+}
+
+static void cxgb4i_sock_send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
+{
+	struct sk_buff *skb = csk->cpl_abort_rpl;
+	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	csk->cpl_abort_rpl = NULL;
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+	INIT_TP_WR(rpl, csk->hwtid);
+	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->hwtid));
+	rpl->cmd = rst_status;
+
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
+static u32 cxgb4i_csk_send_rx_credits(struct cxgbi_sock *csk, u32 credits)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_data_ack *req;
+	int wrlen = roundup(sizeof(*req), 16);
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	skb = alloc_skb(wrlen, GFP_ATOMIC);
+	if (!skb)
+		return 0;
+
+	req = (struct cpl_rx_data_ack *)__skb_put(skb, wrlen);
+	memset(req, 0, wrlen);
+	skb->queue_mapping = CPL_PRIORITY_ACK;
+	INIT_TP_WR(req, csk->hwtid);
+	OPCODE_TID(req) =
+		cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->hwtid));
+	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+	return credits;
+}
+
+
+#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
+
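+/*
+ * Extra bytes the ULP appends per PDU, indexed by the 2-bit digest
+ * submode kept in the skb ulp_mode: none, header CRC, data CRC, both.
+ */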
+static const unsigned int cxgb4i_ulp_extra_len[] = { 0, 4, 4, 8 };
+static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
+{
+	return cxgb4i_ulp_extra_len[cxgb4i_skb_ulp_mode(skb) & 3];
+}
+
+static inline void cxgb4i_sock_reset_wr_list(struct cxgbi_sock *csk)
+{
+	csk->wr_pending_head = csk->wr_pending_tail = NULL;
+}
+
+static inline void cxgb4i_sock_enqueue_wr(struct cxgbi_sock *csk,
+						struct sk_buff *skb)
+{
+	cxgb4i_skb_tx_wr_next(skb) = NULL;
+
+	/*
+	 * We want to take an extra reference since both us and the driver
+	 * need to free the packet before it's really freed. We know there's
+	 * just one user currently so we use atomic_set rather than skb_get
+	 * to avoid the atomic op.
+	 */
+	atomic_set(&skb->users, 2);
+
+	if (!csk->wr_pending_head)
+		csk->wr_pending_head = skb;
+	else
+		cxgb4i_skb_tx_wr_next(csk->wr_pending_tail) = skb;
+
+	csk->wr_pending_tail = skb;
+}
+
+static int cxgb4i_sock_count_pending_wrs(const struct cxgbi_sock *csk)
+{
+	int n = 0;
+	const struct sk_buff *skb = csk->wr_pending_head;
+
+	while (skb) {
+		n += skb->csum;
+		skb = cxgb4i_skb_tx_wr_next(skb);
+	}
+	return n;
+}
+
+static inline struct sk_buff *cxgb4i_sock_peek_wr(const struct cxgbi_sock *csk)
+{
+	return csk->wr_pending_head;
+}
+
+static inline void cxgb4i_sock_free_wr_skb(struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static inline struct sk_buff *cxgb4i_sock_dequeue_wr(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb = csk->wr_pending_head;
+
+	if (likely(skb)) {
+		csk->wr_pending_head = cxgb4i_skb_tx_wr_next(skb);
+		cxgb4i_skb_tx_wr_next(skb) = NULL;
+	}
+	return skb;
+}
+
+static void cxgb4i_sock_purge_wr_queue(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = cxgb4i_sock_dequeue_wr(csk)) != NULL)
+		cxgb4i_sock_free_wr_skb(skb);
+}
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (is_ofld_imm(skb))
+		return DIV_ROUND_UP(skb->len, 8);
+
+	flits = skb_transport_offset(skb) / 8;
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb->tail != skb->transport_header)
+		cnt++;
+	return flits + sgl_len(cnt);
+}
+
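+/*
+ * Send a FLOWC work request handing the firmware the per-flow TCP state
+ * (channel, ingress queue, snd/rcv sequence numbers, send buffer, MSS)
+ * before the first TX data WR; the 5 credits it consumes are accounted
+ * for by the caller in cxgb4i_sock_push_tx_frames().
+ */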
+static inline void cxgb4i_sock_send_tx_flowc_wr(struct cxgbi_sock *csk)
+{
+	struct sk_buff *skb;
+	struct fw_flowc_wr *flowc;
+	int flowclen, i;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	flowclen = 80;
+	skb = alloc_skb(flowclen, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return;
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+
+	flowc->op_to_nparams =
+		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
+	flowc->flowid_len16 =
+		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
+				FW_WR_FLOWID(csk->hwtid));
+
+	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+	flowc->mnemval[0].val = htonl(0);
+	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+	flowc->mnemval[1].val = htonl(csk->tx_chan);
+	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+	flowc->mnemval[2].val = htonl(csk->tx_chan);
+	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+	flowc->mnemval[3].val = htonl(csk->rss_qid);
+	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+	flowc->mnemval[4].val = htonl(csk->snd_nxt);
+	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
+	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
+	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+	flowc->mnemval[7].val = htonl(csk->mss_idx);
+	flowc->mnemval[8].mnemonic = 0;
+	flowc->mnemval[8].val = 0;
+	for (i = 0; i < 9; i++) {
+		flowc->mnemval[i].r4[0] = 0;
+		flowc->mnemval[i].r4[1] = 0;
+		flowc->mnemval[i].r4[2] = 0;
+	}
+
+	skb->queue_mapping = CPL_PRIORITY_DATA;
+
+	cxgb4_ofld_send(snic->lldi.ports[csk->port_id], skb);
+}
+
+static inline void cxgb4i_sock_make_tx_data_wr(struct cxgbi_sock *csk,
+						struct sk_buff *skb, int dlen,
+						int len, u32 credits,
+						int req_completion)
+{
+	struct fw_ofld_tx_data_wr *req;
+	unsigned int wr_ulp_mode;
+
+	if (is_ofld_imm(skb)) {
+			req = (struct fw_ofld_tx_data_wr *)
+				__skb_push(skb, sizeof(*req));
+			req->op_to_immdlen =
+				cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+					FW_WR_COMPL(req_completion) |
+					FW_WR_IMMDLEN(dlen));
+			req->flowid_len16 =
+				cpu_to_be32(FW_WR_FLOWID(csk->hwtid) |
+						FW_WR_LEN16(credits));
+	} else {
+		req = (struct fw_ofld_tx_data_wr *)
+			__skb_push(skb, sizeof(*req));
+		req->op_to_immdlen =
+			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+					FW_WR_COMPL(req_completion) |
+					FW_WR_IMMDLEN(0));
+		req->flowid_len16 =
+			cpu_to_be32(FW_WR_FLOWID(csk->hwtid) |
+					FW_WR_LEN16(credits));
+	}
+
+	wr_ulp_mode =
+		FW_OFLD_TX_DATA_WR_ULPMODE(cxgb4i_skb_ulp_mode(skb) >> 4) |
+		FW_OFLD_TX_DATA_WR_ULPSUBMODE(cxgb4i_skb_ulp_mode(skb) & 3);
+
+	req->tunnel_to_proxy = cpu_to_be32(wr_ulp_mode) |
+		FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
+
+	req->plen = cpu_to_be32(len);
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT))
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_TX_DATA_SENT);
+}
+
+static void cxgb4i_sock_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
+static int cxgb4i_sock_push_tx_frames(struct cxgbi_sock *csk,
+						int req_completion)
+{
+	int total_size = 0;
+	struct sk_buff *skb;
+	struct cxgb4i_snic *snic;
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_CONNECTING ||
+				csk->state == CXGBI_CSK_ST_CLOSE_WAIT_1 ||
+				csk->state >= CXGBI_CSK_ST_ABORTING)) {
+		cxgbi_tx_debug("csk 0x%p, in closing state %u.\n",
+				csk, csk->state);
+		return 0;
+	}
+
+	snic = cxgb4i_get_snic(csk->cdev);
+
+	while (csk->wr_cred
+			&& (skb = skb_peek(&csk->write_queue)) != NULL) {
+		int dlen;
+		int len;
+		unsigned int credits_needed;
+
+		dlen = len = skb->len;
+		skb_reset_transport_header(skb);
+
+		if (is_ofld_imm(skb))
+			credits_needed = DIV_ROUND_UP(dlen +
+					sizeof(struct fw_ofld_tx_data_wr), 16);
+		else
+			credits_needed = DIV_ROUND_UP(8 *
+					calc_tx_flits_ofld(skb)+
+					sizeof(struct fw_ofld_tx_data_wr), 16);
+
+		if (csk->wr_cred < credits_needed) {
+			cxgbi_tx_debug("csk 0x%p, skb len %u/%u, "
+					"wr %d < %u.\n",
+					csk, skb->len, skb->data_len,
+					credits_needed, csk->wr_cred);
+			break;
+		}
+
+		__skb_unlink(skb, &csk->write_queue);
+		skb->queue_mapping = CPL_PRIORITY_DATA;
+		skb->csum = credits_needed; /* remember this until the WR_ACK */
+		csk->wr_cred -= credits_needed;
+		csk->wr_una_cred += credits_needed;
+		cxgb4i_sock_enqueue_wr(csk, skb);
+
+		cxgbi_tx_debug("csk 0x%p, enqueue, skb len %u/%u, "
+				"wr %d, left %u, unack %u.\n",
+				csk, skb->len, skb->data_len,
+				credits_needed, csk->wr_cred,
+				csk->wr_una_cred);
+
+
+		if (likely(cxgb4i_skb_flags(skb) &
+					CXGB4I_SKCB_FLAG_NEED_HDR)) {
+			len += ulp_extra_len(skb);
+			if (!cxgbi_sock_flag(csk,
+						CXGBI_CSK_FL_TX_DATA_SENT)) {
+				cxgb4i_sock_send_tx_flowc_wr(csk);
+				skb->csum += 5;
+				csk->wr_cred -= 5;
+				csk->wr_una_cred += 5;
+			}
+
+			if ((req_completion &&
+				csk->wr_una_cred == credits_needed) ||
+				(cxgb4i_skb_flags(skb) &
+				  CXGB4I_SKCB_FLAG_COMPL) ||
+				csk->wr_una_cred >= csk->wr_max_cred / 2) {
+				req_completion = 1;
+				csk->wr_una_cred = 0;
+			}
+			cxgb4i_sock_make_tx_data_wr(csk, skb, dlen, len,
+							credits_needed,
+							req_completion);
+			csk->snd_nxt += len;
+
+			if (req_completion)
+				cxgb4i_skb_flags(skb) &=
+					~CXGB4I_SKCB_FLAG_NEED_HDR;
+		}
+
+		total_size += skb->truesize;
+		t4_set_arp_err_handler(skb, csk,
+					cxgb4i_sock_arp_failure_discard);
+		cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+	}
+	return total_size;
+}
+
+static inline void cxgb4i_sock_free_atid(struct cxgbi_sock *csk)
+{
+	cxgb4_free_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids, csk->atid);
+	cxgbi_sock_put(csk);
+}
+
+static void cxgb4i_sock_established(struct cxgbi_sock *csk, u32 snd_isn,
+					unsigned int opt)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u.\n", csk, csk->state);
+
+	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
+
+	/*
+	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
+	 * pass through opt0.
+	 */
+	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
+		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
+
+	dst_confirm(csk->dst);
+
+	smp_mb();
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ESTABLISHED);
+}
+
+static int cxgb4i_cpl_act_establish(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_act_establish *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	struct tid_info *t = snic->lldi.tids;
+	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+	csk = lookup_atid(t, atid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flag 0x%lx\n",
+				csk, csk->state, csk->flags);
+	csk->hwtid = hwtid;
+	cxgbi_sock_hold(csk);
+	cxgb4_insert_tid(snic->lldi.tids, csk, hwtid);
+	cxgb4_free_atid(snic->lldi.tids, atid);
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state != CXGBI_CSK_ST_CONNECTING))
+		cxgbi_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
+				csk->hwtid, csk->state);
+
+	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
+	cxgb4i_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+	__kfree_skb(skb);
+
+	if (unlikely(cxgbi_sock_flag(csk, CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED)))
+		cxgb4i_sock_send_abort_req(csk);
+	else {
+		if (skb_queue_len(&csk->write_queue))
+			cxgb4i_sock_push_tx_frames(csk, 1);
+
+		cxgbi_conn_tx_open(csk);
+	}
+
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
+static int act_open_rpl_status_to_errno(int status)
+{
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+		return -ECONNREFUSED;
+	case CPL_ERR_ARP_MISS:
+		return -EHOSTUNREACH;
+	case CPL_ERR_CONN_TIMEDOUT:
+		return -ETIMEDOUT;
+	case CPL_ERR_TCAM_FULL:
+		return -ENOMEM;
+	case CPL_ERR_CONN_EXIST:
+		cxgbi_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
+		return -EADDRINUSE;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+		status != CPL_ERR_ARP_MISS;
+}
+
+static void cxgb4i_sock_act_open_retry_timer(unsigned long data)
+{
+	struct sk_buff *skb;
+	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+	struct cxgb4i_snic *snic = cxgb4i_get_snic(csk->cdev);
+
+	cxgbi_conn_debug("csk 0x%p, state %u.\n", csk, csk->state);
+
+	spin_lock_bh(&csk->lock);
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
+	if (!skb)
+		cxgb4i_fail_act_open(csk, -ENOMEM);
+	else {
+		unsigned int qid_atid  = csk->rss_qid << 14;
+		qid_atid |= (unsigned int)csk->atid;
+		skb->sk = (struct sock *)csk;
+		t4_set_arp_err_handler(skb, csk,
+					cxgb4i_act_open_req_arp_failure);
+		cxgb4i_sock_make_act_open_req(csk, skb, qid_atid, csk->l2t);
+		cxgb4_l2t_send(snic->lldi.ports[csk->port_id], skb, csk->l2t);
+	}
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+}
+
+static int cxgb4i_cpl_act_open_rpl(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_act_open_rpl *rpl = cplhdr(skb);
+	unsigned int atid =
+		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
+	struct tid_info *t = snic->lldi.tids;
+	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+	csk = lookup_atid(t, atid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", atid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	cxgbi_conn_debug("rcv, status 0x%x, csk 0x%p, csk->state %u, "
+			"csk->flag 0x%lx, csk->atid %u.\n",
+			status, csk, csk->state, csk->flags, csk->hwtid);
+
+	if (status & act_open_has_tid(status))
+		cxgb4_remove_tid(snic->lldi.tids, csk->port_id, GET_TID(rpl));
+
+	if (status == CPL_ERR_CONN_EXIST &&
+			csk->retry_timer.function !=
+			cxgb4i_sock_act_open_retry_timer) {
+		csk->retry_timer.function = cxgb4i_sock_act_open_retry_timer;
+		if (!mod_timer(&csk->retry_timer, jiffies + HZ / 2))
+			cxgbi_sock_hold(csk);
+	} else
+		cxgb4i_fail_act_open(csk, act_open_rpl_status_to_errno(status));
+
+	__kfree_skb(skb);
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_peer_close(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_peer_close *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct tid_info *t = snic->lldi.tids;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_ESTABLISHED:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_PASSIVE_CLOSE);
+		break;
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSE_WAIT_2);
+		break;
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+		cxgb4i_sock_closed(csk);
+		break;
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+	default:
+		cxgbi_log_error("peer close, TID %u in bad state %u\n",
+				csk->hwtid, csk->state);
+	}
+
+	cxgbi_sock_conn_closing(csk);
+
+out:
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_close_con_rpl(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_close_con_rpl *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flag 0x%lx.\n",
+			csk, csk->state, csk->flags);
+
+	csk->snd_una = ntohl(rpl->snd_nxt) - 1;
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING))
+		goto out;
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSE_WAIT_1);
+		break;
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+	case CXGBI_CSK_ST_CLOSE_WAIT_2:
+		cxgb4i_sock_closed(csk);
+		break;
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+	default:
+		cxgbi_log_error("close_rpl, TID %u in bad state %u\n",
+				csk->hwtid, csk->state);
+	}
+out:
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
+								int *need_rst)
+{
+	switch (abort_reason) {
+	case CPL_ERR_BAD_SYN: /* fall through */
+	case CPL_ERR_CONN_RESET:
+		return csk->state > CXGBI_CSK_ST_ESTABLISHED ?
+			-EPIPE : -ECONNRESET;
+	case CPL_ERR_XMIT_TIMEDOUT:
+	case CPL_ERR_PERSIST_TIMEDOUT:
+	case CPL_ERR_FINWAIT2_TIMEDOUT:
+	case CPL_ERR_KEEPALIVE_TIMEDOUT:
+		return -ETIMEDOUT;
+	default:
+		return -EIO;
+	}
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is a negative advice.
+ */
+static inline int is_neg_adv_abort(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+		status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
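+/*
+ * Process a peer ABORT_REQ: negative advice is ignored; the first
+ * (non-advice) notification only marks the connection ABORTING, while
+ * the second one sends the ABORT_RPL back and tears the connection down
+ * unless a local abort is already pending.
+ */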
+static int cxgb4i_cpl_abort_req_rss(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct tid_info *t = snic->lldi.tids;
+	int rst_status = CPL_ABORT_NO_RST;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (is_neg_adv_abort(req->status)) {
+		__kfree_skb(skb);
+		return 0;
+	}
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD)) {
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD);
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ABORTING);
+		__kfree_skb(skb);
+		return 0;
+	}
+
+	cxgbi_sock_clear_flag(csk, CXGBI_CSK_FL_ABORT_REQ_RCVD);
+	cxgb4i_sock_send_abort_rpl(csk, rst_status);
+
+	if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING)) {
+		csk->err = abort_status_to_errno(csk, req->status,
+							&rst_status);
+		cxgb4i_sock_closed(csk);
+	}
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_abort_rpl_rss(struct cxgb4i_snic *snic,
+						struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+
+	if (rpl->status == CPL_ERR_ABORT_FAILED)
+		goto out;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		goto out;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	if (cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_PENDING)) {
+		if (!cxgbi_sock_flag(csk, CXGBI_CSK_FL_ABORT_RPL_RCVD))
+			cxgbi_sock_set_flag(csk,
+						CXGBI_CSK_FL_ABORT_RPL_RCVD);
+		else {
+			cxgbi_sock_clear_flag(csk,
+						CXGBI_CSK_FL_ABORT_RPL_RCVD);
+			cxgbi_sock_clear_flag(csk,
+					CXGBI_CSK_FL_ABORT_RPL_PENDING);
+
+			if (cxgbi_sock_flag(csk,
+						CXGBI_CSK_FL_ABORT_REQ_RCVD))
+				cxgbi_log_error("tid %u, ABORT_RPL_RSS\n",
+						csk->hwtid);
+
+			cxgb4i_sock_closed(csk);
+		}
+	}
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+out:
+	__kfree_skb(skb);
+	return 0;
+}
+
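+/*
+ * Receive path: each iSCSI PDU shows up as a CPL_ISCSI_HDR carrying the
+ * BHS (tracked in csk->skb_ulp_lhdr), optionally another CPL_ISCSI_HDR
+ * with non-DDP'ed payload, and finally a CPL_RX_DATA_DDP that reports
+ * the digest/DDP status and completes the PDU.
+ */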
+static int cxgb4i_cpl_iscsi_hdr(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_iscsi_hdr *cpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(cpl);
+	struct tid_info *t = snic->lldi.tids;
+	struct sk_buff *lskb;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CXGBI_CSK_ST_PASSIVE_CLOSE)) {
+		if (csk->state != CXGBI_CSK_ST_ABORTING)
+			goto abort_conn;
+	}
+
+	cxgb4i_skb_tcp_seq(skb) = ntohl(cpl->seq);
+	cxgb4i_skb_flags(skb) = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*cpl));
+	__pskb_trim(skb, ntohs(cpl->len));
+
+	if (!csk->skb_ulp_lhdr) {
+		unsigned char *byte;
+		csk->skb_ulp_lhdr = skb;
+		lskb = csk->skb_ulp_lhdr;
+
+		cxgb4i_skb_flags(lskb) = CXGB4I_SKCB_FLAG_HDR_RCVD;
+
+		if (cxgb4i_skb_tcp_seq(lskb) != csk->rcv_nxt) {
+			cxgbi_log_error("tid 0x%x, CPL_ISCSI_HDR, bad seq got "
+					"0x%x, exp 0x%x\n",
+					csk->hwtid,
+					cxgb4i_skb_tcp_seq(lskb),
+					csk->rcv_nxt);
+		}
+
+		byte = skb->data;
+		cxgb4i_skb_rx_pdulen(skb) = ntohs(cpl->pdu_len_ddp) - 40;
+		csk->rcv_nxt += cxgb4i_skb_rx_pdulen(lskb);
+	} else {
+		lskb = csk->skb_ulp_lhdr;
+		cxgb4i_skb_flags(lskb) |= CXGB4I_SKCB_FLAG_DATA_RCVD;
+		cxgb4i_skb_flags(skb) = CXGB4I_SKCB_FLAG_DATA_RCVD;
+		cxgbi_log_debug("csk 0x%p, tid 0x%x skb 0x%p, pdu data, "
+				" header 0x%p.\n",
+				csk, csk->hwtid, skb, lskb);
+	}
+
+	__skb_queue_tail(&csk->receive_queue, skb);
+
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+
+abort_conn:
+	cxgb4i_sock_send_abort_req(csk);
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+
+	return -EINVAL;
+}
+
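+/*
+ * CPL_RX_DATA_DDP closes out the PDU started by CPL_ISCSI_HDR above:
+ * the ddpvld bits are translated into ULP2 error/DDP flags on the
+ * header skb before the PDU is handed up via cxgbi_conn_pdu_ready().
+ */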
+static int cxgb4i_cpl_rx_data_ddp(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct sk_buff *lskb;
+	struct cpl_rx_data_ddp *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	unsigned int status;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (unlikely(csk->state >= CXGBI_CSK_ST_PASSIVE_CLOSE)) {
+		if (csk->state != CXGBI_CSK_ST_ABORTING)
+			goto abort_conn;
+	}
+
+	if (!csk->skb_ulp_lhdr) {
+		cxgbi_log_error("tid 0x%x, rcv RX_DATA_DDP w/o pdu header\n",
+				csk->hwtid);
+		goto abort_conn;
+	}
+
+	lskb = csk->skb_ulp_lhdr;
+	cxgb4i_skb_flags(lskb) |= CXGB4I_SKCB_FLAG_STATUS_RCVD;
+
+	if (ntohs(rpl->len) != cxgb4i_skb_rx_pdulen(lskb)) {
+		cxgbi_log_error("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
+				csk->hwtid, ntohs(rpl->len),
+				cxgb4i_skb_rx_pdulen(lskb));
+	}
+
+	cxgb4i_skb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
+	status = ntohl(rpl->ddpvld);
+
+	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
+		cxgb4i_skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
+		cxgb4i_skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
+	if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
+		cxgb4i_skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;
+	if ((cxgb4i_skb_flags(lskb) & ULP2_FLAG_DATA_READY))
+		cxgb4i_skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
+
+	csk->skb_ulp_lhdr = NULL;
+
+	__kfree_skb(skb);
+	cxgbi_conn_pdu_ready(csk);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+
+abort_conn:
+	cxgb4i_sock_send_abort_req(csk);
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+	return -EINVAL;
+}
+
+static void check_wr_invariants(const struct cxgbi_sock *csk)
+{
+	int pending = cxgb4i_sock_count_pending_wrs(csk);
+
+	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
+		printk(KERN_ERR "TID %u: credit imbalance: avail %u, "
+				"pending %u, total should be %u\n",
+				csk->hwtid,
+				csk->wr_cred,
+				pending,
+				csk->wr_max_cred);
+}
+
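+/*
+ * CPL_FW4_ACK returns TX WR credits: each pending WR remembered its own
+ * credit count in skb->csum when it was queued, so fully acked WRs are
+ * popped off the pending list here and any freed credits are used to
+ * push more frames.
+ */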
+static int cxgb4i_cpl_fw4_ack(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_fw4_ack *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	unsigned char credits;
+	unsigned int snd_una;
+
+	csk = lookup_tid(t, hwtid);
+	if (unlikely(!csk)) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		kfree_skb(skb);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	credits = rpl->credits;
+	snd_una = be32_to_cpu(rpl->snd_una);
+
+	cxgbi_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u\n",
+				credits, csk->wr_cred, csk->wr_una_cred,
+						csk->hwtid, csk->state);
+
+	csk->wr_cred += credits;
+
+	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
+		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+	while (credits) {
+		struct sk_buff *p = cxgb4i_sock_peek_wr(csk);
+
+		if (unlikely(!p)) {
+			cxgbi_log_error("%u WR_ACK credits for TID %u with "
+					"nothing pending, state %u\n",
+					credits, csk->hwtid, csk->state);
+			break;
+		}
+
+		if (unlikely(credits < p->csum)) {
+			p->csum -= credits;
+		} else {
+			cxgb4i_sock_dequeue_wr(csk);
+			credits -= p->csum;
+			cxgb4i_sock_free_wr_skb(p);
+		}
+	}
+
+	check_wr_invariants(csk);
+
+	if (rpl->seq_vld) {
+		if (unlikely(before(snd_una, csk->snd_una))) {
+			cxgbi_log_error("TID %u, unexpected sequence # %u "
+					"in WR_ACK snd_una %u\n",
+					csk->hwtid, snd_una, csk->snd_una);
+			goto out_free;
+		}
+	}
+
+	if (csk->snd_una != snd_una) {
+		csk->snd_una = snd_una;
+		dst_confirm(csk->dst);
+	}
+
+	if (skb_queue_len(&csk->write_queue)) {
+		if (cxgb4i_sock_push_tx_frames(csk, 0))
+			cxgbi_conn_tx_open(csk);
+	} else
+		cxgbi_conn_tx_open(csk);
+
+out_free:
+	__kfree_skb(skb);
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+
+	return 0;
+}
+
+static int cxgb4i_cpl_set_tcb_rpl(struct cxgb4i_snic *snic, struct sk_buff *skb)
+{
+	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+	unsigned int hwtid = GET_TID(rpl);
+	struct tid_info *t = snic->lldi.tids;
+	struct cxgbi_sock *csk;
+
+	csk = lookup_tid(t, hwtid);
+
+	if (!csk) {
+		cxgbi_log_error("can't find connection for tid %u\n", hwtid);
+		__kfree_skb(skb);
+		return CPL_RET_UNKNOWN_TID;
+	}
+
+	spin_lock_bh(&csk->lock);
+
+	if (rpl->status != CPL_ERR_NONE) {
+		cxgbi_log_error("Unexpected SET_TCB_RPL status %u "
+				 "for tid %u\n", rpl->status, GET_TID(rpl));
+	}
+
+	__kfree_skb(skb);
+	spin_unlock_bh(&csk->lock);
+
+	return 0;
+}
+
+static void cxgb4i_sock_free_cpl_skbs(struct cxgbi_sock *csk)
+{
+	if (csk->cpl_close)
+		kfree_skb(csk->cpl_close);
+	if (csk->cpl_abort_req)
+		kfree_skb(csk->cpl_abort_req);
+	if (csk->cpl_abort_rpl)
+		kfree_skb(csk->cpl_abort_rpl);
+}
+
+static int cxgb4i_alloc_cpl_skbs(struct cxgbi_sock *csk)
+{
+	csk->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
+					GFP_KERNEL);
+	if (!csk->cpl_close)
+		return -ENOMEM;
+	skb_put(csk->cpl_close, sizeof(struct cpl_close_con_req));
+
+	csk->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
+					GFP_KERNEL);
+	if (!csk->cpl_abort_req)
+		goto free_cpl_skbs;
+	skb_put(csk->cpl_abort_req, sizeof(struct cpl_abort_req));
+
+	csk->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
+					GFP_KERNEL);
+	if (!csk->cpl_abort_rpl)
+		goto free_cpl_skbs;
+	skb_put(csk->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
+
+	return 0;
+
+free_cpl_skbs:
+	cxgb4i_sock_free_cpl_skbs(csk);
+	return -ENOMEM;
+}
+
+static void cxgb4i_sock_release_offload_resources(struct cxgbi_sock *csk)
+{
+
+	cxgb4i_sock_free_cpl_skbs(csk);
+
+	if (csk->wr_cred != csk->wr_max_cred) {
+		cxgb4i_sock_purge_wr_queue(csk);
+		cxgb4i_sock_reset_wr_list(csk);
+	}
+
+	if (csk->l2t) {
+		cxgb4_l2t_release(csk->l2t);
+		csk->l2t = NULL;
+	}
+
+	if (csk->state == CXGBI_CSK_ST_CONNECTING)
+		cxgb4i_sock_free_atid(csk);
+	else {
+		cxgb4_remove_tid(cxgb4i_get_snic(csk->cdev)->lldi.tids, 0,
+				csk->hwtid);
+		cxgbi_sock_put(csk);
+	}
+
+	csk->dst = NULL;
+	csk->cdev = NULL;
+}
+
+struct cxgbi_sock *cxgb4i_sock_create(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_sock *csk = NULL;
+
+	csk = kzalloc(sizeof(*csk), GFP_KERNEL);
+	if (!csk)
+		return NULL;
+
+	if (cxgb4i_alloc_cpl_skbs(csk) < 0)
+		goto free_csk;
+
+	cxgbi_conn_debug("alloc csk: 0x%p\n", csk);
+
+	csk->flags = 0;
+	spin_lock_init(&csk->lock);
+	atomic_set(&csk->refcnt, 1);
+	skb_queue_head_init(&csk->receive_queue);
+	skb_queue_head_init(&csk->write_queue);
+	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
+	rwlock_init(&csk->callback_lock);
+	csk->cdev = &snic->cdev;
+
+	return csk;
+
+free_csk:
+	cxgbi_api_debug("csk alloc failed %p, baling out\n", csk);
+	kfree(csk);
+	return NULL;
+}
+
+static void cxgb4i_sock_active_close(struct cxgbi_sock *csk)
+{
+	int data_lost;
+	int close_req = 0;
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flags %lu\n",
+			csk, csk->state, csk->flags);
+
+	dst_confirm(csk->dst);
+
+	cxgbi_sock_hold(csk);
+	spin_lock_bh(&csk->lock);
+
+	data_lost = skb_queue_len(&csk->receive_queue);
+	__skb_queue_purge(&csk->receive_queue);
+
+	switch (csk->state) {
+	case CXGBI_CSK_ST_CLOSED:
+	case CXGBI_CSK_ST_ACTIVE_CLOSE:
+	case CXGBI_CSK_ST_CLOSE_WAIT_1:
+	case CXGBI_CSK_ST_CLOSE_WAIT_2:
+	case CXGBI_CSK_ST_ABORTING:
+		break;
+
+	case CXGBI_CSK_ST_CONNECTING:
+		cxgbi_sock_set_flag(csk, CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED);
+		break;
+	case CXGBI_CSK_ST_ESTABLISHED:
+		close_req = 1;
+		cxgbi_sock_set_state(csk, CXGBI_CSK_ST_ACTIVE_CLOSE);
+		break;
+	}
+
+	if (close_req) {
+		if (data_lost)
+			cxgb4i_sock_send_abort_req(csk);
+		else
+			cxgb4i_sock_send_close_req(csk);
+	}
+
+	spin_unlock_bh(&csk->lock);
+	cxgbi_sock_put(csk);
+}
+
+void cxgb4i_sock_release(struct cxgbi_sock *csk)
+{
+	cxgbi_conn_debug("csk 0x%p, state %u, flags %lu\n",
+			csk, csk->state, csk->flags);
+
+	if (unlikely(csk->state == CXGBI_CSK_ST_CONNECTING))
+		cxgbi_sock_set_flag(csk,
+				CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED);
+	else if (likely(csk->state != CXGBI_CSK_ST_CLOSED))
+		cxgb4i_sock_active_close(csk);
+
+	cxgbi_sock_put(csk);
+}
+
+static int is_cxgb4_dev(struct net_device *dev, struct cxgb4i_snic *snic)
+{
+	struct net_device *ndev = dev;
+	int i;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN)
+		ndev = vlan_dev_real_dev(dev);
+
+	for (i = 0; i < snic->lldi.nports; i++) {
+		if (ndev == snic->lldi.ports[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+static struct net_device *cxgb4i_find_egress_dev(struct net_device *root_dev,
+						struct cxgb4i_snic *snic)
+{
+	while (root_dev) {
+		if (root_dev->priv_flags & IFF_802_1Q_VLAN)
+			root_dev = vlan_dev_real_dev(root_dev);
+		else if (is_cxgb4_dev(root_dev, snic))
+			return root_dev;
+		else
+			return NULL;
+	}
+
+	return NULL;
+}
+
+static struct rtable *find_route(struct net_device *dev,
+				__be32 saddr, __be32 daddr,
+				__be16 sport, __be16 dport,
+				u8 tos)
+{
+	struct rtable *rt;
+	struct flowi fl = {
+		.oif = dev ? dev->ifindex : 0,
+		.nl_u = {
+			.ip4_u = {
+				.daddr = daddr,
+				.saddr = saddr,
+				.tos = tos }
+			},
+		.proto = IPPROTO_TCP,
+		.uli_u = {
+			.ports = {
+				.sport = sport,
+				.dport = dport }
+			}
+	};
+
+	if (ip_route_output_flow(dev ? dev_net(dev) : &init_net,
+					&rt, &fl, NULL, 0))
+		return NULL;
+
+	return rt;
+}
+
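+/*
+ * Active open: allocate an ATID, grab an L2T entry for the next hop and
+ * send CPL_ACT_OPEN_REQ through the L2T layer so that ARP resolution is
+ * completed (or the arp_failure handler runs) before the request hits
+ * the wire.
+ */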
+static int cxgb4i_init_act_open(struct cxgbi_sock *csk,
+					struct net_device *dev)
+{
+	struct dst_entry *dst = csk->dst;
+	struct sk_buff *skb;
+	struct port_info *pi = netdev_priv(dev);
+
+	cxgbi_conn_debug("csk 0x%p, state %u, flags 0x%lx\n",
+			csk, csk->state, csk->flags);
+
+	csk->atid = cxgb4_alloc_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids,
+					csk);
+	if (csk->atid == -1) {
+		cxgbi_log_error("cannot alloc atid\n");
+		goto out_err;
+	}
+
+	csk->l2t = cxgb4_l2t_get(cxgb4i_get_snic(csk->cdev)->lldi.l2t,
+				csk->dst->neighbour, dev, 0);
+	if (!csk->l2t) {
+		cxgbi_log_error("cannot alloc l2t\n");
+		goto free_atid;
+	}
+
+	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
+	if (!skb)
+		goto free_l2t;
+
+	skb->sk = (struct sock *)csk;
+	t4_set_arp_err_handler(skb, csk, cxgb4i_act_open_req_arp_failure);
+
+	cxgbi_sock_hold(csk);
+
+	csk->wr_max_cred = csk->wr_cred =
+		cxgb4i_get_snic(csk->cdev)->lldi.wr_cred;
+	csk->port_id = pi->port_id;
+	csk->rss_qid = cxgb4i_get_snic(csk->cdev)->lldi.rxq_ids[csk->port_id];
+	csk->tx_chan = pi->tx_chan;
+	csk->smac_idx = csk->tx_chan << 1;
+	csk->wr_una_cred = 0;
+	csk->mss_idx = cxgb4i_select_mss(csk, dst_mtu(dst));
+	csk->err = 0;
+
+	cxgb4i_sock_reset_wr_list(csk);
+
+	cxgb4i_sock_make_act_open_req(csk, skb,
+					((csk->rss_qid << 14) |
+					 (csk->atid)), csk->l2t);
+	cxgb4_l2t_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[csk->port_id],
+					skb, csk->l2t);
+	return 0;
+
+free_l2t:
+	cxgb4_l2t_release(csk->l2t);
+
+free_atid:
+	cxgb4_free_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids, csk->atid);
+
+out_err:
+	return -EINVAL;
+}
+
+static struct net_device *cxgb4i_find_dev(struct net_device *dev,
+							__be32 ipaddr)
+{
+	struct flowi fl;
+	struct rtable *rt;
+	int err;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = ipaddr;
+
+	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+	if (!err) {
+		struct net_device *egress_dev = rt->u.dst.dev;
+
+		ip_rt_put(rt);
+		return egress_dev;
+	}
+
+	return NULL;
+}
+
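+/*
+ * Connect path: resolve the route, make sure the egress device is one
+ * of this adapter's ports, reserve a local port, pick up the hba's
+ * iSCSI IPv4 address (or record ours there) as the source address, and
+ * then kick off the active open.
+ */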
+int cxgb4i_sock_connect(struct net_device *dev, struct cxgbi_sock *csk,
+						struct sockaddr_in *sin)
+{
+	struct rtable *rt;
+	__be32 sipv4 = 0;
+	struct net_device *dstdev;
+	struct cxgbi_hba *chba = NULL;
+	int err;
+
+	cxgbi_conn_debug("csk 0x%p, dev 0x%p\n", csk, dev);
+
+	if (sin->sin_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	csk->daddr.sin_port = sin->sin_port;
+	csk->daddr.sin_addr.s_addr = sin->sin_addr.s_addr;
+
+	dstdev = cxgb4i_find_dev(dev, sin->sin_addr.s_addr);
+	if (!dstdev || !is_cxgb4_dev(dstdev, cxgb4i_get_snic(csk->cdev)))
+		return -ENETUNREACH;
+
+	if (dstdev->priv_flags & IFF_802_1Q_VLAN)
+		dev = dstdev;
+
+	rt = find_route(dev, csk->saddr.sin_addr.s_addr,
+			csk->daddr.sin_addr.s_addr,
+			csk->saddr.sin_port,
+			csk->daddr.sin_port,
+			0);
+	if (rt == NULL) {
+		cxgbi_conn_debug("no route to %pI4, port %u, dev %s, "
+					"snic 0x%p\n",
+					&csk->daddr.sin_addr.s_addr,
+					ntohs(csk->daddr.sin_port),
+					dev ? dev->name : "any",
+					csk->snic);
+		return -ENETUNREACH;
+	}
+
+	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+		cxgbi_conn_debug("multi-cast route to %pI4, port %u, "
+					"dev %s, snic 0x%p\n",
+					&csk->daddr.sin_addr.s_addr,
+					ntohs(csk->daddr.sin_port),
+					dev ? dev->name : "any",
+					csk->snic);
+		ip_rt_put(rt);
+		return -ENETUNREACH;
+	}
+
+	if (!csk->saddr.sin_addr.s_addr)
+		csk->saddr.sin_addr.s_addr = rt->rt_src;
+
+	csk->dst = &rt->u.dst;
+
+	dev = cxgb4i_find_egress_dev(csk->dst->dev,
+					cxgb4i_get_snic(csk->cdev));
+	if (dev == NULL) {
+		cxgbi_conn_debug("csk: 0x%p, egress dev NULL\n", csk);
+		return -ENETUNREACH;
+	}
+
+	err = cxgbi_sock_get_port(csk);
+	if (err)
+		return err;
+
+	cxgbi_conn_debug("csk: 0x%p get port: %u\n",
+			csk, ntohs(csk->saddr.sin_port));
+
+	chba = cxgb4i_hba_find_by_netdev(csk->dst->dev);
+
+	sipv4 = cxgb4i_get_iscsi_ipv4(chba);
+	if (!sipv4) {
+		cxgbi_conn_debug("csk: 0x%p, iscsi is not configured\n", csk);
+		sipv4 = csk->saddr.sin_addr.s_addr;
+		cxgb4i_set_iscsi_ipv4(chba, sipv4);
+	} else
+		csk->saddr.sin_addr.s_addr = sipv4;
+
+	cxgbi_conn_debug("csk: 0x%p, %pI4:[%u], %pI4:[%u] SYN_SENT\n",
+				csk,
+				&csk->saddr.sin_addr.s_addr,
+				ntohs(csk->saddr.sin_port),
+				&csk->daddr.sin_addr.s_addr,
+				ntohs(csk->daddr.sin_port));
+
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CONNECTING);
+
+	if (!cxgb4i_init_act_open(csk, dev))
+		return 0;
+
+	err = -ENOTSUPP;
+
+	cxgbi_conn_debug("csk 0x%p -> closed\n", csk);
+	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSED);
+	ip_rt_put(rt);
+	cxgbi_sock_put_port(csk);
+
+	return err;
+}
+
+void cxgb4i_sock_rx_credits(struct cxgbi_sock *csk, int copied)
+{
+	int must_send;
+	u32 credits;
+
+	if (csk->state != CXGBI_CSK_ST_ESTABLISHED)
+		return;
+
+	credits = csk->copied_seq - csk->rcv_wup;
+	if (unlikely(!credits))
+		return;
+
+	if (unlikely(cxgb4i_rx_credit_thres == 0))
+		return;
+
+	must_send = credits + 16384 >= cxgb4i_rcv_win;
+
+	if (must_send || credits >= cxgb4i_rx_credit_thres)
+		csk->rcv_wup += cxgb4i_csk_send_rx_credits(csk, credits);
+}
+
+int cxgb4i_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+	struct sk_buff *next;
+	int err, copied = 0;
+
+	spin_lock_bh(&csk->lock);
+
+	if (csk->state != CXGBI_CSK_ST_ESTABLISHED) {
+		cxgbi_tx_debug("csk 0x%p, not in est. state %u.\n",
+			      csk, csk->state);
+		err = -EAGAIN;
+		goto out_err;
+	}
+
+	if (csk->err) {
+		cxgbi_tx_debug("csk 0x%p, err %d.\n", csk, csk->err);
+		err = -EPIPE;
+		goto out_err;
+	}
+
+	if (csk->write_seq - csk->snd_una >= cxgb4i_snd_win) {
+		cxgbi_tx_debug("csk 0x%p, snd %u - %u > %u.\n",
+				csk, csk->write_seq, csk->snd_una,
+				cxgb4i_snd_win);
+		err = -ENOBUFS;
+		goto out_err;
+	}
+
+	while (skb) {
+		int frags = skb_shinfo(skb)->nr_frags +
+				(skb->len != skb->data_len);
+
+		if (unlikely(skb_headroom(skb) < CXGB4I_TX_HEADER_LEN)) {
+			cxgbi_tx_debug("csk 0x%p, skb head.\n", csk);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		if (frags >= SKB_WR_LIST_SIZE) {
+			cxgbi_log_error("csk 0x%p, tx frags %d, len %u,%u.\n",
+					 csk, skb_shinfo(skb)->nr_frags,
+					 skb->len, skb->data_len);
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		next = skb->next;
+		skb->next = NULL;
+		cxgb4i_sock_skb_entail(csk, skb,
+				CXGB4I_SKCB_FLAG_NO_APPEND |
+				CXGB4I_SKCB_FLAG_NEED_HDR);
+		copied += skb->len;
+		csk->write_seq += skb->len + ulp_extra_len(skb);
+		skb = next;
+	}
+done:
+	if (likely(skb_queue_len(&csk->write_queue)))
+		cxgb4i_sock_push_tx_frames(csk, 1);
+	spin_unlock_bh(&csk->lock);
+	return copied;
+
+out_err:
+	if (copied == 0 && err == -EPIPE)
+		copied = csk->err ? csk->err : -EPIPE;
+	else
+		copied = err;
+	goto done;
+}
+
+static void cxgbi_sock_conn_closing(struct cxgbi_sock *csk)
+{
+	struct iscsi_conn *conn = csk->user_data;
+
+	read_lock(&csk->callback_lock);
+	if (conn && csk->state != CXGBI_CSK_ST_ESTABLISHED)
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	read_unlock(&csk->callback_lock);
+}
+
+static void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
+{
+	u8 submode = 0;
+
+	if (hcrc)
+		submode |= 1;
+	if (dcrc)
+		submode |= 2;
+	cxgb4i_skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
+}
+
+static inline __u16 get_skb_ulp_mode(struct sk_buff *skb)
+{
+	return cxgb4i_skb_ulp_mode(skb);
+}
+
+static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = cxgb4i_cpl_act_establish,
+	[CPL_ACT_OPEN_RPL] = cxgb4i_cpl_act_open_rpl,
+	[CPL_PEER_CLOSE] = cxgb4i_cpl_peer_close,
+	[CPL_ABORT_REQ_RSS] = cxgb4i_cpl_abort_req_rss,
+	[CPL_ABORT_RPL_RSS] = cxgb4i_cpl_abort_rpl_rss,
+	[CPL_CLOSE_CON_RPL] = cxgb4i_cpl_close_con_rpl,
+	[CPL_FW4_ACK] = cxgb4i_cpl_fw4_ack,
+	[CPL_ISCSI_HDR] = cxgb4i_cpl_iscsi_hdr,
+	[CPL_SET_TCB_RPL] = cxgb4i_cpl_set_tcb_rpl,
+	[CPL_RX_DATA_DDP] = cxgb4i_cpl_rx_data_ddp
+};
+
+int cxgb4i_ofld_init(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_ports_map *ports;
+	int mapsize;
+
+	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
+		cxgb4i_max_connect = CXGB4I_MAX_CONN;
+
+	mapsize = cxgb4i_max_connect * sizeof(struct cxgbi_sock *);
+	ports = cxgbi_alloc_big_mem(sizeof(*ports) + mapsize, GFP_KERNEL);
+	if (!ports)
+		return -ENOMEM;
+
+	spin_lock_init(&ports->lock);
+	snic->cdev.pmap = ports;
+	snic->cdev.pmap->max_connect = cxgb4i_max_connect;
+	snic->cdev.pmap->sport_base = cxgb4i_sport_base;
+
+	snic->cdev.tx_skb_setmode = tx_skb_setmode;
+	snic->cdev.sock_send_pdus = cxgb4i_sock_send_pdus;
+	snic->cdev.get_skb_ulp_mode = get_skb_ulp_mode;
+
+	snic->handlers = cxgb4i_cplhandlers;
+
+	return 0;
+}
+
+void cxgb4i_ofld_cleanup(struct cxgb4i_snic *snic)
+{
+	struct cxgbi_sock *csk;
+	int i;
+
+	for (i = 0; i < snic->cdev.pmap->max_connect; i++) {
+		if (snic->cdev.pmap->port_csk[i]) {
+			csk = snic->cdev.pmap->port_csk[i];
+			snic->cdev.pmap->port_csk[i] = NULL;
+
+			spin_lock_bh(&csk->lock);
+			cxgb4i_sock_closed(csk);
+			spin_unlock_bh(&csk->lock);
+		}
+	}
+	cxgbi_free_big_mem(snic->cdev.pmap);
+}
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_offload.h b/drivers/scsi/cxgb4i/cxgb4i_offload.h
new file mode 100644
index 0000000..afd50d9
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_offload.h
@@ -0,0 +1,91 @@
+/*
+ * cxgb4i_offload.h: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__CXGB4I_OFFLOAD_H__
+#define	__CXGB4I_OFFLOAD_H__
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+
+#include "libcxgbi.h"
+
+#define	CXGB4I_MAX_CONN	16384
+
+enum {
+	CPL_RET_BUF_DONE = 1,
+	CPL_RET_BAD_MSG = 2,
+	CPL_RET_UNKNOWN_TID = 4
+};
+
+struct cxgbi_sock *cxgb4i_sock_create(struct cxgb4i_snic *);
+void cxgb4i_sock_release(struct cxgbi_sock *);
+int cxgb4i_sock_connect(struct net_device *, struct cxgbi_sock *,
+			struct sockaddr_in *);
+void cxgb4i_sock_rx_credits(struct cxgbi_sock *, int);
+int cxgb4i_sock_send_pdus(struct cxgbi_sock *, struct sk_buff *);
+
+struct cxgb4i_skb_rx_cb {
+	__u32 ddigest;
+	__u32 pdulen;
+};
+
+struct cxgb4i_skb_tx_cb {
+	struct l2t_skb_cb l2t;
+	struct sk_buff *wr_next;
+};
+
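+/*
+ * Per-skb driver state lives in skb->cb; the layout below (and the
+ * accessor macros that follow) must stay within the 48 bytes that
+ * sk_buff reserves there.
+ */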
+struct cxgb4i_skb_cb {
+	__u16 flags;
+	__u16 ulp_mode;
+	__u32 seq;
+
+	union {
+		struct cxgb4i_skb_rx_cb rx;
+		struct cxgb4i_skb_tx_cb tx;
+	};
+};
+
+#define CXGB4I_SKB_CB(skb)	((struct cxgb4i_skb_cb *)&((skb)->cb[0]))
+#define cxgb4i_skb_flags(skb)	(CXGB4I_SKB_CB(skb)->flags)
+#define cxgb4i_skb_ulp_mode(skb)	(CXGB4I_SKB_CB(skb)->ulp_mode)
+#define cxgb4i_skb_tcp_seq(skb)		(CXGB4I_SKB_CB(skb)->seq)
+#define cxgb4i_skb_rx_ddigest(skb)	(CXGB4I_SKB_CB(skb)->rx.ddigest)
+#define cxgb4i_skb_rx_pdulen(skb)	(CXGB4I_SKB_CB(skb)->rx.pdulen)
+#define cxgb4i_skb_tx_wr_next(skb)	(CXGB4I_SKB_CB(skb)->tx.wr_next)
+
+enum cxgb4i_skcb_flags {
+	CXGB4I_SKCB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a header */
+	CXGB4I_SKCB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
+	CXGB4I_SKCB_FLAG_COMPL = 1 << 2,	/* request WR completion */
+	CXGB4I_SKCB_FLAG_HDR_RCVD = 1 << 3,	/* recieved header pdu */
+	CXGB4I_SKCB_FLAG_DATA_RCVD = 1 << 4,	/*  recieved data pdu */
+	CXGB4I_SKCB_FLAG_STATUS_RCVD = 1 << 5,	/* recieved ddp status */
+};
+
+/*
+ * sge_opaque_hdr -
+ * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
+ * and for which we must reserve space.
+ */
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define CXGB4I_TX_HEADER_LEN \
+	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define SKB_TX_HEADROOM	SKB_MAX_HEAD(CXGB4I_TX_HEADER_LEN)
+
+#endif	/* __CXGB4I_OFFLOAD_H__ */
+
diff --git a/drivers/scsi/cxgb4i/cxgb4i_snic.c b/drivers/scsi/cxgb4i/cxgb4i_snic.c
new file mode 100644
index 0000000..68cdae5
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_snic.c
@@ -0,0 +1,260 @@
+/*
+ * cxgb4i_snic.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <net/route.h>
+
+#include "cxgb4i.h"
+
+#define	DRV_MODULE_NAME		"cxgb4i"
+#define	DRV_MODULE_VERSION	"0.90"
+#define	DRV_MODULE_RELDATE	"04/08/2010"
+
+static char version[] =
+	"Chelsio T4 iSCSI driver " DRV_MODULE_NAME
+	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_DESCRIPTION("Chelsio T4 iSCSI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static LIST_HEAD(snic_list);
+static DEFINE_MUTEX(snic_rwlock);
+
+static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo);
+static int cxgb4i_uld_rx_handler(void *handle, const __be64 *rsp,
+				const struct pkt_gl *pgl);
+static int cxgb4i_uld_state_change(void *handle, enum cxgb4_state state);
+
+static struct cxgb4i_snic *cxgb4i_snic_init(const struct cxgb4_lld_info *);
+static void cxgb4i_snic_cleanup(void);
+
+
+static struct cxgb4_uld_info cxgb4i_uld_info = {
+	.name = "cxgb4i",
+	.add = cxgb4i_uld_add,
+	.rx_handler = cxgb4i_uld_rx_handler,
+	.state_change = cxgb4i_uld_state_change,
+};
+
+
+struct cxgbi_hba *cxgb4i_hba_find_by_netdev(struct net_device *dev)
+{
+	int i;
+	struct cxgb4i_snic *snic = NULL;
+
+	if (dev->priv_flags & IFF_802_1Q_VLAN)
+		dev = vlan_dev_real_dev(dev);
+
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry(snic, &snic_list, list_head) {
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]->ndev == dev) {
+				mutex_unlock(&snic_rwlock);
+				return snic->hba[i];
+			}
+		}
+	}
+	mutex_unlock(&snic_rwlock);
+	return NULL;
+}
+
+struct cxgb4i_snic *cxgb4i_find_snic(struct net_device *dev, __be32 ipaddr)
+{
+	struct flowi fl;
+	struct rtable *rt;
+	struct net_device *sdev = NULL;
+	struct cxgb4i_snic *snic = NULL, *tmp;
+	int err, i;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.nl_u.ip4_u.daddr = ipaddr;
+
+	err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+	if (err)
+		goto out;
+
+	sdev = (&rt->u.dst)->dev;
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry_safe(snic, tmp, &snic_list, list_head) {
+		if (snic) {
+			for (i = 0; i < snic->lldi.nports; i++) {
+				if (sdev == snic->lldi.ports[i]) {
+					mutex_unlock(&snic_rwlock);
+					return snic;
+				}
+			}
+		}
+	}
+	mutex_unlock(&snic_rwlock);
+
+out:
+	snic = NULL;
+	return snic;
+}
+
+void cxgb4i_snic_add(struct list_head *list_head)
+{
+	mutex_lock(&snic_rwlock);
+	list_add_tail(list_head, &snic_list);
+	mutex_unlock(&snic_rwlock);
+}
+
+struct cxgb4i_snic *cxgb4i_snic_init(const struct cxgb4_lld_info *linfo)
+{
+	struct cxgb4i_snic *snic;
+	int i;
+
+	snic = kzalloc(sizeof(*snic), GFP_KERNEL);
+	if (snic) {
+
+		spin_lock_init(&snic->lock);
+		snic->lldi = *linfo;
+		snic->hba_cnt = snic->lldi.nports;
+		snic->cdev.dd_data = snic;
+		snic->cdev.pdev = snic->lldi.pdev;
+		snic->cdev.skb_tx_headroom = SKB_MAX_HEAD(CXGB4I_TX_HEADER_LEN);
+
+		cxgb4i_iscsi_init();
+		cxgbi_pdu_init(&snic->cdev);
+		cxgb4i_ddp_init(snic);
+		cxgb4i_ofld_init(snic);
+
+		for (i = 0; i < snic->hba_cnt; i++) {
+			snic->hba[i] = cxgb4i_hba_add(snic,
+						snic->lldi.ports[i]);
+			if (!snic->hba[i]) {
+				kfree(snic);
+				snic = ERR_PTR(-ENOMEM);
+				goto out;
+			}
+		}
+		cxgb4i_snic_add(&snic->list_head);
+	} else {
+		snic = ERR_PTR(-ENOMEM);
+	}
+
+out:
+	return snic;
+}
+
+void cxgb4i_snic_cleanup(void)
+{
+	struct cxgb4i_snic *snic, *tmp;
+	int i;
+
+	mutex_lock(&snic_rwlock);
+	list_for_each_entry_safe(snic, tmp, &snic_list, list_head) {
+		list_del(&snic->list_head);
+
+		for (i = 0; i < snic->hba_cnt; i++) {
+			if (snic->hba[i]) {
+				cxgb4i_hba_remove(snic->hba[i]);
+				snic->hba[i] = NULL;
+			}
+		}
+		cxgb4i_ofld_cleanup(snic);
+		cxgb4i_ddp_cleanup(snic);
+		cxgbi_pdu_cleanup(&snic->cdev);
+		cxgbi_log_info("snic 0x%p, %u scsi hosts removed.\n",
+				snic, snic->hba_cnt);
+
+		kfree(snic);
+	}
+	mutex_unlock(&snic_rwlock);
+	cxgb4i_iscsi_cleanup();
+}
+
+static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo)
+{
+	struct cxgb4i_snic *snic;
+
+	cxgbi_log_info("%s", version);
+
+	snic = cxgb4i_snic_init(linfo);
+	return snic;
+}
+
+static int cxgb4i_uld_rx_handler(void *handle, const __be64 *rsp,
+				const struct pkt_gl *pgl)
+{
+	struct cxgb4i_snic *snic = handle;
+	struct sk_buff *skb;
+	const struct cpl_act_establish *rpl;
+	unsigned int opcode;
+
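+	/*
+	 * pgl == NULL: the CPL message fits in the response descriptor
+	 * itself; CXGB4_MSG_AN: async notification, nothing to do here;
+	 * otherwise the gather list is copied into an skb before the
+	 * per-opcode handler is called.
+	 */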
+	if (pgl == NULL) {
+		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+		skb = alloc_skb(256, GFP_ATOMIC);
+		if (!skb)
+			goto nomem;
+		__skb_put(skb, len);
+		skb_copy_to_linear_data(skb, &rsp[1], len);
+
+	} else if (pgl == CXGB4_MSG_AN) {
+
+		return 0;
+
+	} else {
+
+		skb = cxgb4_pktgl_to_skb(pgl, 256, 256);
+		if (unlikely(!skb))
+			goto nomem;
+	}
+
+	rpl = cplhdr(skb);
+	opcode = rpl->ot.opcode;
+
+	cxgbi_api_debug("snic %p, opcode 0x%x, skb %p\n",
+			 snic, opcode, skb);
+
+	if (snic->handlers[opcode])
+		snic->handlers[opcode](snic, skb);
+	else
+		cxgbi_log_error("No handler for opcode 0x%x\n", opcode);
+
+	return 0;
+
+nomem:
+	cxgbi_api_debug("OOM bailing out\n");
+	return 1;
+}
+
+static int cxgb4i_uld_state_change(void *handle, enum cxgb4_state state)
+{
+	return 0;
+}
+
+static int __init cxgb4i_init_module(void)
+{
+	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+
+	return 0;
+}
+
+static void __exit cxgb4i_exit_module(void)
+{
+
+	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
+	cxgb4i_snic_cleanup();
+}
+
+module_init(cxgb4i_init_module);
+module_exit(cxgb4i_exit_module);
+
-- 
1.6.6.1


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part
       [not found]         ` <1273944249-311-3-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2010-05-15 17:24           ` Rakesh Ranjan
  2010-05-27  5:59             ` Mike Christie
  2010-05-27  7:38           ` [PATCH 2/3] cxgb4i_v3: main driver files Mike Christie
  1 sibling, 1 reply; 9+ messages in thread
From: Rakesh Ranjan @ 2010-05-15 17:24 UTC (permalink / raw)
  To: NETDEVML, SCSIDEVML, OISCSIML
  Cc: LKML, Karen Xie, David Miller, James Bottomley, Mike Christie,
	Anish Bhatt, Rakesh Ranjan, Rakesh Ranjan

From: Rakesh Ranjan <rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>


Signed-off-by: Rakesh Ranjan <rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
---
 drivers/scsi/cxgb4i/cxgb4i_iscsi.c |  617 ++++++++++++++++++++++++++++++++++++
 drivers/scsi/cxgb4i/libcxgbi.c     |  589 ++++++++++++++++++++++++++++++++++
 drivers/scsi/cxgb4i/libcxgbi.h     |  430 +++++++++++++++++++++++++
 3 files changed, 1636 insertions(+), 0 deletions(-)
 create mode 100644 drivers/scsi/cxgb4i/cxgb4i_iscsi.c
 create mode 100644 drivers/scsi/cxgb4i/libcxgbi.c
 create mode 100644 drivers/scsi/cxgb4i/libcxgbi.h

diff --git a/drivers/scsi/cxgb4i/cxgb4i_iscsi.c b/drivers/scsi/cxgb4i/cxgb4i_iscsi.c
new file mode 100644
index 0000000..a10ab6d
--- /dev/null
+++ b/drivers/scsi/cxgb4i/cxgb4i_iscsi.c
@@ -0,0 +1,617 @@
+/*
+ * cxgb4i_iscsi.c: Chelsio T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/inet.h>
+#include <linux/crypto.h>
+#include <linux/if_vlan.h>
+#include <net/dst.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "cxgb4i.h"
+
+/*
+ * align pdu size to multiple of 512 for better performance
+ */
+#define cxgb4i_align_pdu_size(n) do { n = (n) & (~511); } while (0)
+
+static struct scsi_transport_template *cxgb4i_scsi_transport;
+static struct scsi_host_template cxgb4i_host_template;
+static struct iscsi_transport cxgb4i_iscsi_transport;
+
+struct cxgbi_hba *cxgb4i_hba_add(struct cxgb4i_snic *snic,
+					struct net_device *dev)
+{
+	struct cxgbi_hba *chba;
+	struct Scsi_Host *shost;
+	int err;
+
+	shost = iscsi_host_alloc(&cxgb4i_host_template, sizeof(*chba), 1);
+
+	if (!shost) {
+		cxgbi_log_info("snic 0x%p, ndev 0x%p, host alloc failed\n",
+				snic, dev);
+		return NULL;
+	}
+
+	shost->transportt = cxgb4i_scsi_transport;
+	shost->max_lun = CXGB4I_MAX_LUN;
+	shost->max_id = CXGB4I_MAX_TARGET;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	chba = iscsi_host_priv(shost);
+	cxgbi_log_debug("snic %p, cdev %p\n", snic, &snic->cdev);
+	chba->cdev = &snic->cdev;
+	chba->ndev = dev;
+	chba->shost = shost;
+
+	pci_dev_get(snic->lldi.pdev);
+	err = iscsi_host_add(shost, &snic->lldi.pdev->dev);
+	if (err) {
+		cxgbi_log_info("snic 0x%p, dev 0x%p, host add failed\n",
+				snic, dev);
+		goto pci_dev_put;
+	}
+
+	return chba;
+
+pci_dev_put:
+	pci_dev_put(snic->lldi.pdev);
+	scsi_host_put(shost);
+	return NULL;
+}
+
+void cxgb4i_hba_remove(struct cxgbi_hba *chba)
+{
+	iscsi_host_remove(chba->shost);
+	pci_dev_put(cxgb4i_get_snic(chba->cdev)->lldi.pdev);
+	iscsi_host_free(chba->shost);
+}
+
+static struct iscsi_endpoint *cxgb4i_ep_connect(struct Scsi_Host *shost,
+						struct sockaddr *dst_addr,
+						int non_blocking)
+{
+	struct iscsi_endpoint *iep;
+	struct cxgbi_endpoint *cep;
+	struct cxgbi_hba *hba = NULL;
+	struct cxgbi_sock *csk = NULL;
+	struct cxgb4i_snic *snic;
+	int err = 0;
+
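+	/*
+	 * shost may be NULL when the connect request is not bound to a
+	 * specific host; in that case the snic is picked purely by
+	 * routing the destination address.
+	 */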
+	if (shost)
+		hba = iscsi_host_priv(shost);
+
+	snic = cxgb4i_find_snic(hba ? hba->ndev : NULL,
+			((struct sockaddr_in *)dst_addr)->sin_addr.s_addr);
+	if (!snic) {
+		cxgbi_log_info("ep connect no snic\n");
+		err = -ENOSPC;
+		goto release_conn;
+	}
+
+	csk = cxgb4i_sock_create(snic);
+	if (!csk) {
+		cxgbi_log_info("ep connect OOM\n");
+		err = -ENOMEM;
+		goto release_conn;
+	}
+	err = cxgb4i_sock_connect(hba ? hba->ndev : NULL, csk,
+				(struct sockaddr_in *)dst_addr);
+	if (err < 0) {
+		cxgbi_log_info("ep connect failed\n");
+		goto release_conn;
+	}
+
+	hba = cxgb4i_hba_find_by_netdev(csk->dst->dev);
+	if (!hba) {
+		err = -ENOSPC;
+		cxgbi_log_info("Not going through cxgb4i device\n");
+		goto release_conn;
+	}
+
+	if (shost && hba != iscsi_host_priv(shost)) {
+		err = -ENOSPC;
+		cxgbi_log_info("Could not connect through request host %u\n",
+				shost->host_no);
+		goto release_conn;
+	}
+
+	if (cxgbi_sock_is_closing(csk)) {
+		err = -ENOSPC;
+		cxgbi_log_info("ep connect unable to connect\n");
+		goto release_conn;
+	}
+
+	iep = iscsi_create_endpoint(sizeof(*cep));
+	if (!iep) {
+		err = -ENOMEM;
+		cxgbi_log_info("iscsi alloc ep, OOM\n");
+		goto release_conn;
+	}
+
+	cep = iep->dd_data;
+	cep->csk = csk;
+	cep->chba = hba;
+
+	cxgbi_api_debug("iep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p\n",
+			iep, cep, csk, hba);
+
+	return iep;
+
+release_conn:
+	cxgbi_api_debug("conn 0x%p failed, release\n", csk);
+	if (csk)
+		cxgb4i_sock_release(csk);
+
+	return ERR_PTR(err);
+}
+
+static int cxgb4i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct cxgbi_endpoint *cep = ep->dd_data;
+	struct cxgbi_sock *csk = cep->csk;
+
+	if (!cxgbi_sock_is_established(csk))
+		return 0;
+
+	return 1;
+}
+
+static void cxgb4i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct cxgbi_endpoint *cep = ep->dd_data;
+	struct cxgbi_conn *cconn = cep->cconn;
+
+	if (cconn && cconn->iconn) {
+		iscsi_suspend_tx(cconn->iconn);
+
+		write_lock_bh(&cep->csk->callback_lock);
+		cep->csk->user_data = NULL;
+		cconn->cep = NULL;
+		write_unlock_bh(&cep->csk->callback_lock);
+	}
+
+	cxgb4i_sock_release(cep->csk);
+	iscsi_destroy_endpoint(ep);
+}
+
+static struct iscsi_cls_session *
+cxgb4i_create_session(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
+							u32 initial_cmdsn)
+{
+	struct cxgbi_endpoint *cep;
+	struct cxgbi_hba *chba;
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+
+	if (!ep) {
+		cxgbi_log_error("missing endpoint\n");
+		return NULL;
+	}
+
+	cep = ep->dd_data;
+	chba = cep->chba;
+	shost = chba->shost;
+
+	BUG_ON(chba != iscsi_host_priv(shost));
+
+	cls_session = iscsi_session_setup(&cxgb4i_iscsi_transport, shost,
+					cmds_max, 0,
+					sizeof(struct iscsi_tcp_task) +
+					sizeof(struct cxgbi_task_data),
+					initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+
+	session = cls_session->dd_data;
+	if (iscsi_tcp_r2tpool_alloc(session))
+		goto remove_session;
+
+	return cls_session;
+
+remove_session:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
+
+static void cxgb4i_destroy_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_tcp_r2tpool_free(cls_session->dd_data);
+	iscsi_session_teardown(cls_session);
+}
+
+static inline int cxgb4i_conn_max_xmit_dlength(struct iscsi_conn *conn)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
+
+	max = min(cxgb4i_get_snic(cconn->chba->cdev)->tx_max_size, max);
+	if (conn->max_xmit_dlength)
+		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
+	else
+		conn->max_xmit_dlength = max;
+	cxgb4i_align_pdu_size(conn->max_xmit_dlength);
+	return 0;
+}
+
+static inline int cxgb4i_conn_max_recv_dlength(struct iscsi_conn *conn)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	unsigned int max = cxgb4i_get_snic(cconn->chba->cdev)->rx_max_size;
+
+	cxgb4i_align_pdu_size(max);
+
+	if (conn->max_recv_dlength) {
+		if (conn->max_recv_dlength > max) {
+			cxgbi_log_error("MaxRecvDataSegmentLength %u too big."
+					" Need to be <= %u.\n",
+					conn->max_recv_dlength, max);
+			return -EINVAL;
+		}
+		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
+		cxgb4i_align_pdu_size(conn->max_recv_dlength);
+	} else
+		conn->max_recv_dlength = max;
+
+	return 0;
+}
+
+static struct iscsi_cls_conn *
+cxgb4i_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
+{
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+	struct iscsi_tcp_conn *tcp_conn;
+	struct cxgbi_conn *cconn;
+
+	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
+	if (!cls_conn)
+		return NULL;
+
+	conn = cls_conn->dd_data;
+	tcp_conn = conn->dd_data;
+	cconn = tcp_conn->dd_data;
+
+	cconn->iconn = conn;
+	return cls_conn;
+}
+
+static int cxgb4i_bind_conn(struct iscsi_cls_session *cls_session,
+				struct iscsi_cls_conn *cls_conn,
+				u64 transport_eph, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct iscsi_endpoint *ep;
+	struct cxgbi_endpoint *cep;
+	struct cxgbi_sock *csk;
+	int err;
+
+	ep = iscsi_lookup_endpoint(transport_eph);
+	if (!ep)
+		return -EINVAL;
+
+	/*  setup ddp pagesize */
+	cep = ep->dd_data;
+	csk = cep->csk;
+	err = cxgb4i_ddp_setup_conn_host_pagesize(csk, csk->hwtid, 0);
+	if (err < 0)
+		return err;
+
+	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+	if (err)
+		return -EINVAL;
+
+	/*  calculate the tag idx bits needed for this conn based on cmds_max */
+	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
+
+	read_lock(&csk->callback_lock);
+	csk->user_data = conn;
+	cconn->chba = cep->chba;
+	cconn->cep = cep;
+	cep->cconn = cconn;
+	read_unlock(&csk->callback_lock);
+
+	cxgb4i_conn_max_xmit_dlength(conn);
+	cxgb4i_conn_max_recv_dlength(conn);
+
+	spin_lock_bh(&conn->session->lock);
+	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
+	conn->portal_port = ntohs(csk->daddr.sin_port);
+	spin_unlock_bh(&conn->session->lock);
+
+	/*  init recv engine */
+	iscsi_tcp_hdr_recv_prep(tcp_conn);
+
+	return 0;
+}
+
+static int
+cxgb4i_get_conn_param(struct iscsi_cls_conn *cls_conn,
+			enum iscsi_param param, char *buff)
+{
+	struct iscsi_conn *iconn = cls_conn->dd_data;
+	int len;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		spin_lock_bh(&iconn->session->lock);
+		len = sprintf(buff, "%hu\n", iconn->portal_port);
+		spin_unlock_bh(&iconn->session->lock);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		spin_lock_bh(&iconn->session->lock);
+		len = sprintf(buff, "%s\n", iconn->portal_address);
+		spin_unlock_bh(&iconn->session->lock);
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buff);
+	}
+	return len;
+}
+
+static int
+cxgb4i_set_conn_param(struct iscsi_cls_conn *cls_conn,
+			enum iscsi_param param, char *buf, int buflen)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_session *session = conn->session;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_sock *csk = cconn->cep->csk;
+	int value, err = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_HDRDGST_EN:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && conn->hdrdgst_en)
+			err = cxgb4i_ddp_setup_conn_digest(csk, csk->hwtid,
+							conn->hdrdgst_en,
+							conn->datadgst_en, 0);
+		break;
+	case ISCSI_PARAM_DATADGST_EN:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && conn->datadgst_en)
+			err = cxgb4i_ddp_setup_conn_digest(csk, csk->hwtid,
+							conn->hdrdgst_en,
+							conn->datadgst_en, 0);
+		break;
+	case ISCSI_PARAM_MAX_R2T:
+		sscanf(buf, "%d", &value);
+		if (value <= 0 || !is_power_of_2(value))
+			return -EINVAL;
+		if (session->max_r2t == value)
+			break;
+		iscsi_tcp_r2tpool_free(session);
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err && iscsi_tcp_r2tpool_alloc(session))
+			return -ENOMEM;
+		break;
+	case ISCSI_PARAM_MAX_RECV_DLENGTH:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err)
+			err = cxgb4i_conn_max_recv_dlength(conn);
+		break;
+	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+		err = iscsi_set_param(cls_conn, param, buf, buflen);
+		if (!err)
+			err = cxgb4i_conn_max_xmit_dlength(conn);
+		break;
+	default:
+		return iscsi_set_param(cls_conn, param, buf, buflen);
+	}
+	return err;
+}
+
+static int
+cxgb4i_set_host_param(struct Scsi_Host *shost,
+			enum iscsi_host_param param, char *buff, int buflen)
+{
+	struct cxgbi_hba *chba = iscsi_host_priv(shost);
+
+	if (!chba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not set host param. "
+				"Netdev for host not set\n");
+		return -ENODEV;
+	}
+
+	cxgbi_api_debug("param %d, buff %s\n", param, buff);
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_IPADDRESS:
+	{
+		__be32 addr = in_aton(buff);
+		cxgb4i_set_iscsi_ipv4(chba, addr);
+		return 0;
+	}
+
+	case ISCSI_HOST_PARAM_HWADDRESS:
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		return 0;
+
+	default:
+		return iscsi_host_set_param(shost, param, buff, buflen);
+	}
+}
+
+static int
+cxgb4i_get_host_param(struct Scsi_Host *shost,
+			enum iscsi_host_param param, char *buff)
+{
+	struct cxgbi_hba *chba = iscsi_host_priv(shost);
+	int len = 0;
+
+	if (!chba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not get host param. "
+				"Netdev for host not set\n");
+		return -ENODEV;
+	}
+
+	cxgbi_api_debug("hba %s, param %d\n", chba->ndev->name, param);
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buff, chba->ndev->dev_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buff, "%s\n", chba->ndev->name);
+		break;
+	case ISCSI_HOST_PARAM_IPADDRESS:
+	{
+		__be32 addr;
+
+		addr = cxgb4i_get_iscsi_ipv4(chba);
+		len = sprintf(buff, "%pI4", &addr);
+		break;
+	}
+	default:
+		return iscsi_host_get_param(shost, param, buff);
+	}
+
+	return len;
+}
+
+static void cxgb4i_get_conn_stats(struct iscsi_cls_conn *cls_conn,
+				struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	stats->custom_length = 1;
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+static struct scsi_host_template cxgb4i_host_template = {
+	.module				= THIS_MODULE,
+	.name				= "Chelsio T4 iSCSI initiator",
+	.proc_name			= "cxgb4i",
+	.queuecommand			= iscsi_queuecommand,
+	.change_queue_depth		= iscsi_change_queue_depth,
+	.can_queue			= CXGB4I_SCSI_HOST_QDEPTH,
+	.sg_tablesize			= SG_ALL,
+	.max_sectors			= 0xFFFF,
+	.cmd_per_lun			= ISCSI_DEF_CMD_PER_LUN,
+	.eh_abort_handler		= iscsi_eh_abort,
+	.eh_device_reset_handler	= iscsi_eh_device_reset,
+	.eh_target_reset_handler	= iscsi_eh_recover_target,
+	.target_alloc			= iscsi_target_alloc,
+	.use_clustering			= DISABLE_CLUSTERING,
+	.this_id			= -1,
+};
+
+#define	CXGB4I_CAPS	(CAP_RECOVERY_L0 | CAP_MULTI_R2T |	\
+			CAP_HDRDGST | CAP_DATADGST |		\
+			CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD)
+#define	CXGB4I_PMASK	(ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | \
+			ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | \
+			ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | \
+			ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | \
+			ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | \
+			ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | \
+			ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | \
+			ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | \
+			ISCSI_PERSISTENT_ADDRESS | ISCSI_TARGET_NAME | \
+			ISCSI_TPGT | ISCSI_USERNAME | \
+			ISCSI_PASSWORD | ISCSI_USERNAME_IN | \
+			ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | \
+			ISCSI_ABORT_TMO | ISCSI_LU_RESET_TMO | \
+			ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | \
+			ISCSI_RECV_TMO | ISCSI_IFACE_NAME | \
+			ISCSI_INITIATOR_NAME)
+#define	CXGB4I_HPMASK	(ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | \
+			ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME)
+
+static struct iscsi_transport cxgb4i_iscsi_transport = {
+	.owner				= THIS_MODULE,
+	.name				= "cxgb4i",
+	.caps				= CXGB4I_CAPS,
+	.param_mask			= CXGB4I_PMASK,
+	.host_param_mask		= CXGB4I_HPMASK,
+	.get_host_param			= cxgb4i_get_host_param,
+	.set_host_param			= cxgb4i_set_host_param,
+
+	.create_session			= cxgb4i_create_session,
+	.destroy_session		= cxgb4i_destroy_session,
+	.get_session_param		= iscsi_session_get_param,
+
+	.create_conn			= cxgb4i_create_conn,
+	.bind_conn			= cxgb4i_bind_conn,
+	.destroy_conn			= iscsi_tcp_conn_teardown,
+	.start_conn			= iscsi_conn_start,
+	.stop_conn			= iscsi_conn_stop,
+	.get_conn_param			= cxgb4i_get_conn_param,
+	.set_param			= cxgb4i_set_conn_param,
+	.get_stats			= cxgb4i_get_conn_stats,
+
+	.send_pdu			= iscsi_conn_send_pdu,
+
+	.init_task			= iscsi_tcp_task_init,
+	.xmit_task			= iscsi_tcp_task_xmit,
+	.cleanup_task			= cxgbi_cleanup_task,
+
+	.alloc_pdu			= cxgbi_conn_alloc_pdu,
+	.init_pdu			= cxgbi_conn_init_pdu,
+	.xmit_pdu			= cxgbi_conn_xmit_pdu,
+	.parse_pdu_itt			= cxgbi_parse_pdu_itt,
+
+	.ep_connect			= cxgb4i_ep_connect,
+	.ep_poll			= cxgb4i_ep_poll,
+	.ep_disconnect			= cxgb4i_ep_disconnect,
+
+	.session_recovery_timedout	= iscsi_session_recovery_timedout,
+};
+
+int cxgb4i_iscsi_init(void)
+{
+	cxgb4i_scsi_transport = iscsi_register_transport(
+					&cxgb4i_iscsi_transport);
+	if (!cxgb4i_scsi_transport) {
+		cxgbi_log_error("Could not register cxgb4i transport\n");
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+void cxgb4i_iscsi_cleanup(void)
+{
+	if (cxgb4i_scsi_transport) {
+		cxgbi_api_debug("cxgb4i transport 0x%p removed\n",
+				cxgb4i_scsi_transport);
+		iscsi_unregister_transport(&cxgb4i_iscsi_transport);
+	}
+}
+
diff --git a/drivers/scsi/cxgb4i/libcxgbi.c b/drivers/scsi/cxgb4i/libcxgbi.c
new file mode 100644
index 0000000..98f904b
--- /dev/null
+++ b/drivers/scsi/cxgb4i/libcxgbi.c
@@ -0,0 +1,589 @@
+/*
+ * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/pci.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include "libcxgbi.h"
+
+/* always allocate rooms for AHS */
+#define SKB_TX_PDU_HEADER_LEN	\
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
+
+/*
+ * pdu receive, interact with libiscsi_tcp
+ */
+static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+			       unsigned int offset, int offloaded)
+{
+	int status = 0;
+	int bytes_read;
+
+	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
+	switch (status) {
+	case ISCSI_TCP_CONN_ERR:
+		return -EIO;
+	case ISCSI_TCP_SUSPENDED:
+		/* no transfer - just have caller flush queue */
+		return bytes_read;
+	case ISCSI_TCP_SKB_DONE:
+		/*
+		 * pdus should always fit in the skb and we should get
+		 * segment done notification.
+		 */
+		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
+		return -EFAULT;
+	case ISCSI_TCP_SEGMENT_DONE:
+		return bytes_read;
+	default:
+		iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
+				  "status %d\n", status);
+		return -EINVAL;
+	}
+}
+
+int cxgbi_conn_read_bhs_pdu_skb(struct iscsi_conn *conn,
+					struct sk_buff *skb)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+
+	int rc;
+
+	cxgbi_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
+			conn, skb, skb->len, cdev->get_skb_ulp_mode(skb));
+
+	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
+		return -EIO;
+	}
+
+	if (conn->hdrdgst_en && (cdev->get_skb_ulp_mode(skb)
+				& ULP2_FLAG_HCRC_ERROR)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
+		return -EIO;
+	}
+
+	rc = read_pdu_skb(conn, skb, 0, 0);
+	if (rc <= 0)
+		return rc;
+
+	return 0;
+}
+
+int cxgbi_conn_read_data_pdu_skb(struct iscsi_conn *conn,
+				    struct sk_buff *skb)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	bool offloaded = false;
+	unsigned int offset = 0;
+	int rc;
+
+	cxgbi_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
+			conn, skb, skb->len, cdev->get_skb_ulp_mode(skb));
+
+	if (conn->datadgst_en &&
+		(cdev->get_skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
+		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+		return -EIO;
+	}
+
+	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
+		return 0;
+
+	if (conn->hdrdgst_en)
+		offset = ISCSI_DIGEST_SIZE;
+
+	if (cdev->get_skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
+		cxgbi_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
+				"itt 0x%x.\n",
+				skb,
+				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+				tcp_conn->in.datalen,
+				ntohl(tcp_conn->in.hdr->itt));
+		offloaded = true;
+	} else {
+		cxgbi_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
+				"itt 0x%x.\n",
+				skb,
+				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+				tcp_conn->in.datalen,
+				ntohl(tcp_conn->in.hdr->itt));
+	}
+
+	rc = read_pdu_skb(conn, skb, 0, offloaded);
+	if (rc < 0)
+		return rc;
+	else
+		return 0;
+}
+
+static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
+				unsigned int offset, unsigned int *off,
+				struct scatterlist **sgp)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, sgcnt, i) {
+		if (offset < sg->length) {
+			*off = offset;
+			*sgp = sg;
+			return 0;
+		}
+		offset -= sg->length;
+	}
+	return -EFAULT;
+}
+
+static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+				unsigned int dlen, skb_frag_t *frags,
+				int frag_max)
+{
+	unsigned int datalen = dlen;
+	unsigned int sglen = sg->length - sgoffset;
+	struct page *page = sg_page(sg);
+	int i;
+
+	i = 0;
+	do {
+		unsigned int copy;
+
+		if (!sglen) {
+			sg = sg_next(sg);
+			if (!sg) {
+				cxgbi_log_error("sg NULL, len %u/%u.\n",
+								datalen, dlen);
+				return -EINVAL;
+			}
+			sgoffset = 0;
+			sglen = sg->length;
+			page = sg_page(sg);
+
+		}
+		copy = min(datalen, sglen);
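+		/*
+		 * merge into the previous frag if this chunk continues the
+		 * same page at the next offset
+		 */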
+		if (i && page == frags[i - 1].page &&
+		    sgoffset + sg->offset ==
+			frags[i - 1].page_offset + frags[i - 1].size) {
+			frags[i - 1].size += copy;
+		} else {
+			if (i >= frag_max) {
+				cxgbi_log_error("too many pages %u, "
+						 "dlen %u.\n", frag_max, dlen);
+				return -EINVAL;
+			}
+
+			frags[i].page = page;
+			frags[i].page_offset = sg->offset + sgoffset;
+			frags[i].size = copy;
+			i++;
+		}
+		datalen -= copy;
+		sgoffset += copy;
+		sglen -= copy;
+	} while (datalen);
+
+	return i;
+}
+
+int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+{
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgbi_task_data *tdata = task->dd_data + sizeof(*tcp_task);
+	struct scsi_cmnd *sc = task->sc;
+	int headroom = SKB_TX_PDU_HEADER_LEN;
+
+	tcp_task->dd_data = tdata;
+	task->hdr = NULL;
+
+	/* write command, need to send data pdus */
+	if (cdev->skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
+	    (opcode == ISCSI_OP_SCSI_CMD &&
+	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+		headroom += min(cdev->skb_extra_headroom,
+					conn->max_xmit_dlength);
+
+	tdata->skb = alloc_skb(cdev->skb_tx_headroom + headroom, GFP_ATOMIC);
+	if (!tdata->skb)
+		return -ENOMEM;
+	skb_reserve(tdata->skb, cdev->skb_tx_headroom);
+
+	cxgbi_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
+			task, opcode, tdata->skb);
+
+	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+	task->hdr_max = SKB_TX_PDU_HEADER_LEN;
+
+	/* data_out uses scsi_cmd's itt */
+	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+		cxgbi_reserve_itt(task, &task->hdr->itt);
+
+	return 0;
+}
+
+int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
+			      unsigned int count)
+{
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgbi_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
+	unsigned int datalen = count;
+	int i, padlen = iscsi_padding(count);
+	struct page *pg;
+
+	cxgbi_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
+			task, task->sc, offset, count, skb);
+
+	skb_put(skb, task->hdr_len);
+	cdev->tx_skb_setmode(skb, conn->hdrdgst_en,
+				datalen ? conn->datadgst_en : 0);
+	if (!count)
+		return 0;
+
+	if (task->sc) {
+		struct scsi_data_buffer *sdb = scsi_out(task->sc);
+		struct scatterlist *sg = NULL;
+		int err;
+
+		tdata->offset = offset;
+		tdata->count = count;
+		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
+					tdata->offset, &tdata->sgoffset, &sg);
+		if (err < 0) {
+			cxgbi_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
+					sdb->table.nents, tdata->offset,
+					sdb->length);
+			return err;
+		}
+		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
+					tdata->frags, MAX_PDU_FRAGS);
+		if (err < 0) {
+			cxgbi_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
+					sdb->table.nents, tdata->offset,
+					tdata->count);
+			return err;
+		}
+		tdata->nr_frags = err;
+
+		if (tdata->nr_frags > MAX_SKB_FRAGS ||
+		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+			char *dst = skb->data + task->hdr_len;
+			skb_frag_t *frag = tdata->frags;
+
+			/* data fits in the skb's headroom */
+			for (i = 0; i < tdata->nr_frags; i++, frag++) {
+				char *src = kmap_atomic(frag->page,
+							KM_SOFTIRQ0);
+
+				memcpy(dst, src+frag->page_offset, frag->size);
+				dst += frag->size;
+				kunmap_atomic(src, KM_SOFTIRQ0);
+			}
+			if (padlen) {
+				memset(dst, 0, padlen);
+				padlen = 0;
+			}
+			skb_put(skb, count + padlen);
+		} else {
+			/* data goes into the skb frags */
+			for (i = 0; i < tdata->nr_frags; i++)
+				get_page(tdata->frags[i].page);
+
+			memcpy(skb_shinfo(skb)->frags, tdata->frags,
+				sizeof(skb_frag_t) * tdata->nr_frags);
+			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+			skb->len += count;
+			skb->data_len += count;
+			skb->truesize += count;
+		}
+
+	} else {
+		pg = virt_to_page(task->data);
+
+		get_page(pg);
+		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
+					count);
+		skb->len += count;
+		skb->data_len += count;
+		skb->truesize += count;
+	}
+
+	if (padlen) {
+		i = skb_shinfo(skb)->nr_frags;
+		get_page(cdev->pad_page);
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					cdev->pad_page, 0, padlen);
+
+		skb->data_len += padlen;
+		skb->truesize += padlen;
+		skb->len += padlen;
+	}
+
+	return 0;
+}
+
+int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
+{
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgbi_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
+	unsigned int datalen;
+	int err;
+
+	if (!skb)
+		return 0;
+
+	datalen = skb->data_len;
+	tdata->skb = NULL;
+	err = cdev->sock_send_pdus(cconn->cep->csk, skb);
+	if (err > 0) {
+		int pdulen = err;
+
+		cxgbi_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+				task, skb, skb->len, skb->data_len, err);
+
+		if (task->conn->hdrdgst_en)
+			pdulen += ISCSI_DIGEST_SIZE;
+		if (datalen && task->conn->datadgst_en)
+			pdulen += ISCSI_DIGEST_SIZE;
+
+		task->conn->txdata_octets += pdulen;
+		return 0;
+	}
+
+	if (err == -EAGAIN || err == -ENOBUFS) {
+		/* reset skb to send when we are called again */
+		tdata->skb = skb;
+		return err;
+	}
+
+	kfree_skb(skb);
+	cxgbi_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+			task->itt, skb, skb->len, skb->data_len, err);
+	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
+	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
+	return err;
+}
+
+int cxgbi_pdu_init(struct cxgbi_device *cdev)
+{
+	if (cdev->skb_tx_headroom > (512 * MAX_SKB_FRAGS))
+		cdev->skb_extra_headroom = cdev->skb_tx_headroom;
+	cdev->pad_page = alloc_page(GFP_KERNEL);
+	if (!cdev->pad_page)
+		return -ENOMEM;
+	memset(page_address(cdev->pad_page), 0, PAGE_SIZE);
+	return 0;
+}
+
+void cxgbi_pdu_cleanup(struct cxgbi_device *cdev)
+{
+	if (cdev->pad_page) {
+		__free_page(cdev->pad_page);
+		cdev->pad_page = NULL;
+	}
+}
+
+void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
+{
+	struct iscsi_conn *conn = csk->user_data;
+
+	if (conn) {
+		cxgbi_tx_debug("cn 0x%p, cid %d.\n", csk, conn->id);
+		iscsi_conn_queue_work(conn);
+	}
+}
+
+int cxgbi_sock_get_port(struct cxgbi_sock *csk)
+{
+	unsigned int start;
+	int idx;
+
+	if (!csk->cdev->pmap)
+		goto error_out;
+
+	if (csk->saddr.sin_port) {
+		cxgbi_log_error("connect, sin_port non-zero %u\n",
+				ntohs(csk->saddr.sin_port));
+		return -EADDRINUSE;
+	}
+
+	spin_lock_bh(&csk->cdev->pmap->lock);
+	start = idx = csk->cdev->pmap->next;
+
+	do {
+		if (++idx >= csk->cdev->pmap->max_connect)
+			idx = 0;
+		if (!csk->cdev->pmap->port_csk[idx]) {
+			csk->saddr.sin_port =
+				htons(csk->cdev->pmap->sport_base + idx);
+			csk->cdev->pmap->next = idx;
+			csk->cdev->pmap->port_csk[idx] = csk;
+			spin_unlock_bh(&csk->cdev->pmap->lock);
+
+			cxgbi_conn_debug("reserved port %u\n",
+					csk->cdev->pmap->sport_base + idx);
+
+			return 0;
+		}
+	} while (idx != start);
+	spin_unlock_bh(&csk->cdev->pmap->lock);
+
+error_out:
+	return -EADDRNOTAVAIL;
+}
+
+void cxgbi_sock_put_port(struct cxgbi_sock *csk)
+{
+	if (csk->saddr.sin_port) {
+		int idx = ntohs(csk->saddr.sin_port) -
+			csk->cdev->pmap->sport_base;
+
+		csk->saddr.sin_port = 0;
+		if (idx < 0 || idx >= csk->cdev->pmap->max_connect)
+			return;
+
+		spin_lock_bh(&csk->cdev->pmap->lock);
+		csk->cdev->pmap->port_csk[idx] = NULL;
+		spin_unlock_bh(&csk->cdev->pmap->lock);
+
+		cxgbi_conn_debug("released port %u\n",
+				csk->cdev->pmap->sport_base + idx);
+	}
+}
+
+void cxgbi_release_itt(struct iscsi_task *task, itt_t hdr_itt)
+{
+	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	struct cxgbi_tag_format *tformat = &cdev->tag_format;
+	u32 tag = ntohl((__force u32)hdr_itt);
+
+	cxgbi_tag_debug("release tag 0x%x.\n", tag);
+
+	if (sc &&
+		(scsi_bidi_cmnd(sc) ||
+		 sc->sc_data_direction == DMA_FROM_DEVICE) &&
+			cxgbi_is_ddp_tag(tformat, tag))
+		cdev->ddp_tag_release(cdev, tag);
+}
+
+
+int cxgbi_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
+{
+	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *sess = conn->session;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	struct cxgbi_tag_format *tformat = &cdev->tag_format;
+	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
+	u32 tag;
+	int err = -EINVAL;
+
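+	/*
+	 * only data-in (read or bidi) commands are DDP candidates; if the
+	 * gather list or tag reservation fails, fall back to a plain
+	 * non-ddp tag below.
+	 */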
+	if (sc &&
+		(scsi_bidi_cmnd(sc) ||
+		 sc->sc_data_direction == DMA_FROM_DEVICE) &&
+			cxgbi_sw_tag_usable(tformat, sw_tag)) {
+
+		struct cxgbi_sock *csk = cconn->cep->csk;
+		struct cxgbi_gather_list *gl;
+
+		gl = cdev->ddp_make_gl(scsi_in(sc)->length,
+					scsi_in(sc)->table.sgl,
+					scsi_in(sc)->table.nents,
+					cdev->pdev, GFP_ATOMIC);
+		if (gl) {
+			tag = sw_tag;
+			err = cdev->ddp_tag_reserve(cdev, csk->hwtid,
+							tformat, &tag,
+							gl, GFP_ATOMIC);
+			if (err < 0)
+				cdev->ddp_release_gl(gl, cdev->pdev);
+		}
+	}
+	if (err < 0)
+		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
+	/*  the itt needs to be sent in big-endian order */
+	*hdr_itt = (__force itt_t)htonl(tag);
+
+	cxgbi_tag_debug("new sc 0x%p tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
+			sc, tag, *hdr_itt, task->itt, sess->age);
+	return 0;
+}
+
+
+void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt,
+				int *idx, int *age)
+{
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct cxgbi_conn *cconn = tcp_conn->dd_data;
+	struct cxgbi_device *cdev = cconn->chba->cdev;
+	u32 tag = ntohl((__force u32) itt);
+	u32 sw_bits;
+
+	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
+	if (idx)
+		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
+	if (age)
+		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
+
+	cxgbi_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
+			tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
+			age ? *age : 0xFF);
+}
+
+void cxgbi_cleanup_task(struct iscsi_task *task)
+{
+	struct cxgbi_task_data *tdata = task->dd_data +
+				sizeof(struct iscsi_tcp_task);
+
+	/*  never reached the xmit task callout */
+	if (tdata->skb)
+		__kfree_skb(tdata->skb);
+	memset(tdata, 0, sizeof(*tdata));
+
+	cxgbi_release_itt(task, task->hdr_itt);
+	iscsi_tcp_cleanup_task(task);
+}
+
diff --git a/drivers/scsi/cxgb4i/libcxgbi.h b/drivers/scsi/cxgb4i/libcxgbi.h
new file mode 100644
index 0000000..058a9aa
--- /dev/null
+++ b/drivers/scsi/cxgb4i/libcxgbi.h
@@ -0,0 +1,430 @@
+/*
+ * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ * Written by: Rakesh Ranjan (rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org)
+ */
+
+#ifndef	__LIBCXGBI_H__
+#define	__LIBCXGBI_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <scsi/libiscsi_tcp.h>
+
+
+#define	cxgbi_log_error(fmt...)	printk(KERN_ERR "cxgbi: ERR! " fmt)
+#define cxgbi_log_warn(fmt...)	printk(KERN_WARNING "cxgbi: WARN! " fmt)
+#define cxgbi_log_info(fmt...)	printk(KERN_INFO "cxgbi: " fmt)
+#define cxgbi_debug_log(fmt, args...) \
+	printk(KERN_INFO "cxgbi: %s - " fmt, __func__ , ## args)
+
+
+#ifdef	__DEBUG_CXGB4I__
+#define	cxgbi_log_debug	cxgbi_debug_log
+#else
+#define cxgbi_log_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB4I_TAG__
+#define cxgbi_tag_debug        cxgbi_log_debug
+#else
+#define cxgbi_tag_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB4I_API__
+#define cxgbi_api_debug        cxgbi_log_debug
+#else
+#define cxgbi_api_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB4I_CONN__
+#define cxgbi_conn_debug         cxgbi_log_debug
+#else
+#define cxgbi_conn_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB4I_TX__
+#define cxgbi_tx_debug           cxgbi_log_debug
+#else
+#define cxgbi_tx_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB4I_RX__
+#define cxgbi_rx_debug           cxgbi_log_debug
+#else
+#define cxgbi_rx_debug(fmt...)
+#endif
+
+#define	ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8)*/
+#define ULP2_MAX_PKT_SIZE		16224
+#define ULP2_MAX_PDU_PAYLOAD	\
+	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
+
+/* # of pages a pagepod can hold without needing another pagepod */
+#define PPOD_PAGES			4
+#define PPOD_PAGES_MAX			4
+#define PPOD_PAGES_SHIFT		2       /*  4 pages per pod */
+
+
+struct cxgbi_tag_format {
+	unsigned char sw_bits;
+	unsigned char rsvd_bits;
+	unsigned char rsvd_shift;
+	unsigned char filler[1];
+	unsigned int rsvd_mask;
+};
+
+struct cxgbi_gather_list {
+	unsigned int tag;
+	unsigned int length;
+	unsigned int offset;
+	unsigned int nelem;
+	struct page **pages;
+	dma_addr_t phys_addr[0];
+};
+
+struct cxgbi_sock {
+	struct net_device *egdev;
+	struct cxgbi_device *cdev;
+
+	unsigned long flags;
+	unsigned int qset;
+	unsigned int rss_qid;
+
+	unsigned int hwtid;
+	unsigned int atid;
+
+	unsigned int tx_chan;
+	unsigned int rx_chan;
+	unsigned int mss_idx;
+	unsigned int smac_idx;
+	unsigned char port_id;
+
+	void *l2t;
+
+	int wr_max_cred;
+	int wr_cred;
+	int wr_una_cred;
+
+	struct sk_buff *wr_pending_head;
+	struct sk_buff *wr_pending_tail;
+	struct sk_buff *cpl_close;
+	struct sk_buff *cpl_abort_req;
+	struct sk_buff *cpl_abort_rpl;
+	struct sk_buff *skb_ulp_lhdr;
+	spinlock_t lock;
+	atomic_t refcnt;
+	volatile unsigned int state;
+	struct sockaddr_in saddr;
+	struct sockaddr_in daddr;
+	struct dst_entry *dst;
+	struct sk_buff_head receive_queue;
+	struct sk_buff_head write_queue;
+	struct timer_list retry_timer;
+	int err;
+	rwlock_t callback_lock;
+	void *user_data;
+
+	u32 rcv_nxt;
+	u32 copied_seq;
+	u32 rcv_wup;
+	u32 snd_nxt;
+	u32 snd_una;
+	u32 write_seq;
+};
+
+enum cxgbi_sock_states {
+	CXGBI_CSK_ST_CONNECTING = 1,
+	CXGBI_CSK_ST_ESTABLISHED,
+	CXGBI_CSK_ST_ACTIVE_CLOSE,
+	CXGBI_CSK_ST_PASSIVE_CLOSE,
+	CXGBI_CSK_ST_CLOSE_WAIT_1,
+	CXGBI_CSK_ST_CLOSE_WAIT_2,
+	CXGBI_CSK_ST_ABORTING,
+	CXGBI_CSK_ST_CLOSED,
+};
+
+enum cxgbi_sock_flags {
+	CXGBI_CSK_FL_ABORT_RPL_RCVD,	/*received one ABORT_RPL_RSS message */
+	CXGBI_CSK_FL_ABORT_REQ_RCVD,	/*received one ABORT_REQ_RSS message */
+	CXGBI_CSK_FL_ABORT_RPL_PENDING,	/* expecting an abort reply */
+	CXGBI_CSK_FL_TX_DATA_SENT,	/* already sent a TX_DATA WR */
+	CXGBI_CSK_FL_ACTIVE_CLOSE_NEEDED,	/* need to be closed */
+	CXGBI_CSK_FL_OFFLOAD_DOWN,		/* offload function off */
+};
+
+static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
+					enum cxgbi_sock_flags flag)
+{
+	__set_bit(flag, &csk->flags);
+	cxgbi_conn_debug("csk 0x%p, set %d, state %u, flags 0x%lx\n",
+			csk, flag, csk->state, csk->flags);
+}
+
+static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
+					enum cxgbi_sock_flags flag)
+{
+	__clear_bit(flag, &csk->flags);
+	cxgbi_conn_debug("csk 0x%p, clear %d, state %u, flags 0x%lx\n",
+			csk, flag, csk->state, csk->flags);
+}
+
+static inline int cxgbi_sock_flag(struct cxgbi_sock *csk,
+				enum cxgbi_sock_flags flag)
+{
+	if (csk == NULL)
+		return 0;
+
+	return test_bit(flag, &csk->flags);
+}
+
+static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
+{
+	csk->state = state;
+}
+
+static inline void cxgbi_sock_hold(struct cxgbi_sock *csk)
+{
+	atomic_inc(&csk->refcnt);
+}
+
+static inline void cxgbi_sock_put(struct cxgbi_sock *csk)
+{
+	if (atomic_dec_and_test(&csk->refcnt)) {
+		cxgbi_conn_debug("free csk 0x%p, state %u, flags 0x%lx\n",
+				csk, csk->state, csk->flags);
+		kfree(csk);
+	}
+}
+
+static inline unsigned int cxgbi_sock_is_closing(
+			const struct cxgbi_sock *csk)
+{
+	return csk->state >= CXGBI_CSK_ST_ACTIVE_CLOSE;
+}
+
+static inline unsigned int cxgbi_sock_is_established(
+				const struct cxgbi_sock *csk)
+{
+	return csk->state == CXGBI_CSK_ST_ESTABLISHED;
+}
+
+int cxgbi_sock_get_port(struct cxgbi_sock *);
+void cxgbi_sock_put_port(struct cxgbi_sock *);
+
+struct cxgbi_hba {
+	struct net_device *ndev;
+	struct Scsi_Host *shost;
+	struct cxgbi_device *cdev;
+	__be32 ipv4addr;
+};
+
+struct cxgbi_ports_map {
+	unsigned int max_connect;
+	unsigned short sport_base;
+	spinlock_t lock;
+	unsigned int next;
+	struct cxgbi_sock *port_csk[0];
+};
+
+struct cxgbi_device {
+	struct list_head list_head;
+	struct net_device *ndev;
+	struct pci_dev *pdev;
+	unsigned int skb_tx_headroom;
+	unsigned int skb_extra_headroom;
+	struct page *pad_page;
+
+	void (*tx_skb_setmode)(struct sk_buff *, int, int);
+	int (*sock_send_pdus)(struct cxgbi_sock *, struct sk_buff *);
+	int (*ddp_tag_reserve)(struct cxgbi_device *, unsigned int,
+				struct cxgbi_tag_format *, u32 *,
+				struct cxgbi_gather_list *, gfp_t);
+	void (*ddp_tag_release)(struct cxgbi_device *, u32);
+	struct cxgbi_gather_list* (*ddp_make_gl)(unsigned int,
+						struct scatterlist *,
+						unsigned int,
+						struct pci_dev *,
+						gfp_t);
+	void (*ddp_release_gl)(struct cxgbi_gather_list *, struct pci_dev *);
+	__u16 (*get_skb_ulp_mode)(struct sk_buff *);
+
+	struct cxgbi_tag_format tag_format;
+	struct cxgbi_ports_map *pmap;
+
+	void *dd_data;
+};
+
+struct cxgbi_conn {
+	struct list_head list_head;
+	struct cxgbi_endpoint *cep;
+	struct iscsi_conn *iconn;
+	struct cxgbi_hba *chba;
+	u32 task_idx_bits;
+};
+
+struct cxgbi_endpoint {
+	struct cxgbi_conn *cconn;
+	struct cxgbi_hba *chba;
+	struct cxgbi_sock *csk;
+};
+
+#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
+struct cxgbi_task_data {
+	unsigned short nr_frags;
+	skb_frag_t frags[MAX_PDU_FRAGS];
+	struct sk_buff *skb;
+	unsigned int offset;
+	unsigned int count;
+	unsigned int sgoffset;
+};
+
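+/* a tag is hw-DDP'able when the top bit of the reserved field is clear */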
+static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag)
+{
+	return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
+}
+
+static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat,
+					u32 sw_tag)
+{
+	sw_tag >>= (32 - tformat->rsvd_bits);
+	return !sw_tag;
+}
+
+static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat,
+					u32 sw_tag)
+{
+	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+
+	u32 mask = (1 << shift) - 1;
+
+	if (sw_tag && (sw_tag & ~mask)) {
+		u32 v1 = sw_tag & ((1 << shift) - 1);
+		u32 v2 = (sw_tag >> (shift - 1)) << shift;
+
+		return v2 | v1 | 1 << shift;
+	}
+
+	return sw_tag | 1 << shift;
+}
+
+static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat,
+					u32 sw_tag)
+{
+	u32 mask = (1 << tformat->rsvd_shift) - 1;
+
+	if (sw_tag && (sw_tag & ~mask)) {
+		u32 v1 = sw_tag & mask;
+		u32 v2 = sw_tag >> tformat->rsvd_shift;
+
+		v2 <<= tformat->rsvd_bits + tformat->rsvd_shift;
+
+		return v2 | v1;
+	}
+
+	return sw_tag;
+}
+
+static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat,
+					u32 tag)
+{
+	if (cxgbi_is_ddp_tag(tformat, tag))
+		return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
+
+	return 0;
+}
+
+static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
+					u32 tag)
+{
+	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+	u32 v1, v2;
+
+	if (cxgbi_is_ddp_tag(tformat, tag)) {
+		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
+		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
+	} else {
+		u32 mask = (1 << shift) - 1;
+		tag &= ~(1 << shift);
+		v1 = tag & mask;
+		v2 = (tag >> 1) & ~mask;
+	}
+	return v1 | v2;
+}
+
+static inline void *cxgbi_alloc_big_mem(unsigned int size,
+					gfp_t gfp)
+{
+	void *p = kmalloc(size, gfp);
+	if (!p)
+		p = vmalloc(size);
+	if (p)
+		memset(p, 0, size);
+	return p;
+}
+
+static inline void cxgbi_free_big_mem(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+
+#define RX_DDP_STATUS_IPP_SHIFT		27      /* invalid pagepod */
+#define RX_DDP_STATUS_TID_SHIFT		26      /* tid mismatch */
+#define RX_DDP_STATUS_COLOR_SHIFT	25      /* color mismatch */
+#define RX_DDP_STATUS_OFFSET_SHIFT	24      /* offset mismatch */
+#define RX_DDP_STATUS_ULIMIT_SHIFT	23      /* ulimit error */
+#define RX_DDP_STATUS_TAG_SHIFT		22      /* tag mismatch */
+#define RX_DDP_STATUS_DCRC_SHIFT	21      /* dcrc error */
+#define RX_DDP_STATUS_HCRC_SHIFT	20      /* hcrc error */
+#define RX_DDP_STATUS_PAD_SHIFT		19      /* pad error */
+#define RX_DDP_STATUS_PPP_SHIFT		18      /* pagepod parity error */
+#define RX_DDP_STATUS_LLIMIT_SHIFT	17      /* llimit error */
+#define RX_DDP_STATUS_DDP_SHIFT		16      /* ddp'able */
+#define RX_DDP_STATUS_PMM_SHIFT		15      /* pagepod mismatch */
+
+
+#define ULP2_FLAG_DATA_READY		0x1
+#define ULP2_FLAG_DATA_DDPED		0x2
+#define ULP2_FLAG_HCRC_ERROR		0x4
+#define ULP2_FLAG_DCRC_ERROR		0x8
+#define ULP2_FLAG_PAD_ERROR		0x10
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+int cxgbi_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
+void cxgbi_release_itt(struct iscsi_task *task, itt_t hdr_itt);
+void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt,
+				int *idx, int *age);
+void cxgbi_cleanup_task(struct iscsi_task *task);
+
+int cxgbi_conn_read_bhs_pdu_skb(struct iscsi_conn *, struct sk_buff *);
+int cxgbi_conn_read_data_pdu_skb(struct iscsi_conn *, struct sk_buff *);
+void cxgbi_conn_tx_open(struct cxgbi_sock *);
+int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int);
+int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8);
+int cxgbi_conn_xmit_pdu(struct iscsi_task *);
+
+int cxgbi_pdu_init(struct cxgbi_device *);
+void cxgbi_pdu_cleanup(struct cxgbi_device *);
+
+
+#endif	/*__LIBCXGBI_H__*/
+
-- 
1.6.6.1


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part
  2010-05-15 17:24           ` [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part Rakesh Ranjan
@ 2010-05-27  5:59             ` Mike Christie
       [not found]               ` <4BFE0A3E.9020804-hcNo3dDEHLuVc3sceRu5cw@public.gmane.org>
  0 siblings, 1 reply; 9+ messages in thread
From: Mike Christie @ 2010-05-27  5:59 UTC (permalink / raw)
  To: open-iscsi
  Cc: Rakesh Ranjan, NETDEVML, SCSIDEVML, LKML, Karen Xie, David Miller,
	James Bottomley, Anish Bhatt, Rakesh Ranjan

On 05/15/2010 12:24 PM, Rakesh Ranjan wrote:
> From: Rakesh Ranjan<rranjan@chelsio.com>
>
>
> Signed-off-by: Rakesh Ranjan<rakesh@chelsio.com>
> ---
>   drivers/scsi/cxgb4i/cxgb4i_iscsi.c |  617 ++++++++++++++++++++++++++++++++++++
>   drivers/scsi/cxgb4i/libcxgbi.c     |  589 ++++++++++++++++++++++++++++++++++
>   drivers/scsi/cxgb4i/libcxgbi.h     |  430 +++++++++++++++++++++++++

I think the patch had some whitespace/newline issues. When I did git-am 
I got:

warning: squelched 1 whitespace error
warning: 6 lines add whitespace errors.

I think James can just fix up when he merges with git, but in the future 
you might want to try a git-am on your patch before you send (git-am 
--whitespace=fix will fix it up for you).



> +
> +int cxgbi_pdu_init(struct cxgbi_device *cdev)
> +{
> +	if (cdev->skb_tx_headroom>  (512 * MAX_SKB_FRAGS))
> +		cdev->skb_extra_headroom = cdev->skb_tx_headroom;
> +	cdev->pad_page = alloc_page(GFP_KERNEL);
> +	if (cdev->pad_page)
> +		return -ENOMEM;
> +	memset(page_address(cdev->pad_page), 0, PAGE_SIZE);
> +	return 0;
> +
> +	/*
> +	if (SKB_TX_HEADROOM>  (512 * MAX_SKB_FRAGS))
> +		skb_extra_headroom = SKB_TX_HEADROOM;
> +	pad_page = alloc_page(GFP_KERNEL);
> +	if (!pad_page)
> +		return -ENOMEM;
> +	memset(page_address(pad_page), 0, PAGE_SIZE);
> +	return 0;*/

Clean this up.
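
Something like this (untested sketch) would drop the dead block and also
fix the inverted alloc_page() check a few lines up:

	int cxgbi_pdu_init(struct cxgbi_device *cdev)
	{
		if (cdev->skb_tx_headroom > (512 * MAX_SKB_FRAGS))
			cdev->skb_extra_headroom = cdev->skb_tx_headroom;
		cdev->pad_page = alloc_page(GFP_KERNEL);
		if (!cdev->pad_page)
			return -ENOMEM;
		memset(page_address(cdev->pad_page), 0, PAGE_SIZE);
		return 0;
	}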

> +
> +void cxgbi_release_itt(struct iscsi_task *task, itt_t hdr_itt)
> +{
> +	struct scsi_cmnd *sc = task->sc;
> +	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
> +	struct cxgbi_conn *cconn = tcp_conn->dd_data;
> +	struct cxgbi_device *cdev = cconn->chba->cdev;
> +	struct cxgbi_tag_format *tformat =&cdev->tag_format;
> +	u32 tag = ntohl((__force u32)hdr_itt);
> +
> +	cxgbi_tag_debug("release tag 0x%x.\n", tag);
> +
> +	if (sc&&
> +		(scsi_bidi_cmnd(sc) ||
> +		 sc->sc_data_direction == DMA_FROM_DEVICE)&&
> +			cxgbi_is_ddp_tag(tformat, tag))

The formatting is a little weird. I think you want each line to start in 
the same column instead of each getting tabbed over.
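
i.e. with the continuation lines lined up under the start of the
condition, something like:

	if (sc &&
	    (scsi_bidi_cmnd(sc) ||
	     sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		cdev->ddp_tag_release(cdev, tag);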


> +
> +
> +int cxgbi_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
> +{
> +	struct scsi_cmnd *sc = task->sc;
> +	struct iscsi_conn *conn = task->conn;
> +	struct iscsi_session *sess = conn->session;
> +	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
> +	struct cxgbi_conn *cconn = tcp_conn->dd_data;
> +	struct cxgbi_device *cdev = cconn->chba->cdev;
> +	struct cxgbi_tag_format *tformat =&cdev->tag_format;
> +	u32 sw_tag = (sess->age<<  cconn->task_idx_bits) | task->itt;
> +	u32 tag;
> +	int err = -EINVAL;
> +
> +	if (sc&&
> +		(scsi_bidi_cmnd(sc) ||
> +		 sc->sc_data_direction == DMA_FROM_DEVICE)&&
> +			cxgbi_sw_tag_usable(tformat, sw_tag)) {


Same tabbing.


> +	volatile unsigned int state;

I did not get why this needed to be volatile (I see it is like this in 
cxgb3i and I did not see why there either).
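
If every state transition is already made under csk->lock, a plain
unsigned int with the existing helpers should be enough; a rough sketch of
what I mean:

	static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
	{
		spin_lock_bh(&csk->lock);
		csk->state = state;
		spin_unlock_bh(&csk->lock);
	}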



> +
> +static inline void cxgbi_sock_hold(struct cxgbi_sock *csk)
> +{
> +	atomic_inc(&csk->refcnt);


We want people to use krefs instead of their own refcounting now.
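
i.e. something like this (untested sketch, with refcnt changed from
atomic_t to a struct kref in struct cxgbi_sock):

	static inline void cxgbi_sock_hold(struct cxgbi_sock *csk)
	{
		kref_get(&csk->refcnt);
	}

	static void cxgbi_sock_free(struct kref *kref)
	{
		struct cxgbi_sock *csk = container_of(kref, struct cxgbi_sock,
						      refcnt);

		cxgbi_conn_debug("free csk 0x%p, state %u, flags 0x%lx\n",
				csk, csk->state, csk->flags);
		kfree(csk);
	}

	static inline void cxgbi_sock_put(struct cxgbi_sock *csk)
	{
		kref_put(&csk->refcnt, cxgbi_sock_free);
	}

plus a kref_init(&csk->refcnt) in the socket allocation path.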

> +
> +static inline void *cplhdr(struct sk_buff *skb)
> +{
> +	return skb->data;
> +}


Seems kinda useless.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part
       [not found]               ` <4BFE0A3E.9020804-hcNo3dDEHLuVc3sceRu5cw@public.gmane.org>
@ 2010-05-27  6:05                 ` Rakesh Ranjan
  0 siblings, 0 replies; 9+ messages in thread
From: Rakesh Ranjan @ 2010-05-27  6:05 UTC (permalink / raw)
  To: Mike Christie
  Cc: open-iscsi-/JYPxA39Uh5TLH3MbocFFw, Rakesh Ranjan, NETDEVML,
	SCSIDEVML, LKML, Karen Xie, David Miller, James Bottomley,
	Anish Bhatt

On 05/27/2010 11:29 AM, Mike Christie wrote:
> On 05/15/2010 12:24 PM, Rakesh Ranjan wrote:
>> From: Rakesh Ranjan<rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
>>
>>
>> Signed-off-by: Rakesh Ranjan<rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
>> ---
>>   drivers/scsi/cxgb4i/cxgb4i_iscsi.c |  617
>> ++++++++++++++++++++++++++++++++++++
>>   drivers/scsi/cxgb4i/libcxgbi.c     |  589
>> ++++++++++++++++++++++++++++++++++
>>   drivers/scsi/cxgb4i/libcxgbi.h     |  430 +++++++++++++++++++++++++
> 
> I think the patch had some whitespace/newline issues. When I did git-am
> I got:
> 
> warning: squelched 1 whitespace error
> warning: 6 lines add whitespace errors.
> 
> I think James can just fix up when he merges with git, but in the future
> you might want to try a git-am on your patch before you send (git-am
> --whitespace=fix will fix it up for you).
> 
> 
> 
>> +
>> +int cxgbi_pdu_init(struct cxgbi_device *cdev)
>> +{
>> +    if (cdev->skb_tx_headroom>  (512 * MAX_SKB_FRAGS))
>> +        cdev->skb_extra_headroom = cdev->skb_tx_headroom;
>> +    cdev->pad_page = alloc_page(GFP_KERNEL);
>> +    if (cdev->pad_page)
>> +        return -ENOMEM;
>> +    memset(page_address(cdev->pad_page), 0, PAGE_SIZE);
>> +    return 0;
>> +
>> +    /*
>> +    if (SKB_TX_HEADROOM>  (512 * MAX_SKB_FRAGS))
>> +        skb_extra_headroom = SKB_TX_HEADROOM;
>> +    pad_page = alloc_page(GFP_KERNEL);
>> +    if (!pad_page)
>> +        return -ENOMEM;
>> +    memset(page_address(pad_page), 0, PAGE_SIZE);
>> +    return 0;*/
> 
> Clean this up.
> 
>> +
>> +void cxgbi_release_itt(struct iscsi_task *task, itt_t hdr_itt)
>> +{
>> +    struct scsi_cmnd *sc = task->sc;
>> +    struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
>> +    struct cxgbi_conn *cconn = tcp_conn->dd_data;
>> +    struct cxgbi_device *cdev = cconn->chba->cdev;
>> +    struct cxgbi_tag_format *tformat =&cdev->tag_format;
>> +    u32 tag = ntohl((__force u32)hdr_itt);
>> +
>> +    cxgbi_tag_debug("release tag 0x%x.\n", tag);
>> +
>> +    if (sc&&
>> +        (scsi_bidi_cmnd(sc) ||
>> +         sc->sc_data_direction == DMA_FROM_DEVICE)&&
>> +            cxgbi_is_ddp_tag(tformat, tag))
> 
> The formatting is a little weird. I think you want each line to start in
> the same column instead of each getting tabbed over.
> 
> 
>> +
>> +
>> +int cxgbi_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
>> +{
>> +    struct scsi_cmnd *sc = task->sc;
>> +    struct iscsi_conn *conn = task->conn;
>> +    struct iscsi_session *sess = conn->session;
>> +    struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
>> +    struct cxgbi_conn *cconn = tcp_conn->dd_data;
>> +    struct cxgbi_device *cdev = cconn->chba->cdev;
>> +    struct cxgbi_tag_format *tformat =&cdev->tag_format;
>> +    u32 sw_tag = (sess->age<<  cconn->task_idx_bits) | task->itt;
>> +    u32 tag;
>> +    int err = -EINVAL;
>> +
>> +    if (sc&&
>> +        (scsi_bidi_cmnd(sc) ||
>> +         sc->sc_data_direction == DMA_FROM_DEVICE)&&
>> +            cxgbi_sw_tag_usable(tformat, sw_tag)) {
> 
> 
> Same tabbing.
> 
> 
>> +    volatile unsigned int state;
> 
> I did not get why this needed to be volatile (I see it is like this in
> cxgb3i and I did not see why in there too).
> 
> 
> 
>> +
>> +static inline void cxgbi_sock_hold(struct cxgbi_sock *csk)
>> +{
>> +    atomic_inc(&csk->refcnt);
> 
> 
> We want people to use krefs instead of their own refcounting now.
> 
>> +
>> +static inline void *cplhdr(struct sk_buff *skb)
>> +{
>> +    return skb->data;
>> +}
> 
> 
> Seems kinda useless.

Hi Mike,

Thanks for the review, will send you the updated patch.

Regards
Rakesh Ranjan


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] cxgb4i_v3: main driver files
       [not found]         ` <1273944249-311-3-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2010-05-15 17:24           ` [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part Rakesh Ranjan
@ 2010-05-27  7:38           ` Mike Christie
  2010-05-27  7:40             ` Mike Christie
  1 sibling, 1 reply; 9+ messages in thread
From: Mike Christie @ 2010-05-27  7:38 UTC (permalink / raw)
  To: open-iscsi-/JYPxA39Uh5TLH3MbocFFw
  Cc: Rakesh Ranjan, NETDEVML, SCSIDEVML, LKML, Karen Xie, David Miller,
	James Bottomley, Anish Bhatt, Rakesh Ranjan

On 05/15/2010 12:24 PM, Rakesh Ranjan wrote:
> From: Rakesh Ranjan<rranjan-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
>
>
> Signed-off-by: Rakesh Ranjan<rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
> ---
>   drivers/scsi/cxgb4i/cxgb4i.h         |  101 ++
>   drivers/scsi/cxgb4i/cxgb4i_ddp.c     |  678 +++++++++++++
>   drivers/scsi/cxgb4i/cxgb4i_ddp.h     |  118 +++
>   drivers/scsi/cxgb4i/cxgb4i_offload.c | 1846 ++++++++++++++++++++++++++++++++++
>   drivers/scsi/cxgb4i/cxgb4i_offload.h |   91 ++
>   drivers/scsi/cxgb4i/cxgb4i_snic.c    |  260 +++++
>   6 files changed, 3094 insertions(+), 0 deletions(-)
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i.h
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.c
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.h
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.c
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.h
>   create mode 100644 drivers/scsi/cxgb4i/cxgb4i_snic.c



Got some whitespace errors when applying.

warning: squelched 1 whitespace error
warning: 6 lines add whitespace errors.



> +#define	CXGB4I_SCSI_HOST_QDEPTH	1024
> +#define	CXGB4I_MAX_TARGET	CXGB4I_MAX_CONN
> +#define	CXGB4I_MAX_LUN		512

Is the max lun right? It seems kinda small for modern drivers.



> +
> +static inline void cxgb4i_ddp_ulp_mem_io_set_hdr(struct ulp_mem_io *req,


If you build cxgb3i and cxgb4i in the kernel at the same time, will you 
get problems if each driver has structs that are named the same?


> +
> +static inline int cxgb4i_ddp_find_unused_entries(struct cxgb4i_ddp_info *ddp,
> +					unsigned int start, unsigned int max,
> +					unsigned int count,
> +					struct cxgbi_gather_list *gl)
> +{
> +	unsigned int i, j, k;
> +
> +	/*  not enough entries */
> +	if ((max - start)<  count)
> +		return -EBUSY;
> +
> +	max -= count;
> +	spin_lock(&ddp->map_lock);
> +	for (i = start; i<  max;) {
> +		for (j = 0, k = i; j<  count; j++, k++) {
> +			if (ddp->gl_map[k])
> +				break;
> +		}
> +		if (j == count) {
> +			for (j = 0, k = i; j<  count; j++, k++)
> +				ddp->gl_map[k] = gl;
> +			spin_unlock(&ddp->map_lock);
> +			return i;
> +		}


Is there a more efficient bitmap or some sort of common map operation 
for this (I thought we found something when doing cxgb3i but forgot to 
add it or were testing a patch)?
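Something along these lines, perhaps -- a rough sketch using the generic
helpers from <linux/bitmap.h>, assuming ddp keeps an unsigned long *map
bitmap alongside gl_map (the map field and the trimmed-down signature are
illustrative; the gl pointers would still be stored in gl_map as before):

#include <linux/bitmap.h>

static int ddp_find_unused_entries(struct cxgb4i_ddp_info *ddp,
				   unsigned int start, unsigned int max,
				   unsigned int count)
{
	unsigned long idx;

	spin_lock(&ddp->map_lock);
	idx = bitmap_find_next_zero_area(ddp->map, max, start, count, 0);
	if (idx >= max) {
		spin_unlock(&ddp->map_lock);
		return -EBUSY;
	}
	bitmap_set(ddp->map, idx, count);	/* claim the run of entries */
	spin_unlock(&ddp->map_lock);
	return (int)idx;
}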



> +		i += j + 1;
> +	}
> +	spin_unlock(&ddp->map_lock);
> +	return -EBUSY;
> +}
> +
> +static inline void cxgb4i_ddp_unmark_entries(struct cxgb4i_ddp_info *ddp,
> +							int start, int count)
> +{
> +	spin_lock(&ddp->map_lock);
> +	memset(&ddp->gl_map[start], 0,
> +			count * sizeof(struct cxgbi_gather_list *));

extra tab.



> +static void __cxgb4i_ddp_init(struct cxgb4i_snic *snic)
> +{
> +	struct cxgb4i_ddp_info *ddp = snic->ddp;
> +	unsigned int ppmax, bits, tagmask, pgsz_factor[4];
> +	int i;
> +
> +	if (ddp) {
> +		kref_get(&ddp->refcnt);
> +		cxgbi_log_warn("snic 0x%p, ddp 0x%p already set up\n",
> +				snic, snic->ddp);
> +		return;
> +	}
> +
> +	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
> +	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
> +	snic->cdev.tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
> +
> +	cxgbi_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits\n",
> +			ISCSI_ITT_MASK, sw_tag_idx_bits,
> +			ISCSI_AGE_MASK, sw_tag_age_bits);
> +
> +	ppmax = (snic->lldi.vr->iscsi.size>>  PPOD_SIZE_SHIFT);
> +	bits = __ilog2_u32(ppmax) + 1;
> +	if (bits>  PPOD_IDX_MAX_SIZE)
> +		bits = PPOD_IDX_MAX_SIZE;
> +	ppmax = (1<<  (bits - 1)) - 1;
> +
> +	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgb4i_ddp_info) +
> +			ppmax * (sizeof(struct cxgbi_gather_list *) +
> +				sizeof(struct sk_buff *)),
> +				GFP_KERNEL);
> +	if (!ddp) {
> +		cxgbi_log_warn("snic 0x%p unable to alloc ddp 0x%d, "
> +			       "ddp disabled\n", snic, ppmax);
> +		return;
> +	}
> +
> +	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
> +	spin_lock_init(&ddp->map_lock);
> +	kref_init(&ddp->refcnt);
> +
> +	ddp->snic = snic;
> +	ddp->pdev = snic->lldi.pdev;
> +	ddp->max_txsz = min_t(unsigned int,
> +				snic->lldi.iscsi_iolen,
> +				ULP2_MAX_PKT_SIZE);
> +	ddp->max_rxsz = min_t(unsigned int,
> +				snic->lldi.iscsi_iolen,
> +				ULP2_MAX_PKT_SIZE);
> +	ddp->llimit = snic->lldi.vr->iscsi.start;
> +	ddp->ulimit = ddp->llimit + snic->lldi.vr->iscsi.size;
> +	ddp->nppods = ppmax;
> +	ddp->idx_last = ppmax;
> +	ddp->idx_bits = bits;
> +	ddp->idx_mask = (1<<  bits) - 1;
> +	ddp->rsvd_tag_mask = (1<<  (bits + PPOD_IDX_SHIFT)) - 1;
> +
> +	tagmask = ddp->idx_mask<<  PPOD_IDX_SHIFT;
> +	for (i = 0; i<  DDP_PGIDX_MAX; i++)
> +		pgsz_factor[i] = ddp_page_order[i];
> +
> +	cxgb4_iscsi_init(snic->lldi.ports[0], tagmask, pgsz_factor);
> +	snic->ddp = ddp;
> +
> +	snic->cdev.tag_format.rsvd_bits = ddp->idx_bits;
> +	snic->cdev.tag_format.rsvd_shift = PPOD_IDX_SHIFT;
> +	snic->cdev.tag_format.rsvd_mask =
> +		((1<<  snic->cdev.tag_format.rsvd_bits) - 1);
> +
> +	cxgbi_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
> +			snic->cdev.tag_format.sw_bits,
> +			snic->cdev.tag_format.rsvd_bits,
> +			snic->cdev.tag_format.rsvd_shift,
> +			snic->cdev.tag_format.rsvd_mask);
> +
> +	snic->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
> +				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
> +	snic->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
> +				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
> +
> +	cxgbi_log_info("max payload size: %u/%u, %u/%u.\n",
> +			snic->tx_max_size, ddp->max_txsz,
> +			snic->rx_max_size, ddp->max_rxsz);
> +
> +	cxgbi_log_info("snic 0x%p, nppods %u, bits %u, mask 0x%x,0x%x "
> +			"pkt %u/%u, %u/%u\n",
> +			snic, ppmax, ddp->idx_bits, ddp->idx_mask,
> +			ddp->rsvd_tag_mask, ddp->max_txsz,
> +			snic->lldi.iscsi_iolen,
> +			ddp->max_rxsz, snic->lldi.iscsi_iolen);
> +
> +	return;


Don't need "return".





> +static void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
> +{


Were you going to put this in libcxgbi but later decided it was a 
little too different? If you are leaving it here, could you add a cxgb4i 
prefix so the naming is consistent and there is no confusion with your 
lib functions?



cxgb4i_find_best_mtu looks like it could go in your lib. It looks like 
find_best_mtu from cxgb3i_offload.c. Same with select_mss and compute_wscale.


> +	struct sk_buff *skb;
> +	unsigned int read = 0;
> +	struct iscsi_conn *conn = csk->user_data;
> +	int err = 0;
> +
> +	cxgbi_rx_debug("csk 0x%p.\n", csk);
> +
> +	read_lock(&csk->callback_lock);
> +	if (unlikely(!conn || conn->suspend_rx)) {
> +		cxgbi_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
> +				conn, conn ? conn->id : 0xFF,
> +				conn ? conn->suspend_rx : 0xFF);
> +		read_unlock(&csk->callback_lock);
> +		return;
> +	}
> +	skb = skb_peek(&csk->receive_queue);
> +	while (!err&&  skb) {
> +		__skb_unlink(skb,&csk->receive_queue);
> +		read += cxgb4i_skb_rx_pdulen(skb);
> +		cxgbi_rx_debug("conn 0x%p, csk 0x%p, rx skb 0x%p, pdulen %u\n",
> +				conn, csk, skb, cxgb4i_skb_rx_pdulen(skb));
> +		if (cxgb4i_skb_flags(skb)&  CXGB4I_SKCB_FLAG_HDR_RCVD)
> +			err = cxgbi_conn_read_bhs_pdu_skb(conn, skb);
> +		else if (cxgb4i_skb_flags(skb) == CXGB4I_SKCB_FLAG_DATA_RCVD)
> +			err = cxgbi_conn_read_data_pdu_skb(conn, skb);
> +		__kfree_skb(skb);
> +		skb = skb_peek(&csk->receive_queue);
> +	}
> +	read_unlock(&csk->callback_lock);
> +	csk->copied_seq += read;
> +	cxgb4i_sock_rx_credits(csk, read);
> +	conn->rxdata_octets += read;
> +
> +	if (err) {
> +		cxgbi_log_info("conn 0x%p rx failed err %d.\n", conn, err);
> +		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
> +	}
> +}





> +
> +static inline void cxgb4i_sock_free_wr_skb(struct sk_buff *skb)
> +{
> +	kfree_skb(skb);
> +}


I think adding wrappers around skb functions in a net driver is not so 
useful.


> +
> +static inline struct sk_buff *cxgb4i_sock_dequeue_wr(struct cxgbi_sock *csk)
> +{
> +	struct sk_buff *skb = csk->wr_pending_head;
> +
> +	if (likely(skb)) {
> +		csk->wr_pending_head = cxgb4i_skb_tx_wr_next(skb);
> +		cxgb4i_skb_tx_wr_next(skb) = NULL;
> +	}
> +	return skb;
> +}
> +
> +static void cxgb4i_sock_purge_wr_queue(struct cxgbi_sock *csk)
> +{
> +	struct sk_buff *skb;
> +
> +	while ((skb = cxgb4i_sock_dequeue_wr(csk)) != NULL)
> +		cxgb4i_sock_free_wr_skb(skb);
> +}
> +
> +/*

I think this is supposed to be

/**
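
i.e. a kernel-doc header, something along the lines of (the summary and
parameter descriptions here are illustrative, not taken from the patch):

/**
 * cxgb4i_sock_push_tx_frames - push queued tx skbs out to the hardware
 * @csk: offload socket to drain
 * @req_completion: nonzero to request a completion from the hardware
 */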


> +static int cxgb4i_sock_push_tx_frames(struct cxgbi_sock *csk,
> +						int req_completion)
> +{
> +	int total_size = 0;
> +	struct sk_buff *skb;
> +	struct cxgb4i_snic *snic;
> +
> +	if (unlikely(csk->state == CXGBI_CSK_ST_CONNECTING ||
> +				csk->state == CXGBI_CSK_ST_CLOSE_WAIT_1 ||
> +				csk->state>= CXGBI_CSK_ST_ABORTING)) {
> +		cxgbi_tx_debug("csk 0x%p, in closing state %u.\n",
> +				csk, csk->state);
> +		return 0;
> +	}
> +
> +	snic = cxgb4i_get_snic(csk->cdev);
> +
> +	while (csk->wr_cred
> +			&&  (skb = skb_peek(&csk->write_queue)) != NULL) {


The && should be on the right:

while (csk->wr_cred &&

> +
> +static int cxgb4i_cpl_act_open_rpl(struct cxgb4i_snic *snic,
> +						struct sk_buff *skb)
> +{
> +	struct cxgbi_sock *csk;
> +	struct cpl_act_open_rpl *rpl = cplhdr(skb);
> +	unsigned int atid =
> +		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
> +	struct tid_info *t = snic->lldi.tids;
> +	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
> +
> +	csk = lookup_atid(t, atid);
> +
> +	if (unlikely(!csk)) {
> +		cxgbi_log_error("can't find connection for tid %u\n", atid);
> +		return CPL_RET_UNKNOWN_TID;
> +	}
> +
> +	cxgbi_sock_hold(csk);
> +	spin_lock_bh(&csk->lock);
> +
> +	cxgbi_conn_debug("rcv, status 0x%x, csk 0x%p, csk->state %u, "
> +			"csk->flag 0x%lx, csk->atid %u.\n",
> +			status, csk, csk->state, csk->flags, csk->hwtid);
> +
> +	if (status&  act_open_has_tid(status))
> +		cxgb4_remove_tid(snic->lldi.tids, csk->port_id, GET_TID(rpl));
> +
> +	if (status == CPL_ERR_CONN_EXIST&&
> +			csk->retry_timer.function !=
> +			cxgb4i_sock_act_open_retry_timer) {


I do not mean to nitpick on silly coding style stuff, but I think it is 
easier to read lines like this:

if (status == CPL_ERR_CONN_EXIST &&
    csk->retry_timer.function != cxgb4i_sock_act_open_retry_timer) {


> +		csk->retry_timer.function = cxgb4i_sock_act_open_retry_timer;
> +		if (!mod_timer(&csk->retry_timer, jiffies + HZ / 2))
> +			cxgbi_sock_hold(csk);


There is no del_timer/del_timer_sync + cxgbi_sock_put for this timer. If 
something cleans this csk up, what makes sure the timer gets stopped and 
its reference dropped?
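
A rough sketch of what the teardown side would need (the helper name is
made up for illustration, and it must not be called with csk->lock held
if the retry timer handler takes that lock):

static void cxgb4i_sock_stop_retry_timer(struct cxgbi_sock *csk)
{
	/*
	 * del_timer_sync() returns nonzero if it deactivated a still-pending
	 * timer; in that case the handler will not run, so the reference
	 * taken when the timer was armed has to be dropped here.
	 */
	if (del_timer_sync(&csk->retry_timer))
		cxgbi_sock_put(csk);
}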

> +
> +static int cxgb4i_alloc_cpl_skbs(struct cxgbi_sock *csk)
> +{
> +	csk->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
> +					GFP_KERNEL);
> +	if (!csk->cpl_close)
> +		return -ENOMEM;
> +	skb_put(csk->cpl_close, sizeof(struct cpl_close_con_req));
> +
> +	csk->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
> +					GFP_KERNEL);
> +	if (!csk->cpl_abort_req)
> +		goto free_cpl_skbs;
> +	skb_put(csk->cpl_abort_req, sizeof(struct cpl_abort_req));
> +
> +	csk->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
> +					GFP_KERNEL);


These should be GFP_NOIO, in case we are called to relogin to a disk 
whose data needs to be written out to free up memory.
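
i.e. just switching the flag on these allocations, roughly:

	csk->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req), GFP_NOIO);
	if (!csk->cpl_close)
		return -ENOMEM;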

> +
> +struct cxgbi_sock *cxgb4i_sock_create(struct cxgb4i_snic *snic)
> +{
> +	struct cxgbi_sock *csk = NULL;
> +
> +	csk = kzalloc(sizeof(*csk), GFP_KERNEL);

Same as above.

> +
> +static int is_cxgb4_dev(struct net_device *dev, struct cxgb4i_snic *snic)
> +{
> +	struct net_device *ndev = dev;
> +	int i;
> +
> +	if (dev->priv_flags&  IFF_802_1Q_VLAN)
> +		ndev = vlan_dev_real_dev(dev);
> +
> +	for (i = 0; i<  snic->lldi.nports; i++) {
> +		if (ndev == snic->lldi.ports[i])
> +			return 1;
> +	}
> +
> +	return 0;
> +}
> +
> +static struct net_device *cxgb4i_find_egress_dev(struct net_device *root_dev,
> +						struct cxgb4i_snic *snic)
> +{
> +	while (root_dev) {
> +		if (root_dev->priv_flags&  IFF_802_1Q_VLAN)
> +			root_dev = vlan_dev_real_dev(root_dev);
> +		else if (is_cxgb4_dev(root_dev, snic))
> +			return root_dev;
> +		else
> +			return NULL;
> +	}
> +
> +	return NULL;
> +}
> +
> +static struct rtable *find_route(struct net_device *dev,
> +				__be32 saddr, __be32 daddr,
> +				__be16 sport, __be16 dport,
> +				u8 tos)
> +{
> +	struct rtable *rt;
> +	struct flowi fl = {
> +		.oif = dev ? dev->ifindex : 0,
> +		.nl_u = {
> +			.ip4_u = {
> +				.daddr = daddr,
> +				.saddr = saddr,
> +				.tos = tos }
> +			},
> +		.proto = IPPROTO_TCP,
> +		.uli_u = {
> +			.ports = {
> +				.sport = sport,
> +				.dport = dport }
> +			}
> +	};
> +
> +	if (ip_route_output_flow(dev ? dev_net(dev) :&init_net,
> +					&rt,&fl, NULL, 0))
> +		return NULL;
> +
> +	return rt;
> +}
> +


Those functions above look like the cxgb3i ones. Could they be in your lib?



> +static int cxgb4i_init_act_open(struct cxgbi_sock *csk,
> +					struct net_device *dev)
> +{
> +	struct dst_entry *dst = csk->dst;
> +	struct sk_buff *skb;
> +	struct port_info *pi = netdev_priv(dev);
> +
> +	cxgbi_conn_debug("csk 0x%p, state %u, flags 0x%lx\n",
> +			csk, csk->state, csk->flags);
> +
> +	csk->atid = cxgb4_alloc_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids,
> +					csk);
> +	if (csk->atid == -1) {
> +		cxgbi_log_error("cannot alloc atid\n");
> +		goto out_err;
> +	}
> +
> +	csk->l2t = cxgb4_l2t_get(cxgb4i_get_snic(csk->cdev)->lldi.l2t,
> +				csk->dst->neighbour, dev, 0);
> +	if (!csk->l2t) {
> +		cxgbi_log_error("cannot alloc l2t\n");
> +		goto free_atid;
> +	}
> +
> +	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
> +	if (!skb)


Should be NOIO too.

> +		goto free_l2t;
> +
> +	skb->sk = (struct sock *)csk;
> +	t4_set_arp_err_handler(skb, csk, cxgb4i_act_open_req_arp_failure);
> +
> +	cxgbi_sock_hold(csk);
> +
> +	csk->wr_max_cred = csk->wr_cred =
> +		cxgb4i_get_snic(csk->cdev)->lldi.wr_cred;
> +	csk->port_id = pi->port_id;
> +	csk->rss_qid = cxgb4i_get_snic(csk->cdev)->lldi.rxq_ids[csk->port_id];
> +	csk->tx_chan = pi->tx_chan;
> +	csk->smac_idx = csk->tx_chan<<  1;
> +	csk->wr_una_cred = 0;
> +	csk->mss_idx = cxgb4i_select_mss(csk, dst_mtu(dst));
> +	csk->err = 0;
> +
> +	cxgb4i_sock_reset_wr_list(csk);
> +
> +	cxgb4i_sock_make_act_open_req(csk, skb,
> +					((csk->rss_qid<<  14) |
> +					 (csk->atid)), csk->l2t);
> +	cxgb4_l2t_send(cxgb4i_get_snic(csk->cdev)->lldi.ports[csk->port_id],
> +					skb, csk->l2t);
> +	return 0;
> +
> +free_l2t:
> +	cxgb4_l2t_release(csk->l2t);
> +
> +free_atid:
> +	cxgb4_free_atid(cxgb4i_get_snic(csk->cdev)->lldi.tids, csk->atid);
> +
> +out_err:
> +
> +	return -EINVAL;;
> +}
> +
> +static struct net_device *cxgb4i_find_dev(struct net_device *dev,
> +							__be32 ipaddr)
> +{
> +	struct flowi fl;
> +	struct rtable *rt;
> +	int err;
> +
> +	memset(&fl, 0, sizeof(fl));
> +	fl.nl_u.ip4_u.daddr = ipaddr;
> +
> +	err = ip_route_output_key(dev ? dev_net(dev) :&init_net,&rt,&fl);
> +	if (!err)
> +		return (&rt->u.dst)->dev;
> +
> +	return NULL;
> +}
> +


Looks like the cxgb3i one.


> +int cxgb4i_sock_connect(struct net_device *dev, struct cxgbi_sock *csk,
> +						struct sockaddr_in *sin)
> +{
> +	struct rtable *rt;
> +	__be32 sipv4 = 0;
> +	struct net_device *dstdev;
> +	struct cxgbi_hba *chba = NULL;
> +	int err;
> +
> +	cxgbi_conn_debug("csk 0x%p, dev 0x%p\n", csk, dev);
> +
> +	if (sin->sin_family != AF_INET)
> +		return -EAFNOSUPPORT;
> +
> +	csk->daddr.sin_port = sin->sin_port;
> +	csk->daddr.sin_addr.s_addr = sin->sin_addr.s_addr;
> +
> +	dstdev = cxgb4i_find_dev(dev, sin->sin_addr.s_addr);
> +	if (!dstdev || !is_cxgb4_dev(dstdev, cxgb4i_get_snic(csk->cdev)))
> +		return -ENETUNREACH;
> +
> +	if (dstdev->priv_flags&  IFF_802_1Q_VLAN)
> +		dev = dstdev;
> +
> +	rt = find_route(dev, csk->saddr.sin_addr.s_addr,
> +			csk->daddr.sin_addr.s_addr,
> +			csk->saddr.sin_port,
> +			csk->daddr.sin_port,
> +			0);
> +	if (rt == NULL) {
> +		cxgbi_conn_debug("no route to %pI4, port %u, dev %s, "
> +					"snic 0x%p\n",
> +					&csk->daddr.sin_addr.s_addr,
> +					ntohs(csk->daddr.sin_port),
> +					dev ? dev->name : "any",
> +					csk->snic);
> +		return -ENETUNREACH;
> +	}
> +
> +	if (rt->rt_flags&  (RTCF_MULTICAST | RTCF_BROADCAST)) {
> +		cxgbi_conn_debug("multi-cast route to %pI4, port %u, "
> +					"dev %s, snic 0x%p\n",
> +					&csk->daddr.sin_addr.s_addr,
> +					ntohs(csk->daddr.sin_port),
> +					dev ? dev->name : "any",
> +					csk->snic);
> +		ip_rt_put(rt);
> +		return -ENETUNREACH;
> +	}
> +
> +	if (!csk->saddr.sin_addr.s_addr)
> +		csk->saddr.sin_addr.s_addr = rt->rt_src;
> +
> +	csk->dst =&rt->u.dst;
> +
> +	dev = cxgb4i_find_egress_dev(csk->dst->dev,
> +					cxgb4i_get_snic(csk->cdev));
> +	if (dev == NULL) {
> +		cxgbi_conn_debug("csk: 0x%p, egress dev NULL\n", csk);
> +		return -ENETUNREACH;
> +	}
> +
> +	err = cxgbi_sock_get_port(csk);
> +	if (err)
> +		return err;
> +
> +	cxgbi_conn_debug("csk: 0x%p get port: %u\n",
> +			csk, ntohs(csk->saddr.sin_port));
> +
> +	chba = cxgb4i_hba_find_by_netdev(csk->dst->dev);
> +
> +	sipv4 = cxgb4i_get_iscsi_ipv4(chba);
> +	if (!sipv4) {
> +		cxgbi_conn_debug("csk: 0x%p, iscsi is not configured\n", csk);
> +		sipv4 = csk->saddr.sin_addr.s_addr;
> +		cxgb4i_set_iscsi_ipv4(chba, sipv4);
> +	} else
> +		csk->saddr.sin_addr.s_addr = sipv4;
> +
> +	cxgbi_conn_debug("csk: 0x%p, %pI4:[%u], %pI4:[%u] SYN_SENT\n",
> +				csk,
> +				&csk->saddr.sin_addr.s_addr,
> +				ntohs(csk->saddr.sin_port),
> +				&csk->daddr.sin_addr.s_addr,
> +				ntohs(csk->daddr.sin_port));
> +
> +	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CONNECTING);
> +
> +	if (!cxgb4i_init_act_open(csk, dev))
> +		return 0;
> +
> +	err = -ENOTSUPP;
> +
> +	cxgbi_conn_debug("csk 0x%p ->  closed\n", csk);
> +	cxgbi_sock_set_state(csk, CXGBI_CSK_ST_CLOSED);
> +	ip_rt_put(rt);
> +	cxgbi_sock_put_port(csk);
> +
> +	return err;
> +}
> +
> +void cxgb4i_sock_rx_credits(struct cxgbi_sock *csk, int copied)
> +{
> +	int must_send;
> +	u32 credits;
> +
> +	if (csk->state != CXGBI_CSK_ST_ESTABLISHED)
> +		return;
> +
> +	credits = csk->copied_seq - csk->rcv_wup;
> +	if (unlikely(!credits))
> +		return;
> +
> +	if (unlikely(cxgb4i_rx_credit_thres == 0))
> +		return;
> +
> +	must_send = credits + 16384>= cxgb4i_rcv_win;
> +
> +	if (must_send || credits>= cxgb4i_rx_credit_thres)
> +		csk->rcv_wup += cxgb4i_csk_send_rx_credits(csk, credits);
> +}
> +
> +int cxgb4i_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
> +{
> +	struct sk_buff *next;
> +	int err, copied = 0;
> +
> +	spin_lock_bh(&csk->lock);
> +
> +	if (csk->state != CXGBI_CSK_ST_ESTABLISHED) {
> +		cxgbi_tx_debug("csk 0x%p, not in est. state %u.\n",
> +			      csk, csk->state);
> +		err = -EAGAIN;
> +		goto out_err;
> +	}
> +
> +	if (csk->err) {
> +		cxgbi_tx_debug("csk 0x%p, err %d.\n", csk, csk->err);
> +		err = -EPIPE;
> +		goto out_err;
> +	}
> +
> +	if (csk->write_seq - csk->snd_una>= cxgb4i_snd_win) {
> +		cxgbi_tx_debug("csk 0x%p, snd %u - %u>  %u.\n",
> +				csk, csk->write_seq, csk->snd_una,
> +				cxgb4i_snd_win);
> +		err = -ENOBUFS;
> +		goto out_err;
> +	}
> +
> +	while (skb) {
> +		int frags = skb_shinfo(skb)->nr_frags +
> +				(skb->len != skb->data_len);
> +
> +		if (unlikely(skb_headroom(skb)<  CXGB4I_TX_HEADER_LEN)) {
> +			cxgbi_tx_debug("csk 0x%p, skb head.\n", csk);
> +			err = -EINVAL;
> +			goto out_err;
> +		}
> +
> +		if (frags>= SKB_WR_LIST_SIZE) {
> +			cxgbi_log_error("csk 0x%p, tx frags %d, len %u,%u.\n",
> +					 csk, skb_shinfo(skb)->nr_frags,
> +					 skb->len, skb->data_len);
> +			err = -EINVAL;
> +			goto out_err;
> +		}
> +
> +		next = skb->next;
> +		skb->next = NULL;
> +		cxgb4i_sock_skb_entail(csk, skb,
> +				CXGB4I_SKCB_FLAG_NO_APPEND |
> +				CXGB4I_SKCB_FLAG_NEED_HDR);
> +		copied += skb->len;
> +		csk->write_seq += skb->len + ulp_extra_len(skb);
> +		skb = next;
> +	}
> +done:
> +	if (likely(skb_queue_len(&csk->write_queue)))
> +		cxgb4i_sock_push_tx_frames(csk, 1);
> +	spin_unlock_bh(&csk->lock);
> +	return copied;
> +
> +out_err:
> +	if (copied == 0&&  err == -EPIPE)
> +		copied = csk->err ? csk->err : -EPIPE;
> +	else
> +		copied = err;
> +	goto done;
> +}

Looks similar to the cxgb3i one.


> +
> +static void cxgbi_sock_conn_closing(struct cxgbi_sock *csk)
> +{

Was this going to the lib? It looks like the cxgb3i one. If not, then 
rename it to avoid confusion.


> +
> +struct cxgbi_hba *cxgb4i_hba_find_by_netdev(struct net_device *dev)
> +{
> +	int i;
> +	struct cxgb4i_snic *snic = NULL;;
> +
> +	if (dev->priv_flags&  IFF_802_1Q_VLAN)
> +		dev = vlan_dev_real_dev(dev);
> +
> +	mutex_lock(&snic_rwlock);
> +	list_for_each_entry(snic,&snic_list, list_head) {
> +		for (i = 0; i<  snic->hba_cnt; i++) {
> +			if (snic->hba[i]->ndev == dev) {
> +				mutex_unlock(&snic_rwlock);
> +				return snic->hba[i];
> +			}
> +		}
> +	}
> +	mutex_unlock(&snic_rwlock);
> +	return NULL;


Looks like cxgb3i_hba_find_by_netdev.

> +}
> +
> +struct cxgb4i_snic *cxgb4i_find_snic(struct net_device *dev, __be32 ipaddr)
> +{
> +	struct flowi fl;
> +	struct rtable *rt;
> +	struct net_device *sdev = NULL;
> +	struct cxgb4i_snic *snic = NULL, *tmp;
> +	int err, i;
> +
> +	memset(&fl, 0, sizeof(fl));
> +	fl.nl_u.ip4_u.daddr = ipaddr;
> +
> +	err = ip_route_output_key(dev ? dev_net(dev) :&init_net,&rt,&fl);
> +	if (err)
> +		goto out;
> +
> +	sdev = (&rt->u.dst)->dev;
> +	mutex_lock(&snic_rwlock);
> +	list_for_each_entry_safe(snic, tmp,&snic_list, list_head) {
> +		if (snic) {
> +			for (i = 0; i<  snic->lldi.nports; i++) {
> +				if (sdev == snic->lldi.ports[i]) {
> +					mutex_unlock(&snic_rwlock);
> +					return snic;
> +				}
> +			}
> +		}
> +	}
> +	mutex_unlock(&snic_rwlock);
> +
> +out:
> +	snic = NULL;
> +	return snic;


you can just do return NULL


> +}
> +
> +void cxgb4i_snic_add(struct list_head *list_head)
> +{
> +	mutex_lock(&snic_rwlock);
> +	list_add_tail(list_head,&snic_list);
> +	mutex_unlock(&snic_rwlock);
> +}
> +
> +struct cxgb4i_snic *cxgb4i_snic_init(const struct cxgb4_lld_info *linfo)
> +{
> +	struct cxgb4i_snic *snic;
> +	int i;
> +
> +	snic = kzalloc(sizeof(*snic), GFP_KERNEL);
> +	if (snic) {
> +

extra newline

> +		spin_lock_init(&snic->lock);
> +		snic->lldi = *linfo;
> +		snic->hba_cnt = snic->lldi.nports;
> +		snic->cdev.dd_data = snic;
> +		snic->cdev.pdev = snic->lldi.pdev;
> +		snic->cdev.skb_tx_headroom = SKB_MAX_HEAD(CXGB4I_TX_HEADER_LEN);
> +
> +		cxgb4i_iscsi_init();
> +		cxgbi_pdu_init(&snic->cdev);
> +		cxgb4i_ddp_init(snic);
> +		cxgb4i_ofld_init(snic);
> +
> +		for (i = 0; i<  snic->hba_cnt; i++) {
> +			snic->hba[i] = cxgb4i_hba_add(snic,
> +						snic->lldi.ports[i]);
> +			if (!snic->hba[i]) {
> +				kfree(snic);
> +				snic = ERR_PTR(-ENOMEM);
> +				goto out;
> +			}
> +		}
> +		cxgb4i_snic_add(&snic->list_head);
> +	} else
> +out :
> +	snic = ERR_PTR(-ENOMEM);
> +
> +	return snic;


I think cxgb4i_uld_add is not checking for PTR_ERR/IS_ERR.
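
A sketch of the missing check: cxgb4i_snic_init() returns an ERR_PTR()
on failure, so the "if (!snic)" test in cxgb4i_uld_add() never fires.
Something like the following, though what the cxgb4 ULD layer expects
back on failure is assumed here, so treat the return value as
illustrative:

static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo)
{
	struct cxgb4i_snic *snic;

	cxgbi_log_info("%s", version);

	snic = cxgb4i_snic_init(linfo);
	if (IS_ERR(snic))
		cxgbi_log_error("snic init failed: %ld\n", PTR_ERR(snic));
	return snic;
}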


> +}
> +
> +void cxgb4i_snic_cleanup(void)
> +{
> +	struct cxgb4i_snic *snic, *tmp;
> +	int i;
> +
> +	mutex_lock(&snic_rwlock);
> +	list_for_each_entry_safe(snic, tmp,&snic_list, list_head) {
> +		list_del(&snic->list_head);
> +
> +		for (i = 0; i<  snic->hba_cnt; i++) {
> +			if (snic->hba[i]) {
> +				cxgb4i_hba_remove(snic->hba[i]);
> +				snic->hba[i] = NULL;
> +			}
> +		}
> +		cxgb4i_ofld_cleanup(snic);
> +		cxgb4i_ddp_cleanup(snic);
> +		cxgbi_pdu_cleanup(&snic->cdev);
> +		cxgbi_log_info("snic 0x%p, %u scsi hosts removed.\n",
> +				snic, snic->hba_cnt);
> +
> +		kfree(snic);
> +	}
> +	mutex_unlock(&snic_rwlock);
> +	cxgb4i_iscsi_cleanup();
> +}
> +
> +static void *cxgb4i_uld_add(const struct cxgb4_lld_info *linfo)
> +{
> +	struct cxgb4i_snic *snic;
> +
> +	cxgbi_log_info("%s", version);
> +
> +	snic = cxgb4i_snic_init(linfo);

you can just do

return cxgb4i_snic_init(linfo);

and then delete everything below.



> +	if (!snic)
> +		goto out;
> +out:
> +	return snic;
> +}
> +
> +static int cxgb4i_uld_rx_handler(void *handle, const __be64 *rsp,
> +				const struct pkt_gl *pgl)
> +{
> +	struct cxgb4i_snic *snic = handle;
> +	struct sk_buff *skb;
> +	const struct cpl_act_establish *rpl;
> +	unsigned int opcode;
> +
> +	if (pgl == NULL) {
> +		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
> +
> +		skb = alloc_skb(256, GFP_ATOMIC);
> +		if (!skb)
> +			goto nomem;
> +		__skb_put(skb, len);
> +		skb_copy_to_linear_data(skb,&rsp[1], len);
> +
> +	} else if (pgl == CXGB4_MSG_AN) {
> +

don't need extra {} and newlines.

> +		return 0;
> +
> +	} else {
> +

extra newline

> +		skb = cxgb4_pktgl_to_skb(pgl, 256, 256);
> +		if (unlikely(!skb))
> +			goto nomem;
> +	}
> +
> +	rpl = cplhdr(skb);
> +	opcode = rpl->ot.opcode;
> +
> +	cxgbi_api_debug("snic %p, opcode 0x%x, skb %p\n",
> +			 snic, opcode, skb);
> +
> +	BUG_ON(!snic->handlers[opcode]);
> +
> +	if (snic->handlers[opcode]) {

extra brackets

> +		snic->handlers[opcode](snic, skb);
> +	} else
> +		cxgbi_log_error("No handler for opcode 0x%x\n",
> +				opcode);
> +
> +	return 0;
> +
> +nomem:
> +	cxgbi_api_debug("OOM bailing out\n");
> +	return 1;
> +}
> +
> +static int cxgb4i_uld_state_change(void *handle, enum cxgb4_state state)
> +{
> +	return 0;
> +}
> +
> +static int __init cxgb4i_init_module(void)
> +{
> +	cxgb4_register_uld(CXGB4_ULD_ISCSI,&cxgb4i_uld_info);
> +

extra newline

> +	return 0;
> +}
> +
> +static void __exit cxgb4i_exit_module(void)
> +{
> +

extra newline


> +	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
> +	cxgb4i_snic_cleanup();
> +}
> +
> +module_init(cxgb4i_init_module);
> +module_exit(cxgb4i_exit_module);
> +


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] cxgb4i_v3: main driver files
  2010-05-27  7:38           ` [PATCH 2/3] cxgb4i_v3: main driver files Mike Christie
@ 2010-05-27  7:40             ` Mike Christie
  0 siblings, 0 replies; 9+ messages in thread
From: Mike Christie @ 2010-05-27  7:40 UTC (permalink / raw)
  To: open-iscsi
  Cc: Rakesh Ranjan, NETDEVML, SCSIDEVML, LKML, Karen Xie, David Miller,
	James Bottomley, Anish Bhatt, Rakesh Ranjan

On 05/27/2010 02:38 AM, Mike Christie wrote:
> On 05/15/2010 12:24 PM, Rakesh Ranjan wrote:
>> From: Rakesh Ranjan<rranjan@chelsio.com>
>>
>>
>> Signed-off-by: Rakesh Ranjan<rakesh@chelsio.com>
>> ---
>> drivers/scsi/cxgb4i/cxgb4i.h | 101 ++
>> drivers/scsi/cxgb4i/cxgb4i_ddp.c | 678 +++++++++++++
>> drivers/scsi/cxgb4i/cxgb4i_ddp.h | 118 +++
>> drivers/scsi/cxgb4i/cxgb4i_offload.c | 1846
>> ++++++++++++++++++++++++++++++++++
>> drivers/scsi/cxgb4i/cxgb4i_offload.h | 91 ++
>> drivers/scsi/cxgb4i/cxgb4i_snic.c | 260 +++++
>> 6 files changed, 3094 insertions(+), 0 deletions(-)
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i.h
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.c
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i_ddp.h
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.c
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i_offload.h
>> create mode 100644 drivers/scsi/cxgb4i/cxgb4i_snic.c
>
>
>
> Got some whitespace errors when applying.
>
> warning: squelched 1 whitespace error
> warning: 6 lines add whitespace errors.
>
>

Oh yeah, run sparse on the driver too. There are some warnings about 
functions that should be static.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2010-05-27  7:39 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-05-15 17:24 cxgb4i_v3.1 submission Rakesh Ranjan
     [not found] ` <1273944249-311-1-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2010-05-15 17:24   ` [PATCH 1/3] cxgb4i_v3: add build support Rakesh Ranjan
     [not found]     ` <1273944249-311-2-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2010-05-15 17:24       ` [PATCH 2/3] cxgb4i_v3: main driver files Rakesh Ranjan
     [not found]         ` <1273944249-311-3-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2010-05-15 17:24           ` [PATCH 3/3] cxgb4i_v3: iscsi and libcxgbi library for handling common part Rakesh Ranjan
2010-05-27  5:59             ` Mike Christie
     [not found]               ` <4BFE0A3E.9020804-hcNo3dDEHLuVc3sceRu5cw@public.gmane.org>
2010-05-27  6:05                 ` Rakesh Ranjan
2010-05-27  7:38           ` [PATCH 2/3] cxgb4i_v3: main driver files Mike Christie
2010-05-27  7:40             ` Mike Christie
  -- strict thread matches above, loose matches on Subject: below --
2010-05-15 17:15 cxgb4i_v3 submission Rakesh Ranjan
2010-05-15 17:15 ` [PATCH 1/3] cxgb4i_v3: add build support Rakesh Ranjan
     [not found]   ` <1273943752-32486-2-git-send-email-rakesh-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2010-05-15 17:15     ` [PATCH 2/3] cxgb4i_v3: main driver files Rakesh Ranjan
