linuxppc-dev.lists.ozlabs.org archive mirror
* [2.6.19 PATCH 3/7] ehea: queue management
@ 2006-08-18 11:31 Jan-Bernd Themann
  2006-08-18 12:17 ` Arjan van de Ven
  2006-08-18 14:40 ` Jörn Engel
  0 siblings, 2 replies; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-08-18 11:31 UTC (permalink / raw)
  To: netdev
  Cc: Thomas Klein, Jan-Bernd Themann, linux-kernel, Thomas Klein,
	linux-ppc, Christoph Raisch, Marcus Eder

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> 


 drivers/net/ehea/ehea_qmr.c |  643 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_qmr.h |  367 +++++++++++++++++++++++++
 2 files changed, 1010 insertions(+)



--- linux-2.6.18-rc4-orig/drivers/net/ehea/ehea_qmr.c	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.c	2006-08-18 00:01:02.765367182 -0700
@@ -0,0 +1,643 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.c
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		retvalue = NULL;
+	}
+	else if ((((u64) retvalue) & (EHEA_PAGESIZE-1)) != 0) {
+		ehea_error("not on pageboundary");
+		retvalue = NULL;
+	}
+	return retvalue;
+}
+
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+			  const u32 pagesize, const u32 qe_size)
+{
+	int pages_per_kpage = PAGE_SIZE / pagesize;
+	int i;
+
+	if (pagesize > PAGE_SIZE) {
+		ehea_error("FATAL ERROR: pagesize=%x is greater than "
+			   "kernel page size", pagesize);
+		return 0;
+	}
+	if (!pages_per_kpage) {
+		ehea_error("FATAL ERROR: invalid kernel page size. "
+			   "pages_per_kpage=%x", pages_per_kpage);
+		return 0;
+	}
+	queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+	if (!queue->queue_pages) {
+		ehea_error("no mem");
+		return 0;
+	}
+	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hea queue pages
+	 */
+	i = 0;
+	while (i < nr_of_pages) {
+		int k;
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto hw_queue_ctor_exit0;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
+			kpage += pagesize;
+			i++;
+		}
+	}
+
+	queue->current_q_offset = 0;
+	queue->qe_size = qe_size;
+	queue->pagesize = pagesize;
+	queue->toggle_state = 1;
+	return 1;
+
+hw_queue_ctor_exit0:
+	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+		if (!(queue->queue_pages)[i])
+			break;
+		free_page((unsigned long)(queue->queue_pages)[i]);
+	}
+	return 0;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int i;
+	int nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	nr_pages = queue->queue_length / queue->pagesize;
+
+	for (i = 0; i < nr_pages; i += pages_per_kpage)
+		free_page((unsigned long)(queue->queue_pages)[i]);
+
+	vfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+	struct ehea_cq *cq = NULL;
+	struct h_epa epa;
+
+	u64 *cq_handle_ref;
+	u32 act_nr_of_entries;
+	u32 act_pages;
+	u64 hret = H_HARDWARE;
+	int hwret;
+	u32 counter;
+	void *vpage = NULL;
+	u64 rpage = 0;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ehea_error("no mem for cq");
+		goto create_cq_exit0;
+	}
+
+	cq->attr.max_nr_of_cqes = nr_of_cqe;
+	cq->attr.cq_token = cq_token;
+	cq->attr.eq_handle = eq_handle;
+
+	cq->adapter = adapter;
+
+	cq_handle_ref = &cq->fw_handle;
+	act_nr_of_entries = 0;
+	act_pages = 0;
+
+	hret = ehea_h_alloc_resource_cq(adapter->handle, cq, &cq->attr,
+					&cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_cq failed");
+		goto create_cq_exit1;
+	}
+
+	hwret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+			      EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+	if (!hwret)
+		goto create_cq_exit2;
+
+	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+		vpage = hw_qpageit_get_inc(&cq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto create_cq_exit3;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_cq(adapter->handle, cq->fw_handle,
+						0, EHEA_CQ_REGISTER_ORIG, rpage,
+						1, cq->epas.kernel);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_cq failed ehea_cq=%p "
+				   "hret=%lx counter=%i act_pages=%i",
+				   cq, hret, counter, cq->attr.nr_pages);
+			goto create_cq_exit3;
+		}
+
+		if (counter == (cq->attr.nr_pages - 1)) {
+			vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+			if ((hret != H_SUCCESS) || (vpage)) {
+				ehea_error("registration of pages not "
+					   "complete hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				ehea_error("CQ: registration of page failed "
+					   "hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		}
+	}
+
+	hw_qeit_reset(&cq->hw_queue);
+	epa = cq->epas.kernel;
+	ehea_reset_cq_ep(cq);
+	ehea_reset_cq_n1(cq);
+
+	return cq;
+
+create_cq_exit3:
+	hw_queue_dtor(&cq->hw_queue);
+
+create_cq_exit2:
+	ehea_h_destroy_cq(adapter->handle, cq, cq->fw_handle, &cq->epas);
+
+create_cq_exit1:
+	kfree(cq);
+
+create_cq_exit0:
+	return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle;
+	u64 hret = H_HARDWARE;
+
+	adapter_handle = cq->adapter->handle;
+
+	if (!cq)
+		return 0;
+
+	/* deregister all previous registered pages */
+	hret = ehea_h_destroy_cq(adapter_handle, cq, cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy CQ failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&cq->hw_queue);
+	kfree(cq);
+
+	return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       const enum ehea_eq_type type,
+			       const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+	u64 hret = H_HARDWARE;
+	int ret = 0;
+	u32 i;
+	void *vpage = NULL;
+	u64 rpage = 0;
+	struct ehea_eq *eq;
+
+	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+	if (!eq) {
+		ehea_error("no mem for eq");
+		return NULL;
+	}
+
+	eq->adapter = adapter;
+	eq->attr.type = type;
+	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+	eq->attr.eqe_gen = eqe_gen;
+	spin_lock_init(&eq->spinlock);
+
+	hret = ehea_h_alloc_resource_eq(adapter->handle,
+					eq, &eq->attr, &eq->fw_handle);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_eq failed");
+		goto free_eq_mem;
+	}
+
+	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, EHEA_PAGESIZE,
+			    sizeof(struct ehea_eqe));
+	if (!ret) {
+		ehea_error("can't allocate eq pages");
+		goto alloc_pages_failed;
+	}
+
+	for (i = 0; i < eq->attr.nr_pages; i++) {
+		vpage = hw_qpageit_get_inc(&eq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			hret = H_RESOURCE;
+			goto register_page_failed;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_eq(adapter->handle, eq->fw_handle,
+						0, EHEA_EQ_REGISTER_ORIG, rpage,
+						1);
+
+		if (i == (eq->attr.nr_pages - 1)) {
+			/* last page */
+			vpage = hw_qpageit_get_inc(&eq->hw_queue);
+			if ((hret != H_SUCCESS) || (vpage)) {
+				goto register_page_failed;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				goto register_page_failed;
+			}
+		}
+	}
+
+	hw_qeit_reset(&eq->hw_queue);
+	return eq;
+
+register_page_failed:
+	hw_queue_dtor(&eq->hw_queue);
+
+alloc_pages_failed:
+	ehea_h_destroy_eq(adapter->handle, eq, eq->fw_handle, &eq->epas);
+free_eq_mem:
+	kfree(eq);
+
+	return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+	struct ehea_eqe *eqe = NULL;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+	unsigned long flags = 0;
+	u64 hret = H_HARDWARE;
+
+	if (!eq)
+		return 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+
+	hret = ehea_h_destroy_eq(eq->adapter->handle, eq, eq->fw_handle,
+				 &eq->epas);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("failed freeing EQ resources");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&eq->hw_queue);
+	kfree(eq);
+
+	return 0;
+}
+
+/**
+ * allocates memory for a queue and registers pages in phyp
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+			   int nr_pages, int wqe_size, int act_nr_sges,
+			   struct ehea_adapter *adapter, int h_call_q_selector)
+{
+	u64 hret = H_HARDWARE;
+	u64 rpage = 0;
+	int ret = 0;
+	int cnt = 0;
+	void *vpage = NULL;
+
+	ret = hw_queue_ctor(hw_queue,
+			      nr_pages, EHEA_PAGESIZE, wqe_size);
+	if (!ret)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < nr_pages; cnt++) {
+		vpage = hw_qpageit_get_inc(hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto qp_alloc_register_exit0;
+		}
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_qp(adapter->handle, qp->fw_handle,
+						0, h_call_q_selector, rpage, 1,
+						qp->epas.kernel);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_qp failed");
+			goto qp_alloc_register_exit0;
+		}
+	}
+	hw_qeit_reset(hw_queue);
+
+	return 0;
+
+qp_alloc_register_exit0:
+	hw_queue_dtor(hw_queue);
+	return -EINVAL;
+}
+
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+	return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+			       u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+	struct ehea_qp *qp;
+	u64 hret = H_HARDWARE;
+
+	u32 wqe_size_in_bytes_sq = 0;
+	u32 wqe_size_in_bytes_rq1 = 0;
+	u32 wqe_size_in_bytes_rq2 = 0;
+	u32 wqe_size_in_bytes_rq3 = 0;
+
+	int ret = -1;
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ehea_error("no mem for qp");
+		return NULL;
+	}
+
+	qp->adapter = adapter;
+
+	hret = ehea_h_alloc_resource_qp(adapter->handle, qp, init_attr, pd,
+					&qp->fw_handle, &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("ehea_h_alloc_resource_qp failed");
+		goto create_qp_exit1;
+	}
+
+	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+				     wqe_size_in_bytes_sq,
+				     init_attr->act_wqe_size_enc_sq, adapter,
+				     0);
+	if (ret) {
+		ehea_error("can't register for sq ret=%x", ret);
+		goto create_qp_exit2;
+	}
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+				     init_attr->nr_rq1_pages,
+				     wqe_size_in_bytes_rq1,
+				     init_attr->act_wqe_size_enc_rq1,
+				     adapter, 1);
+	if (ret) {
+		ehea_error("can't register for rq1 ret=%x", ret);
+		goto create_qp_exit3;
+	}
+
+	if (init_attr->rq_count > 1) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+					     init_attr->nr_rq2_pages,
+					     wqe_size_in_bytes_rq2,
+					     init_attr->act_wqe_size_enc_rq2,
+					     adapter, 2);
+		if (ret) {
+			ehea_error("can't register for rq2 ret=%x", ret);
+			goto create_qp_exit4;
+		}
+	}
+
+	if (init_attr->rq_count > 2) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+					     init_attr->nr_rq3_pages,
+					     wqe_size_in_bytes_rq3,
+					     init_attr->act_wqe_size_enc_rq3,
+					     adapter, 3);
+		if (ret) {
+			ehea_error("can't register for rq3 ret=%x", ret);
+			goto create_qp_exit5;
+		}
+	}
+
+	qp->init_attr = *init_attr;
+
+	return qp;
+create_qp_exit5:
+	hw_queue_dtor(&qp->hw_rqueue2);
+
+create_qp_exit4:
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+create_qp_exit3:
+	hw_queue_dtor(&qp->hw_squeue);
+
+create_qp_exit2:
+	hret = ehea_h_destroy_qp(adapter->handle, qp, qp->fw_handle, &qp->epas);
+
+create_qp_exit1:
+	kfree(qp);
+	return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	int ret = 0;
+	u64 hret = H_HARDWARE;
+	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+
+	if (!qp)
+		return 0;
+
+	hret = ehea_h_destroy_qp(qp->adapter->handle, qp, qp->fw_handle,
+				 &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy QP failed");
+		ret = -EINVAL;
+	}
+
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+   	if(qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+   	if(qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
+
+	return ret;
+}
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+	int i = 0;
+	int k = 0;
+	u64 hret = H_HARDWARE;
+	u64 start = KERNELBASE;
+	u64 end = (u64) high_memory;
+	u64 nr_pages = (end - start) / PAGE_SIZE;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	u64 pt_abs = 0;
+	u64 *pt;
+
+	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pt) {
+		ehea_error("no mem");
+		return -ENOMEM;
+	}
+	pt_abs = virt_to_abs(pt);
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+					acc_ctrl, adapter->pd,
+					&adapter->mr.handle, &adapter->mr.lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_mr failed");
+		return -EINVAL;
+	}
+
+	adapter->mr.vaddr = KERNELBASE;
+
+	while (nr_pages > 0) {
+		if (nr_pages > 1) {
+			u64 num_pages = min(nr_pages, (u64)512);
+			for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void*)(((u64)start)
+							     + ((k++) *
+								PAGE_SIZE)));
+
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, (u64)pt_abs,
+							num_pages);
+			nr_pages -= num_pages;
+		} else {
+			u64 abs_adr = virt_to_abs((void *)(((u64)start)
+							   + (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr,1);
+			nr_pages--;
+		}
+
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource_mr(adapter->handle,
+						adapter->mr.handle);
+			ehea_error("free_resource_mr failed");
+			return -EINVAL;
+		}
+	}
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, adapter->mr.handle);
+		ehea_error("free_resource_mr failed for last page");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages)
+{
+	u64 hret = H_HARDWARE;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+	u64 pt_abs = virt_to_abs(pt);
+	u64 first_page = pt[0];
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start,
+					PAGE_SIZE * nr_pages, acc_ctrl,
+					adapter->pd, &mr->handle, &mr->lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EINVAL;
+	}
+
+	if (nr_pages > 1)
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 
+						0, 0, (u64)pt_abs, nr_pages);
+	else
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 
+						0, 0, first_page, 1);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, mr->handle);
+		ehea_error("free_resource_mr failed for last page");
+		return -EINVAL;
+	}
+	mr->vaddr = start;
+	return 0;
+}
+
+int ehea_dereg_mr_adapter(struct ehea_adapter *adapter)
+{
+	u64 hret = H_HARDWARE;
+
+	hret = ehea_h_free_resource_mr(adapter->handle, adapter->mr.handle);
+	if (hret != H_SUCCESS) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
--- linux-2.6.18-rc4-orig/drivers/net/ehea/ehea_qmr.h	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.h	2006-08-18 00:01:02.642366062 -0700
@@ -0,0 +1,367 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.h
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT  12
+#define EHEA_PAGESIZE   4096UL
+
+/* Some abbreviations used here:
+ *
+ * WQE  - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE  - Completion Queue Entry
+ * EQE  - Event Queue Entry
+ * MR   - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE    0x1
+#define EHEA_SWQE3_TYPE    0x2
+#define EHEA_RWQE2_TYPE    0x3
+#define EHEA_RWQE3_TYPE    0x4
+#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
+
+struct ehea_vsgentry {
+	u64 vaddr;
+	u32 l_key;
+	u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES  	252
+#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
+#define SWQE3_MAX_IMM            	224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC                   0x8000
+#define EHEA_SWQE_IP_CHECKSUM           0x4000
+#define EHEA_SWQE_TCP_CHECKSUM          0x2000
+#define EHEA_SWQE_TSO                   0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
+#define EHEA_SWQE_VLAN_INSERT           0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
+#define EHEA_SWQE_WRAP_CTL_REC          0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
+#define EHEA_SWQE_BIND                  0x0020
+#define EHEA_SWQE_PURGE                 0x0010
+
+#define SWQE_HEADER_SIZE		32
+
+struct ehea_swqe {
+	u64 wr_id;
+	u16 tx_control;
+	u16 vlan_tag;
+	u8 reserved1;
+	u8 ip_start;
+	u8 ip_end;
+	u8 immediate_data_length;
+	u8 tcp_offset;
+	u8 reserved2;
+	u16 tcp_end;
+	u8 wrap_tag;
+	u8 descriptors;		/* number of valid descriptors in WQE */
+	u16 reserved3;
+	u16 reserved4;
+	u16 mss;
+	u32 reserved5;
+	union {
+		/*  Send WQE Format 1 */
+		struct {
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+		} no_immediate_data;
+
+		/*  Send WQE Format 2 */
+		struct {
+			struct ehea_vsgentry sg_entry;
+			/* 0x30 */
+			u8 immediate_data[SWQE2_MAX_IMM];
+			/* 0xd0 */
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+		} immdata_desc __attribute__ ((packed));
+
+		/*  Send WQE Format 3 */
+		struct {
+			u8 immediate_data[SWQE3_MAX_IMM];
+		} immdata_nodesc;
+	} u;
+};
+
+struct ehea_rwqe {
+	u64 wr_id;		/* work request ID */
+	u8 reserved1[5];
+	u8 data_segments;
+	u16 reserved2;
+	u64 reserved3;
+	u64 reserved4;
+	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT  0x0400
+
+#define EHEA_CQE_TYPE_RQ          0x60
+#define EHEA_CQE_STAT_ERR_MASK    0x7300
+#define EHEA_CQE_STAT_ERR_TCP     0x4000
+
+struct ehea_cqe {
+	u64 wr_id;		/* work request ID from WQE */
+	u8 type;
+	u8 valid;
+	u16 status;
+	u16 reserved1;
+	u16 num_bytes_transfered;
+	u16 vlan_tag;
+	u16 inet_checksum_value;
+	u8 reserved2;
+	u8 header_length;
+	u16 reserved3;
+	u16 page_offset;
+	u16 wqe_count;
+	u32 qp_token;
+	u32 timestamp;
+	u32 reserved4;
+	u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+	u64 entry;
+};
+
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+	struct ehea_page *current_page = NULL;
+	if (q_offset >= queue->queue_length)
+		q_offset -= queue->queue_length;
+	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+	return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	hw_qeit_inc(queue);
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	u8 valid = retvalue->valid;
+	if ((valid >> 7) == (queue->toggle_state & 1)) {
+		/* this is a good one */
+		hw_qeit_inc(queue);
+		pref = hw_qeit_calc(queue, queue->current_q_offset);
+		prefetch(pref);
+		prefetch(pref + 128);
+	} else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+	u8 valid = 0;
+
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	pref = hw_qeit_calc(queue, queue->current_q_offset);
+	prefetch(pref);
+	prefetch(pref + 128);
+	prefetch(pref + 256);
+	valid = retvalue->valid;
+	if (!((valid >> 7) == (queue->toggle_state & 1)))
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = NULL;
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
+	retvalue = hw_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+	return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8 *) retvalue;
+	if ((qe >> 7) == (queue->toggle_state & 1))
+		hw_qeit_eq_get_inc(queue);
+	else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+						   int rq_nr)
+{
+	struct ehea_rwqe *wqe_p = NULL;
+	struct hw_queue *queue = NULL;
+	struct ehea_qp *my_qp = qp;
+
+	if (rq_nr == 1)
+		queue = &my_qp->hw_rqueue1;
+	else if (rq_nr == 2)
+		queue = &my_qp->hw_rqueue2;
+	else
+		queue = &my_qp->hw_rqueue3;
+	wqe_p = (struct ehea_rwqe *)hw_qeit_get_inc(queue);
+
+	return wqe_p;
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+					      int *wqe_index)
+{
+	struct hw_queue *queue = &my_qp->hw_squeue;
+	struct ehea_swqe *wqe_p = NULL;
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = (struct ehea_swqe *)hw_qeit_get_inc(&my_qp->hw_squeue);
+	return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+	iosync();
+	ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+	struct ehea_cqe *cqe = NULL;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+	cqe = (struct ehea_cqe *)hw_qeit_get_valid(queue);
+	return cqe;
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+	hw_qeit_inc(queue);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+	struct ehea_cqe *wqe_p = NULL;
+	wqe_p = (struct ehea_cqe *)hw_qeit_get_inc_valid(&my_cq->hw_queue);
+	return wqe_p;
+};
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+	EHEA_EQ = 0,		/* event queue              */
+	EHEA_NEQ		/* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       enum ehea_eq_type type,
+			       const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+			       u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter,
+			       u32 pd,
+			       struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+int ehea_dereg_mr_adapter(struct ehea_adapter *adapter);
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages);
+
+#endif	/* __EHEA_QMR_H__ */


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 11:31 [2.6.19 PATCH 3/7] ehea: queue management Jan-Bernd Themann
@ 2006-08-18 12:17 ` Arjan van de Ven
  2006-08-18 13:25   ` Thomas Klein
  2006-08-18 14:40 ` Jörn Engel
  1 sibling, 1 reply; 14+ messages in thread
From: Arjan van de Ven @ 2006-08-18 12:17 UTC (permalink / raw)
  To: Jan-Bernd Themann
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Thomas Klein, linux-ppc, Christoph Raisch, Marcus Eder

> +	queue->queue_length = nr_of_pages * pagesize;
> +	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));


wow... is this really so large that it warrants a vmalloc()???
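(For a rough sense of scale: the array being allocated holds one pointer per
hardware page, so even a fairly large queue of, say, 64 EHEA pages needs only
64 * sizeof(void *) = 512 bytes, comfortably within kmalloc() range. The
actual nr_of_pages depends on the configured queue length, so this is an
estimate rather than a figure taken from the driver.)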


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 12:17 ` Arjan van de Ven
@ 2006-08-18 13:25   ` Thomas Klein
  2006-08-18 13:47     ` Arnd Bergmann
  2006-08-18 14:33     ` Jörn Engel
  0 siblings, 2 replies; 14+ messages in thread
From: Thomas Klein @ 2006-08-18 13:25 UTC (permalink / raw)
  To: Arjan van de Ven
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Christoph Raisch, linux-ppc, Marcus Eder

Arjan van de Ven wrote:
>> +	queue->queue_length = nr_of_pages * pagesize;
>> +	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
> 
> 
> wow... is this really so large that it warrants a vmalloc()???

Agreed: Replaced with kmalloc()

Regards
Thomas


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 13:25   ` Thomas Klein
@ 2006-08-18 13:47     ` Arnd Bergmann
  2006-08-18 14:24       ` Christoph Raisch
  2006-08-18 14:33     ` Jörn Engel
  1 sibling, 1 reply; 14+ messages in thread
From: Arnd Bergmann @ 2006-08-18 13:47 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Thomas Klein, Christoph Raisch, Marcus Eder, Arjan van de Ven

On Friday 18 August 2006 15:25, Thomas Klein wrote:
> 
> > wow... is this really so large that it warrants a vmalloc()???
> 
> Agreed: Replaced with kmalloc()

My understanding from the previous discussion was that it actually
is a multi-page power of two allocation, so the right choice might
be __get_free_pages() instead of kmalloc, but it probably doesn't
make much of a difference.
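For illustration, the allocation in hw_queue_ctor() might then look roughly
like the sketch below; the order calculation and the matching cleanup are
assumptions for the sake of the example, not code taken from the driver:

	/* sketch: back the page-pointer array with whole, zeroed pages */
	int order = get_order(nr_of_pages * sizeof(void *));

	queue->queue_pages = (struct ehea_page **)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!queue->queue_pages)
		return -ENOMEM;

	/* ... and on teardown, instead of vfree()/kfree(): */
	free_pages((unsigned long)queue->queue_pages, order);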

You should really do some measurements to see what the minimal
queue sizes are that can get you optimal throughput.

	Arnd <><


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 13:47     ` Arnd Bergmann
@ 2006-08-18 14:24       ` Christoph Raisch
  2006-08-18 16:16         ` Arnd Bergmann
  0 siblings, 1 reply; 14+ messages in thread
From: Christoph Raisch @ 2006-08-18 14:24 UTC (permalink / raw)
  To: abergman
  Cc: Thomas Q Klein, Jan-Bernd Themann, netdev, linux-kernel,
	linuxppc-dev, Marcus Eder, tklein, Arjan van de Ven

> You should really do some measurements to see what the minimal
> queue sizes are that can get you optimal throughput.
>
>    Arnd <><

We did.
And as always in performance tuning... one size fits all unfortunately is
not the correct answer.
Therefore we'll leave that open to the user, as most other new ethernet
drivers do as well.

Regards . . . Christoph R


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 13:25   ` Thomas Klein
  2006-08-18 13:47     ` Arnd Bergmann
@ 2006-08-18 14:33     ` Jörn Engel
  1 sibling, 0 replies; 14+ messages in thread
From: Jörn Engel @ 2006-08-18 14:33 UTC (permalink / raw)
  To: Thomas Klein
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Christoph Raisch, linux-ppc, Marcus Eder, Arjan van de Ven

On Fri, 18 August 2006 15:25:11 +0200, Thomas Klein wrote:
> Arjan van de Ven wrote:
> >>+	queue->queue_length = nr_of_pages * pagesize;
> >>+	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
> >
> >
> >wow... is this really so large that it warrants a vmalloc()???
> 
> Agreed: Replaced with kmalloc()

kzalloc() and you can remove the memset() as well.
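With that change the vmalloc()/memset() pair in hw_queue_ctor() would collapse
to something like the following (a sketch of the suggestion, not the code that
was actually merged):

	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		ehea_error("no mem for queue_pages");
		return -ENOMEM;
	}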

Jörn

-- 
Data dominates. If you've chosen the right data structures and organized
things well, the algorithms will almost always be self-evident. Data
structures, not algorithms, are central to programming.
-- Rob Pike


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 11:31 [2.6.19 PATCH 3/7] ehea: queue management Jan-Bernd Themann
  2006-08-18 12:17 ` Arjan van de Ven
@ 2006-08-18 14:40 ` Jörn Engel
  1 sibling, 0 replies; 14+ messages in thread
From: Jörn Engel @ 2006-08-18 14:40 UTC (permalink / raw)
  To: Jan-Bernd Themann
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Thomas Klein, linux-ppc, Christoph Raisch, Marcus Eder

On Fri, 18 August 2006 13:31:19 +0200, Jan-Bernd Themann wrote:
>
> +	if (queue->current_q_offset > queue->queue_length) {
> +		queue->current_q_offset -= queue->pagesize;
> +		retvalue = NULL;
> +	}
> +	else if ((((u64) retvalue) & (EHEA_PAGESIZE-1)) != 0) {

	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {

> +		if (hret < H_SUCCESS) {

Do you have a reason to keep H_SUCCESS?

> +   	if(qp_attr->rq_count > 1)
> +		hw_queue_dtor(&qp->hw_rqueue2);
> +   	if(qp_attr->rq_count > 2)

Small amount of whitespace damage.

Jörn

-- 
Write programs that do one thing and do it well. Write programs to work
together. Write programs to handle text streams, because that is a
universal interface.
-- Doug MacIlroy


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-18 14:24       ` Christoph Raisch
@ 2006-08-18 16:16         ` Arnd Bergmann
  0 siblings, 0 replies; 14+ messages in thread
From: Arnd Bergmann @ 2006-08-18 16:16 UTC (permalink / raw)
  To: linuxppc-dev
  Cc: Thomas Q Klein, Jan-Bernd Themann, netdev, linux-kernel,
	Christoph Raisch, Marcus Eder, tklein, abergman, Arjan van de Ven

On Friday 18 August 2006 16:24, Christoph Raisch wrote:
> And as always in performance tuning... one size fits all unfortunately is
> not the correct answer.

Ah, good. What is the maximum sensible value that you came up with?

> Therefore we'll leave that open to the user as most other new ethernet
> driver did as well.

Sure. The interesting question is whether you want to allow users
to set it to a value that is no longer sensible to allocate with
__get_free_pages() and instead requires vmalloc().
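A common way to keep such a knob within sensible bounds is a clamped module
parameter; the parameter name and limits below are purely illustrative and
not taken from the ehea driver:

	/* illustrative sketch only */
	static int sq_entries = 4096;
	module_param(sq_entries, int, 0444);
	MODULE_PARM_DESC(sq_entries, "Number of send queue entries");

	/* at init time, fall back to the default for out-of-range values */
	if (sq_entries < 64 || sq_entries > 16384)
		sq_entries = 4096;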

	Arnd <><


* [2.6.19 PATCH 3/7] ehea: queue management
@ 2006-08-22 12:53 Jan-Bernd Themann
  2006-08-22 14:01 ` Arnd Bergmann
  0 siblings, 1 reply; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-08-22 12:53 UTC (permalink / raw)
  To: netdev
  Cc: Thomas Klein, Jan-Bernd Themann, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> 


 drivers/net/ehea/ehea_qmr.c |  634 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_qmr.h |  367 +++++++++++++++++++++++++
 2 files changed, 1001 insertions(+)



--- linux-2.6.18-rc4-git1-orig/drivers/net/ehea/ehea_qmr.c	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.c	2006-08-22 06:05:29.120372939 -0700
@@ -0,0 +1,634 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.c
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		retvalue = NULL;
+	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+		ehea_error("not on pageboundary");
+		retvalue = NULL;
+	}
+	return retvalue;
+}
+
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+			  const u32 pagesize, const u32 qe_size)
+{
+	int pages_per_kpage = PAGE_SIZE / pagesize;
+	int i;
+
+	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+		ehea_error("pagesize conflict! kernel pagesize=%d, "
+			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+		return -EINVAL;
+	}
+
+	queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	if (!queue->queue_pages) {
+		ehea_error("no mem for queue_pages");
+		return -ENOMEM;
+	}
+
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hea queue pages
+	 */
+	i = 0;
+	while (i < nr_of_pages) {
+		int k;
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto hw_queue_ctor_exit0;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
+			kpage += pagesize;
+			i++;
+		}
+	}
+
+	queue->current_q_offset = 0;
+	queue->qe_size = qe_size;
+	queue->pagesize = pagesize;
+	queue->toggle_state = 1;
+
+	return 0;
+
+hw_queue_ctor_exit0:
+	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+		if (!(queue->queue_pages)[i])
+			break;
+		free_page((unsigned long)(queue->queue_pages)[i]);
+	}
+	return -ENOMEM;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int i;
+	int nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	nr_pages = queue->queue_length / queue->pagesize;
+
+	for (i = 0; i < nr_pages; i += pages_per_kpage)
+		free_page((unsigned long)(queue->queue_pages)[i]);
+
+	kfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+	struct ehea_cq *cq = NULL;
+	struct h_epa epa;
+
+	u64 *cq_handle_ref;
+	u32 act_nr_of_entries;
+	u32 act_pages;
+	u64 hret;
+	int ret;
+	u32 counter;
+	void *vpage = NULL;
+	u64 rpage = 0;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ehea_error("no mem for cq");
+		goto create_cq_exit0;
+	}
+
+	cq->attr.max_nr_of_cqes = nr_of_cqe;
+	cq->attr.cq_token = cq_token;
+	cq->attr.eq_handle = eq_handle;
+
+	cq->adapter = adapter;
+
+	cq_handle_ref = &cq->fw_handle;
+	act_nr_of_entries = 0;
+	act_pages = 0;
+
+	hret = ehea_h_alloc_resource_cq(adapter->handle, cq, &cq->attr,
+					&cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_cq failed");
+		goto create_cq_exit1;
+	}
+
+	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+	if (ret)
+		goto create_cq_exit2;
+
+	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+		vpage = hw_qpageit_get_inc(&cq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto create_cq_exit3;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_cq(adapter->handle, cq->fw_handle,
+						0, EHEA_CQ_REGISTER_ORIG, rpage,
+						1, cq->epas.kernel);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_cq failed ehea_cq=%p "
+				   "hret=%lx counter=%i act_pages=%i",
+				   cq, hret, counter, cq->attr.nr_pages);
+			goto create_cq_exit3;
+		}
+
+		if (counter == (cq->attr.nr_pages - 1)) {
+			vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+			if ((hret != H_SUCCESS) || (vpage)) {
+				ehea_error("registration of pages not "
+					   "complete hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				ehea_error("CQ: registration of page failed "
+					   "hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		}
+	}
+
+	hw_qeit_reset(&cq->hw_queue);
+	epa = cq->epas.kernel;
+	ehea_reset_cq_ep(cq);
+	ehea_reset_cq_n1(cq);
+
+	return cq;
+
+create_cq_exit3:
+	hw_queue_dtor(&cq->hw_queue);
+
+create_cq_exit2:
+	ehea_h_destroy_cq(adapter->handle, cq, cq->fw_handle, &cq->epas);
+
+create_cq_exit1:
+	kfree(cq);
+
+create_cq_exit0:
+	return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle;
+	u64 hret;
+
+	adapter_handle = cq->adapter->handle;
+
+	if (!cq)
+		return 0;
+
+	/* deregister all previous registered pages */
+	hret = ehea_h_destroy_cq(adapter_handle, cq, cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy CQ failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&cq->hw_queue);
+	kfree(cq);
+
+	return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       const enum ehea_eq_type type,
+			       const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+	u64 hret;
+	int ret;
+	u32 i;
+	void *vpage = NULL;
+	u64 rpage = 0;
+	struct ehea_eq *eq;
+
+	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+	if (!eq) {
+		ehea_error("no mem for eq");
+		return NULL;
+	}
+
+	eq->adapter = adapter;
+	eq->attr.type = type;
+	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+	eq->attr.eqe_gen = eqe_gen;
+	spin_lock_init(&eq->spinlock);
+
+	hret = ehea_h_alloc_resource_eq(adapter->handle,
+					eq, &eq->attr, &eq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_eq failed");
+		goto free_eq_mem;
+	}
+
+	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, 
+			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+	if (ret) {
+		ehea_error("can't allocate eq pages");
+		goto alloc_pages_failed;
+	}
+
+	for (i = 0; i < eq->attr.nr_pages; i++) {
+		vpage = hw_qpageit_get_inc(&eq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			hret = H_RESOURCE;
+			goto register_page_failed;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_eq(adapter->handle, eq->fw_handle,
+						0, EHEA_EQ_REGISTER_ORIG, rpage,
+						1);
+
+		if (i == (eq->attr.nr_pages - 1)) {
+			/* last page */
+			vpage = hw_qpageit_get_inc(&eq->hw_queue);
+			if ((hret != H_SUCCESS) || (vpage)) {
+				goto register_page_failed;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				goto register_page_failed;
+			}
+		}
+	}
+
+	hw_qeit_reset(&eq->hw_queue);
+	return eq;
+
+register_page_failed:
+	hw_queue_dtor(&eq->hw_queue);
+
+alloc_pages_failed:
+	ehea_h_destroy_eq(adapter->handle, eq, eq->fw_handle, &eq->epas);
+free_eq_mem:
+	kfree(eq);
+	return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+	struct ehea_eqe *eqe = NULL;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+	u64 hret;
+	unsigned long flags = 0;
+
+	if (!eq)
+		return 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+
+	hret = ehea_h_destroy_eq(eq->adapter->handle, eq, eq->fw_handle,
+				 &eq->epas);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_eq failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&eq->hw_queue);
+	kfree(eq);
+
+	return 0;
+}
+
+/**
+ * allocates memory for a queue and registers pages in phyp
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+			   int nr_pages, int wqe_size, int act_nr_sges,
+			   struct ehea_adapter *adapter, int h_call_q_selector)
+{
+	u64 hret;
+	u64 rpage = 0;
+	int ret;
+	int cnt = 0;
+	void *vpage = NULL;
+
+	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+	if (ret)
+		return ret;
+
+	for (cnt = 0; cnt < nr_pages; cnt++) {
+		vpage = hw_qpageit_get_inc(hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto qp_alloc_register_exit0;
+		}
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage_qp(adapter->handle, qp->fw_handle,
+						0, h_call_q_selector, rpage, 1,
+						qp->epas.kernel);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_qp failed");
+			goto qp_alloc_register_exit0;
+		}
+	}
+	hw_qeit_reset(hw_queue);
+	return 0;
+
+qp_alloc_register_exit0:
+	hw_queue_dtor(hw_queue);
+	return -EINVAL;
+}
+
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+	return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+			       u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+	int ret;
+	u64 hret;
+	struct ehea_qp *qp;
+	u32 wqe_size_in_bytes_sq = 0;
+	u32 wqe_size_in_bytes_rq1 = 0;
+	u32 wqe_size_in_bytes_rq2 = 0;
+	u32 wqe_size_in_bytes_rq3 = 0;
+
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ehea_error("no mem for qp");
+		return NULL;
+	}
+
+	qp->adapter = adapter;
+
+	hret = ehea_h_alloc_resource_qp(adapter->handle, qp, init_attr, pd,
+					&qp->fw_handle, &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("ehea_h_alloc_resource_qp failed");
+		goto create_qp_exit1;
+	}
+
+	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+				     wqe_size_in_bytes_sq,
+				     init_attr->act_wqe_size_enc_sq, adapter,
+				     0);
+	if (ret) {
+		ehea_error("can't register for sq ret=%x", ret);
+		goto create_qp_exit2;
+	}
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+				     init_attr->nr_rq1_pages,
+				     wqe_size_in_bytes_rq1,
+				     init_attr->act_wqe_size_enc_rq1,
+				     adapter, 1);
+	if (ret) {
+		ehea_error("can't register for rq1 ret=%x", ret);
+		goto create_qp_exit3;
+	}
+
+	if (init_attr->rq_count > 1) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+					     init_attr->nr_rq2_pages,
+					     wqe_size_in_bytes_rq2,
+					     init_attr->act_wqe_size_enc_rq2,
+					     adapter, 2);
+		if (ret) {
+			ehea_error("can't register for rq2 ret=%x", ret);
+			goto create_qp_exit4;
+		}
+	}
+
+	if (init_attr->rq_count > 2) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+					     init_attr->nr_rq3_pages,
+					     wqe_size_in_bytes_rq3,
+					     init_attr->act_wqe_size_enc_rq3,
+					     adapter, 3);
+		if (ret) {
+			ehea_error("can't register for rq3 ret=%x", ret);
+			goto create_qp_exit5;
+		}
+	}
+
+	qp->init_attr = *init_attr;
+
+	return qp;
+
+create_qp_exit5:
+	hw_queue_dtor(&qp->hw_rqueue2);
+
+create_qp_exit4:
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+create_qp_exit3:
+	hw_queue_dtor(&qp->hw_squeue);
+
+create_qp_exit2:
+	hret = ehea_h_destroy_qp(adapter->handle, qp, qp->fw_handle, &qp->epas);
+
+create_qp_exit1:
+	kfree(qp);
+	return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+
+	if (!qp)
+		return 0;
+
+	hret = ehea_h_destroy_qp(qp->adapter->handle, qp, qp->fw_handle,
+				 &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_qp failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+   	if (qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+   	if (qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
+
+	return 0;
+}
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+	int i = 0;
+	int k = 0;
+	u64 hret;
+	u64 start = KERNELBASE;
+	u64 end = (u64) high_memory;
+	u64 nr_pages = (end - start) / PAGE_SIZE;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	u64 pt_abs = 0;
+	u64 *pt;
+
+	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pt) {
+		ehea_error("no mem");
+		return -ENOMEM;
+	}
+	pt_abs = virt_to_abs(pt);
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+					acc_ctrl, adapter->pd,
+					&adapter->mr.handle, &adapter->mr.lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_mr failed");
+		return -EINVAL;
+	}
+
+	adapter->mr.vaddr = KERNELBASE;
+
+	while (nr_pages > 0) {
+		if (nr_pages > 1) {
+			u64 num_pages = min(nr_pages, (u64)512);
+			for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void*)(((u64)start)
+							     + ((k++) *
+								PAGE_SIZE)));
+
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, (u64)pt_abs,
+							num_pages);
+			nr_pages -= num_pages;
+		} else {
+			u64 abs_adr = virt_to_abs((void *)(((u64)start)
+							   + (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr,1);
+			nr_pages--;
+		}
+
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource_mr(adapter->handle,
+						adapter->mr.handle);
+			ehea_error("free_resource_mr failed");
+			return -EINVAL;
+		}
+	}
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, adapter->mr.handle);
+		ehea_error("free_resource_mr failed for last page");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages)
+{
+	u64 hret;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+	u64 pt_abs = virt_to_abs(pt);
+	u64 first_page = pt[0];
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start,
+					PAGE_SIZE * nr_pages, acc_ctrl,
+					adapter->pd, &mr->handle, &mr->lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EINVAL;
+	}
+
+	if (nr_pages > 1)
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, (u64)pt_abs, nr_pages);
+	else
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, first_page, 1);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, mr->handle);
+		ehea_error("free_resource_mr failed for last page");
+		return -EINVAL;
+	}
+	mr->vaddr = start;
+	return 0;
+}
+
+int ehea_dereg_mr_adapter(struct ehea_adapter *adapter)
+{
+	u64 hret;
+
+	hret = ehea_h_free_resource_mr(adapter->handle, adapter->mr.handle);
+	if (hret != H_SUCCESS) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
--- linux-2.6.18-rc4-git1-orig/drivers/net/ehea/ehea_qmr.h	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.h	2006-08-22 06:05:29.003371879 -0700
@@ -0,0 +1,367 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.h
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT  12
+#define EHEA_PAGESIZE   4096UL
+
+/* Some abbreviations used here:
+ *
+ * WQE  - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE  - Completion Queue Entry
+ * EQE  - Event Queue Entry
+ * MR   - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE    0x1
+#define EHEA_SWQE3_TYPE    0x2
+#define EHEA_RWQE2_TYPE    0x3
+#define EHEA_RWQE3_TYPE    0x4
+#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
+
+struct ehea_vsgentry {
+	u64 vaddr;
+	u32 l_key;
+	u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES  	252
+#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
+#define SWQE3_MAX_IMM            	224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC                   0x8000
+#define EHEA_SWQE_IP_CHECKSUM           0x4000
+#define EHEA_SWQE_TCP_CHECKSUM          0x2000
+#define EHEA_SWQE_TSO                   0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
+#define EHEA_SWQE_VLAN_INSERT           0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
+#define EHEA_SWQE_WRAP_CTL_REC          0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
+#define EHEA_SWQE_BIND                  0x0020
+#define EHEA_SWQE_PURGE                 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE		32
+
+struct ehea_swqe {
+	u64 wr_id;
+	u16 tx_control;
+	u16 vlan_tag;
+	u8 reserved1;
+	u8 ip_start;
+	u8 ip_end;
+	u8 immediate_data_length;
+	u8 tcp_offset;
+	u8 reserved2;
+	u16 tcp_end;
+	u8 wrap_tag;
+	u8 descriptors;		/* number of valid descriptors in WQE */
+	u16 reserved3;
+	u16 reserved4;
+	u16 mss;
+	u32 reserved5;
+	union {
+		/*  Send WQE Format 1 */
+		struct {
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+		} no_immediate_data;
+
+		/*  Send WQE Format 2 */
+		struct {
+			struct ehea_vsgentry sg_entry;
+			/* 0x30 */
+			u8 immediate_data[SWQE2_MAX_IMM];
+			/* 0xd0 */
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+		} immdata_desc __attribute__ ((packed));
+
+		/*  Send WQE Format 3 */
+		struct {
+			u8 immediate_data[SWQE3_MAX_IMM];
+		} immdata_nodesc;
+	} u;
+};
+
+struct ehea_rwqe {
+	u64 wr_id;		/* work request ID */
+	u8 reserved1[5];
+	u8 data_segments;
+	u16 reserved2;
+	u64 reserved3;
+	u64 reserved4;
+	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
+
+#define EHEA_CQE_TYPE_RQ           0x60
+#define EHEA_CQE_STAT_ERR_MASK     0x721F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_TCP      0x4000
+
+struct ehea_cqe {
+	u64 wr_id;		/* work request ID from WQE */
+	u8 type;
+	u8 valid;
+	u16 status;
+	u16 reserved1;
+	u16 num_bytes_transfered;
+	u16 vlan_tag;
+	u16 inet_checksum_value;
+	u8 reserved2;
+	u8 header_length;
+	u16 reserved3;
+	u16 page_offset;
+	u16 wqe_count;
+	u32 qp_token;
+	u32 timestamp;
+	u32 reserved4;
+	u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+	u64 entry;
+};
+
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+	struct ehea_page *current_page;
+
+	if (q_offset >= queue->queue_length)
+		q_offset -= queue->queue_length;
+	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+	return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	hw_qeit_inc(queue);
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	u8 valid = retvalue->valid;
+	void *pref;
+
+	if ((valid >> 7) == (queue->toggle_state & 1)) {
+		/* this is a good one */
+		hw_qeit_inc(queue);
+		pref = hw_qeit_calc(queue, queue->current_q_offset);
+		prefetch(pref);
+		prefetch(pref + 128);
+	} else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	u8 valid;
+
+	pref = hw_qeit_calc(queue, queue->current_q_offset);
+	prefetch(pref);
+	prefetch(pref + 128);
+	prefetch(pref + 256);
+	valid = retvalue->valid;
+	if (!((valid >> 7) == (queue->toggle_state & 1)))
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+	void *retvalue;
+
+
+	retvalue = hw_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+	return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8 *) retvalue;
+	if ((qe >> 7) == (queue->toggle_state & 1))
+		hw_qeit_eq_get_inc(queue);
+	else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+						   int rq_nr)
+{
+	struct hw_queue *queue = NULL;
+
+	if (rq_nr == 1)
+		queue = &qp->hw_rqueue1;
+	else if (rq_nr == 2)
+		queue = &qp->hw_rqueue2;
+	else
+		queue = &qp->hw_rqueue3;
+
+	return (struct ehea_rwqe *)hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+					      int *wqe_index)
+{
+	struct hw_queue *queue = &my_qp->hw_squeue;
+	struct ehea_swqe *wqe_p = NULL;
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = (struct ehea_swqe *)hw_qeit_get_inc(&my_qp->hw_squeue);
+	return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+	iosync();
+	ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+	struct ehea_cqe *cqe = NULL;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+	cqe = (struct ehea_cqe *)hw_qeit_get_valid(queue);
+	return cqe;
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+	hw_qeit_inc(queue);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+	struct ehea_cqe *wqe_p = NULL;
+	wqe_p = (struct ehea_cqe *)hw_qeit_get_inc_valid(&my_cq->hw_queue);
+	return wqe_p;
+};
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+	EHEA_EQ = 0,		/* event queue              */
+	EHEA_NEQ		/* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       enum ehea_eq_type type,
+			       const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+			       u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+			       struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+int ehea_dereg_mr_adapter(struct ehea_adapter *adapter);
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages);
+
+#endif	/* __EHEA_QMR_H__ */


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-22 14:01 ` Arnd Bergmann
@ 2006-08-22 13:51   ` Jan-Bernd Themann
  0 siblings, 0 replies; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-08-22 13:51 UTC (permalink / raw)
  To: Arnd Bergmann
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

Hi,

On Tuesday 22 August 2006 16:01, Arnd Bergmann wrote:
> > +       u64 rpage = 0;
> > +       int ret;
> > +       int cnt = 0;
> > +       void *vpage = NULL;
> > +
> > +       ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
> > +       if (ret)
> > +               return ret;
> > +
> > +       for (cnt = 0; cnt < nr_pages; cnt++) {
> > +               vpage = hw_qpageit_get_inc(hw_queue);
> > +               if (!vpage) {
> > +                       ehea_error("hw_qpageit_get_inc failed");
> > +                       goto qp_alloc_register_exit0;
> > +               }
> > +               rpage = virt_to_abs(vpage);
> 
> As someone mentioned before, the initialization to 0 or NULL
> is pointless here, as the variables are always assigned before
> they are used. There are a number of other places in your
> code that do similar things, you should probably go through
> these and remove the initializers.
> 
> If you indeed need something to be initialized, it is good practice
> to do the initialization as late as possible, e.g.
> 
> 	int foo;
> 	...
> 	foo = 0;
> 	do_foo(foo);
> 
> to make it clear that you have a reason to initialize it.
> 
> 	Arnd <><
> 

Agreed. We started to remove some but apparently not all.
We'll go through the code and remove them where possible.
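
As an illustration (a sketch, not taken verbatim from any respin of the
patch), the quoted part of ehea_qp_alloc_register() could then read:

	u64 rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		/* vpage and rpage are assigned right where they are needed */
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto qp_alloc_register_exit0;
		}
		rpage = virt_to_abs(vpage);
		...
	}

The exit label and the rest of the loop body (the ehea_h_register_rpage()
call) are unchanged from the quoted snippet; the later respins of this
patch in the thread drop the initializers in the same way.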


* Re: [2.6.19 PATCH 3/7] ehea: queue management
  2006-08-22 12:53 Jan-Bernd Themann
@ 2006-08-22 14:01 ` Arnd Bergmann
  2006-08-22 13:51   ` Jan-Bernd Themann
  0 siblings, 1 reply; 14+ messages in thread
From: Arnd Bergmann @ 2006-08-22 14:01 UTC (permalink / raw)
  To: Jan-Bernd Themann
  Cc: Thomas Klein, Jan-Bernd Themann, netdev, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

On Tuesday 22 August 2006 14:53, Jan-Bernd Themann wrote:
> +       u64 rpage = 0;
> +       int ret;
> +       int cnt = 0;
> +       void *vpage = NULL;
> +
> +       ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
> +       if (ret)
> +               return ret;
> +
> +       for (cnt = 0; cnt < nr_pages; cnt++) {
> +               vpage = hw_qpageit_get_inc(hw_queue);
> +               if (!vpage) {
> +                       ehea_error("hw_qpageit_get_inc failed");
> +                       goto qp_alloc_register_exit0;
> +               }
> +               rpage = virt_to_abs(vpage);

As someone mentioned before, the initialization to 0 or NULL
is pointless here, as the variables are always assigned before
they are used. There are a number of other places in your
code that do similar things, you should probably go through
these and remove the initializers.

If you indeed need something to be initialized, it is good practice
to do the initialization as late as possible, e.g.

	int foo;
	...
	foo = 0;
	do_foo(foo);

to make it clear that you have a reason to initialize it.

	Arnd <><


* [2.6.19 PATCH 3/7] ehea: queue management
@ 2006-08-23  8:57 Jan-Bernd Themann
  0 siblings, 0 replies; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-08-23  8:57 UTC (permalink / raw)
  To: netdev
  Cc: Thomas Klein, Jan-Bernd Themann, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> 


 drivers/net/ehea/ehea_qmr.c |  607 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_qmr.h |  361 ++++++++++++++++++++++++++
 2 files changed, 968 insertions(+)



--- linux-2.6.18-rc4-git1-orig/drivers/net/ehea/ehea_qmr.c	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.c	2006-08-23 02:01:00.282229545 -0700
@@ -0,0 +1,607 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.c
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		retvalue = NULL;
+	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+		ehea_error("not on pageboundary");
+		retvalue = NULL;
+	}
+	return retvalue;
+}
+
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+			  const u32 pagesize, const u32 qe_size)
+{
+	int pages_per_kpage = PAGE_SIZE / pagesize;
+	int i, k;
+
+	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+		ehea_error("pagesize conflict! kernel pagesize=%d, "
+			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+		return -EINVAL;
+	}
+
+	queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	if (!queue->queue_pages) {
+		ehea_error("no mem for queue_pages");
+		return -ENOMEM;
+	}
+
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hea queue pages
+	 */
+	i = 0;
+	while (i < nr_of_pages) {
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto exit0;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
+			kpage += pagesize;
+			i++;
+		}
+	}
+
+	queue->current_q_offset = 0;
+	queue->qe_size = qe_size;
+	queue->pagesize = pagesize;
+	queue->toggle_state = 1;
+
+	return 0;
+
+exit0:
+	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+		if (!(queue->queue_pages)[i])
+			break;
+		free_page((unsigned long)(queue->queue_pages)[i]);
+	}
+	return -ENOMEM;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int i, nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	nr_pages = queue->queue_length / queue->pagesize;
+
+	for (i = 0; i < nr_pages; i += pages_per_kpage)
+		free_page((unsigned long)(queue->queue_pages)[i]);
+
+	kfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+	struct ehea_cq *cq;
+	struct h_epa epa;
+	u64 *cq_handle_ref, hret, rpage;
+	u32 act_nr_of_entries, act_pages, counter;
+	int ret;
+	void *vpage;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ehea_error("no mem for cq");
+		goto create_cq_exit0;
+	}
+
+	cq->attr.max_nr_of_cqes = nr_of_cqe;
+	cq->attr.cq_token = cq_token;
+	cq->attr.eq_handle = eq_handle;
+
+	cq->adapter = adapter;
+
+	cq_handle_ref = &cq->fw_handle;
+	act_nr_of_entries = 0;
+	act_pages = 0;
+
+	hret = ehea_h_alloc_resource_cq(adapter->handle, cq, &cq->attr,
+					&cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_cq failed");
+		goto create_cq_exit1;
+	}
+
+	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+	if (ret)
+		goto create_cq_exit2;
+
+	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+		vpage = hw_qpageit_get_inc(&cq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto create_cq_exit3;
+		}
+
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, EHEA_CQ_REGISTER_ORIG,
+					     cq->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_cq failed ehea_cq=%p "
+				   "hret=%lx counter=%i act_pages=%i",
+				   cq, hret, counter, cq->attr.nr_pages);
+			goto create_cq_exit3;
+		}
+
+		if (counter == (cq->attr.nr_pages - 1)) {
+			vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+			if ((hret != H_SUCCESS) || (vpage)) {
+				ehea_error("registration of pages not "
+					   "complete hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				ehea_error("CQ: registration of page failed "
+					   "hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		}
+	}
+
+	hw_qeit_reset(&cq->hw_queue);
+	epa = cq->epas.kernel;
+	ehea_reset_cq_ep(cq);
+	ehea_reset_cq_n1(cq);
+
+	return cq;
+
+create_cq_exit3:
+	hw_queue_dtor(&cq->hw_queue);
+
+create_cq_exit2:
+	ehea_h_destroy_cq(adapter->handle, cq, cq->fw_handle, &cq->epas);
+
+create_cq_exit1:
+	kfree(cq);
+
+create_cq_exit0:
+	return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle, hret;
+
+	if (!cq)
+		return 0;
+
+	adapter_handle = cq->adapter->handle;
+
+	/* deregister all previous registered pages */
+	hret = ehea_h_destroy_cq(adapter_handle, cq, cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy CQ failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&cq->hw_queue);
+	kfree(cq);
+
+	return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       const enum ehea_eq_type type,
+			       const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+	int ret, i;
+	u64 hret, rpage;
+	void *vpage;
+	struct ehea_eq *eq;
+
+	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+	if (!eq) {
+		ehea_error("no mem for eq");
+		return NULL;
+	}
+
+	eq->adapter = adapter;
+	eq->attr.type = type;
+	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+	eq->attr.eqe_gen = eqe_gen;
+	spin_lock_init(&eq->spinlock);
+
+	hret = ehea_h_alloc_resource_eq(adapter->handle,
+					eq, &eq->attr, &eq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_eq failed");
+		goto free_eq_mem;
+	}
+
+	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+	if (ret) {
+		ehea_error("can't allocate eq pages");
+		goto alloc_pages_failed;
+	}
+
+	for (i = 0; i < eq->attr.nr_pages; i++) {
+		vpage = hw_qpageit_get_inc(&eq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			hret = H_RESOURCE;
+			goto register_page_failed;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage(adapter->handle, 0,
+					     EHEA_EQ_REGISTER_ORIG,
+					     eq->fw_handle, rpage, 1);
+
+		if (i == (eq->attr.nr_pages - 1)) {
+			/* last page */
+			vpage = hw_qpageit_get_inc(&eq->hw_queue);
+			if ((hret != H_SUCCESS) || (vpage)) {
+				goto register_page_failed;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				goto register_page_failed;
+			}
+		}
+	}
+
+	hw_qeit_reset(&eq->hw_queue);
+	return eq;
+
+register_page_failed:
+	hw_queue_dtor(&eq->hw_queue);
+
+alloc_pages_failed:
+	ehea_h_destroy_eq(adapter->handle, eq, eq->fw_handle, &eq->epas);
+
+free_eq_mem:
+	kfree(eq);
+	return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+	struct ehea_eqe *eqe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+	u64 hret;
+	unsigned long flags;
+
+	if (!eq)
+		return 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+
+	hret = ehea_h_destroy_eq(eq->adapter->handle, eq, eq->fw_handle,
+				 &eq->epas);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_eq failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&eq->hw_queue);
+	kfree(eq);
+
+	return 0;
+}
+
+/**
+ * allocates memory for a queue and registers pages in phyp
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+			   int nr_pages, int wqe_size, int act_nr_sges,
+			   struct ehea_adapter *adapter, int h_call_q_selector)
+{
+	u64 hret, rpage;
+	int ret, cnt;
+	void *vpage;
+
+	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+	if (ret)
+		return ret;
+
+	for (cnt = 0; cnt < nr_pages; cnt++) {
+		vpage = hw_qpageit_get_inc(hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto exit0;
+		}
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, h_call_q_selector,
+					     qp->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_qp failed");
+			goto exit0;
+		}
+	}
+	hw_qeit_reset(hw_queue);
+	return 0;
+
+exit0:
+	hw_queue_dtor(hw_queue);
+	return -EINVAL;
+}
+
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+	return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+			       u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+	int ret;
+	u64 hret;
+	struct ehea_qp *qp;
+	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
+	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
+
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ehea_error("no mem for qp");
+		return NULL;
+	}
+
+	qp->adapter = adapter;
+
+	hret = ehea_h_alloc_resource_qp(adapter->handle, qp, init_attr, pd,
+					&qp->fw_handle, &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("ehea_h_alloc_resource_qp failed");
+		goto create_qp_exit1;
+	}
+
+	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+				     wqe_size_in_bytes_sq,
+				     init_attr->act_wqe_size_enc_sq, adapter,
+				     0);
+	if (ret) {
+		ehea_error("can't register for sq ret=%x", ret);
+		goto create_qp_exit2;
+	}
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+				     init_attr->nr_rq1_pages,
+				     wqe_size_in_bytes_rq1,
+				     init_attr->act_wqe_size_enc_rq1,
+				     adapter, 1);
+	if (ret) {
+		ehea_error("can't register for rq1 ret=%x", ret);
+		goto create_qp_exit3;
+	}
+
+	if (init_attr->rq_count > 1) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+					     init_attr->nr_rq2_pages,
+					     wqe_size_in_bytes_rq2,
+					     init_attr->act_wqe_size_enc_rq2,
+					     adapter, 2);
+		if (ret) {
+			ehea_error("can't register for rq2 ret=%x", ret);
+			goto create_qp_exit4;
+		}
+	}
+
+	if (init_attr->rq_count > 2) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+					     init_attr->nr_rq3_pages,
+					     wqe_size_in_bytes_rq3,
+					     init_attr->act_wqe_size_enc_rq3,
+					     adapter, 3);
+		if (ret) {
+			ehea_error("can't register for rq3 ret=%x", ret);
+			goto create_qp_exit5;
+		}
+	}
+
+	qp->init_attr = *init_attr;
+
+	return qp;
+
+create_qp_exit5:
+	hw_queue_dtor(&qp->hw_rqueue2);
+
+create_qp_exit4:
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+create_qp_exit3:
+	hw_queue_dtor(&qp->hw_squeue);
+
+create_qp_exit2:
+	hret = ehea_h_destroy_qp(adapter->handle, qp, qp->fw_handle, &qp->epas);
+
+create_qp_exit1:
+	kfree(qp);
+	return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+
+	if (!qp)
+		return 0;
+
+	hret = ehea_h_destroy_qp(qp->adapter->handle, qp, qp->fw_handle,
+				 &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_qp failed");
+		return -EINVAL;
+	}
+
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+	if (qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+	if (qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
+
+	return 0;
+}
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+	int i, k;
+	u64 hret, pt_abs, start, end, nr_pages;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	u64 *pt;
+
+	start = KERNELBASE;
+	end = (u64)high_memory;
+	nr_pages = (end - start) / PAGE_SIZE;
+
+	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pt) {
+		ehea_error("no mem");
+		return -ENOMEM;
+	}
+	pt_abs = virt_to_abs(pt);
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+					acc_ctrl, adapter->pd,
+					&adapter->mr.handle, &adapter->mr.lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EINVAL;
+	}
+
+	adapter->mr.vaddr = KERNELBASE;
+	k = 0;
+
+	while (nr_pages > 0) {
+		if (nr_pages > 1) {
+			u64 num_pages = min(nr_pages, (u64)512);
+			for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void*)(((u64)start)
+							     + ((k++) *
+								PAGE_SIZE)));
+
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, (u64)pt_abs,
+							num_pages);
+			nr_pages -= num_pages;
+		} else {
+			u64 abs_adr = virt_to_abs((void *)(((u64)start)
+							   + (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr,1);
+			nr_pages--;
+		}
+
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource_mr(adapter->handle,
+						adapter->mr.handle);
+			ehea_error("register_rpage_mr failed");
+			return -EINVAL;
+		}
+	}
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, adapter->mr.handle);
+		ehea_error("register_rpage_mr failed for last page");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter, struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages)
+{
+	u64 hret;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+	u64 pt_abs = virt_to_abs(pt);
+	u64 first_page = pt[0];
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start,
+					PAGE_SIZE * nr_pages, acc_ctrl,
+					adapter->pd, &mr->handle, &mr->lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EINVAL;
+	}
+
+	if (nr_pages > 1)
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, (u64)pt_abs, nr_pages);
+	else
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, first_page, 1);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource_mr(adapter->handle, mr->handle);
+		ehea_error("register_rpage_mr failed");
+		return -EINVAL;
+	}
+	mr->vaddr = start;
+	return 0;
+}
+
--- linux-2.6.18-rc4-git1-orig/drivers/net/ehea/ehea_qmr.h	1969-12-31 16:00:00.000000000 -0800
+++ kernel/drivers/net/ehea/ehea_qmr.h	2006-08-23 02:01:00.167228518 -0700
@@ -0,0 +1,361 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.h
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT  12
+#define EHEA_PAGESIZE   4096UL
+
+/* Some abbreviations used here:
+ *
+ * WQE  - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE  - Completion Queue Entry
+ * EQE  - Event Queue Entry
+ * MR   - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE    0x1
+#define EHEA_SWQE3_TYPE    0x2
+#define EHEA_RWQE2_TYPE    0x3
+#define EHEA_RWQE3_TYPE    0x4
+#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
+
+struct ehea_vsgentry {
+	u64 vaddr;
+	u32 l_key;
+	u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES  	252
+#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
+#define SWQE3_MAX_IMM            	224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC                   0x8000
+#define EHEA_SWQE_IP_CHECKSUM           0x4000
+#define EHEA_SWQE_TCP_CHECKSUM          0x2000
+#define EHEA_SWQE_TSO                   0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
+#define EHEA_SWQE_VLAN_INSERT           0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
+#define EHEA_SWQE_WRAP_CTL_REC          0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
+#define EHEA_SWQE_BIND                  0x0020
+#define EHEA_SWQE_PURGE                 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE		32
+
+struct ehea_swqe {
+	u64 wr_id;
+	u16 tx_control;
+	u16 vlan_tag;
+	u8 reserved1;
+	u8 ip_start;
+	u8 ip_end;
+	u8 immediate_data_length;
+	u8 tcp_offset;
+	u8 reserved2;
+	u16 tcp_end;
+	u8 wrap_tag;
+	u8 descriptors;		/* number of valid descriptors in WQE */
+	u16 reserved3;
+	u16 reserved4;
+	u16 mss;
+	u32 reserved5;
+	union {
+		/*  Send WQE Format 1 */
+		struct {
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+		} no_immediate_data;
+
+		/*  Send WQE Format 2 */
+		struct {
+			struct ehea_vsgentry sg_entry;
+			/* 0x30 */
+			u8 immediate_data[SWQE2_MAX_IMM];
+			/* 0xd0 */
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+		} immdata_desc __attribute__ ((packed));
+
+		/*  Send WQE Format 3 */
+		struct {
+			u8 immediate_data[SWQE3_MAX_IMM];
+		} immdata_nodesc;
+	} u;
+};
+
+struct ehea_rwqe {
+	u64 wr_id;		/* work request ID */
+	u8 reserved1[5];
+	u8 data_segments;
+	u16 reserved2;
+	u64 reserved3;
+	u64 reserved4;
+	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
+
+#define EHEA_CQE_TYPE_RQ           0x60
+#define EHEA_CQE_STAT_ERR_MASK     0x721F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_TCP      0x4000
+
+struct ehea_cqe {
+	u64 wr_id;		/* work request ID from WQE */
+	u8 type;
+	u8 valid;
+	u16 status;
+	u16 reserved1;
+	u16 num_bytes_transfered;
+	u16 vlan_tag;
+	u16 inet_checksum_value;
+	u8 reserved2;
+	u8 header_length;
+	u16 reserved3;
+	u16 page_offset;
+	u16 wqe_count;
+	u32 qp_token;
+	u32 timestamp;
+	u32 reserved4;
+	u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+	u64 entry;
+};
+
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+	struct ehea_page *current_page;
+
+	if (q_offset >= queue->queue_length)
+		q_offset -= queue->queue_length;
+	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+	return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	hw_qeit_inc(queue);
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	u8 valid = retvalue->valid;
+	void *pref;
+
+	if ((valid >> 7) == (queue->toggle_state & 1)) {
+		/* this is a good one */
+		hw_qeit_inc(queue);
+		pref = hw_qeit_calc(queue, queue->current_q_offset);
+		prefetch(pref);
+		prefetch(pref + 128);
+	} else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	u8 valid;
+
+	pref = hw_qeit_calc(queue, queue->current_q_offset);
+	prefetch(pref);
+	prefetch(pref + 128);
+	prefetch(pref + 256);
+	valid = retvalue->valid;
+	if (!((valid >> 7) == (queue->toggle_state & 1)))
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+	void *retvalue;
+
+
+	retvalue = hw_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+	return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8 *) retvalue;
+	if ((qe >> 7) == (queue->toggle_state & 1))
+		hw_qeit_eq_get_inc(queue);
+	else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+						   int rq_nr)
+{
+	struct hw_queue *queue;
+
+	if (rq_nr == 1)
+		queue = &qp->hw_rqueue1;
+	else if (rq_nr == 2)
+		queue = &qp->hw_rqueue2;
+	else
+		queue = &qp->hw_rqueue3;
+
+	return (struct ehea_rwqe *)hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+					      int *wqe_index)
+{
+	struct hw_queue *queue = &my_qp->hw_squeue;
+	struct ehea_swqe *wqe_p;
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = (struct ehea_swqe *)hw_qeit_get_inc(&my_qp->hw_squeue);
+	return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+	iosync();
+	ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+	return hw_qeit_get_valid(queue);
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+	hw_qeit_inc(&qp->hw_rqueue1);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+	return hw_qeit_get_inc_valid(&my_cq->hw_queue);
+};
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+	EHEA_EQ = 0,		/* event queue              */
+	EHEA_NEQ		/* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       enum ehea_eq_type type,
+			       const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+			       u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+			       struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages);
+
+#endif	/* __EHEA_QMR_H__ */


* [2.6.19 PATCH 3/7] ehea: queue management
@ 2006-09-04 10:39 Jan-Bernd Themann
  0 siblings, 0 replies; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-09-04 10:39 UTC (permalink / raw)
  To: netdev, Jeff Garzik
  Cc: Thomas Klein, Jan-Bernd Themann, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> 


 drivers/net/ehea/ehea_qmr.c |  605 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_qmr.h |  361 ++++++++++++++++++++++++++
 2 files changed, 966 insertions(+)



--- linux-2.6.18-rc6-orig/drivers/net/ehea/ehea_qmr.c	1970-01-01 01:00:00.000000000 +0100
+++ kernel/drivers/net/ehea/ehea_qmr.c	2006-09-04 11:41:18.000000000 +0200
@@ -0,0 +1,605 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.c
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		retvalue = NULL;
+	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+		ehea_error("not on pageboundary");
+		retvalue = NULL;
+	}
+	return retvalue;
+}
+
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+			  const u32 pagesize, const u32 qe_size)
+{
+	int pages_per_kpage = PAGE_SIZE / pagesize;
+	int i, k;
+
+	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+		ehea_error("pagesize conflict! kernel pagesize=%d, "
+			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+		return -EINVAL;
+	}
+
+	queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	if (!queue->queue_pages) {
+		ehea_error("no mem for queue_pages");
+		return -ENOMEM;
+	}
+
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hea queue pages
+	 */
+	i = 0;
+	while (i < nr_of_pages) {
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto exit0;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
+			kpage += pagesize;
+			i++;
+		}
+	}
+
+	queue->current_q_offset = 0;
+	queue->qe_size = qe_size;
+	queue->pagesize = pagesize;
+	queue->toggle_state = 1;
+
+	return 0;
+
+exit0:
+	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+		if (!(queue->queue_pages)[i])
+			break;
+		free_page((unsigned long)(queue->queue_pages)[i]);
+	}
+	return -ENOMEM;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	int i, nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	nr_pages = queue->queue_length / queue->pagesize;
+
+	for (i = 0; i < nr_pages; i += pages_per_kpage)
+		free_page((unsigned long)(queue->queue_pages)[i]);
+
+	kfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+	struct ehea_cq *cq;
+	struct h_epa epa;
+	u64 *cq_handle_ref, hret, rpage;
+	u32 act_nr_of_entries, act_pages, counter;
+	int ret;
+	void *vpage;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ehea_error("no mem for cq");
+		goto create_cq_exit0;
+	}
+
+	cq->attr.max_nr_of_cqes = nr_of_cqe;
+	cq->attr.cq_token = cq_token;
+	cq->attr.eq_handle = eq_handle;
+
+	cq->adapter = adapter;
+
+	cq_handle_ref = &cq->fw_handle;
+	act_nr_of_entries = 0;
+	act_pages = 0;
+
+	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
+					&cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_cq failed");
+		goto create_cq_exit1;
+	}
+
+	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+	if (ret)
+		goto create_cq_exit2;
+
+	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+		vpage = hw_qpageit_get_inc(&cq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto create_cq_exit3;
+		}
+
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, EHEA_CQ_REGISTER_ORIG,
+					     cq->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_cq failed ehea_cq=%p "
+				   "hret=%lx counter=%i act_pages=%i",
+				   cq, hret, counter, cq->attr.nr_pages);
+			goto create_cq_exit3;
+		}
+
+		if (counter == (cq->attr.nr_pages - 1)) {
+			vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+			if ((hret != H_SUCCESS) || (vpage)) {
+				ehea_error("registration of pages not "
+					   "complete hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				ehea_error("CQ: registration of page failed "
+					   "hret=%lx\n", hret);
+				goto create_cq_exit3;
+			}
+		}
+	}
+
+	hw_qeit_reset(&cq->hw_queue);
+	epa = cq->epas.kernel;
+	ehea_reset_cq_ep(cq);
+	ehea_reset_cq_n1(cq);
+
+	return cq;
+
+create_cq_exit3:
+	hw_queue_dtor(&cq->hw_queue);
+
+create_cq_exit2:
+	ehea_h_free_resource(adapter->handle, cq->fw_handle);
+
+create_cq_exit1:
+	kfree(cq);
+
+create_cq_exit0:
+	return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle, hret;
+
+	if (!cq)
+		return 0;
+
+	adapter_handle = cq->adapter->handle;
+
+	/* deregister all previous registered pages */
+	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy CQ failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&cq->hw_queue);
+	kfree(cq);
+
+	return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       const enum ehea_eq_type type,
+			       const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+	int ret, i;
+	u64 hret, rpage;
+	void *vpage;
+	struct ehea_eq *eq;
+
+	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+	if (!eq) {
+		ehea_error("no mem for eq");
+		return NULL;
+	}
+
+	eq->adapter = adapter;
+	eq->attr.type = type;
+	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+	eq->attr.eqe_gen = eqe_gen;
+	spin_lock_init(&eq->spinlock);
+
+	hret = ehea_h_alloc_resource_eq(adapter->handle,
+					&eq->attr, &eq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_eq failed");
+		goto free_eq_mem;
+	}
+
+	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+	if (ret) {
+		ehea_error("can't allocate eq pages");
+		goto alloc_pages_failed;
+	}
+
+	for (i = 0; i < eq->attr.nr_pages; i++) {
+		vpage = hw_qpageit_get_inc(&eq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			hret = H_RESOURCE;
+			goto register_page_failed;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage(adapter->handle, 0,
+					     EHEA_EQ_REGISTER_ORIG,
+					     eq->fw_handle, rpage, 1);
+
+		if (i == (eq->attr.nr_pages - 1)) {
+			/* last page */
+			vpage = hw_qpageit_get_inc(&eq->hw_queue);
+			if ((hret != H_SUCCESS) || (vpage)) {
+				goto register_page_failed;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				goto register_page_failed;
+			}
+		}
+	}
+
+	hw_qeit_reset(&eq->hw_queue);
+	return eq;
+
+register_page_failed:
+	hw_queue_dtor(&eq->hw_queue);
+
+alloc_pages_failed:
+	ehea_h_free_resource(adapter->handle, eq->fw_handle);
+
+free_eq_mem:
+	kfree(eq);
+	return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+	struct ehea_eqe *eqe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+	u64 hret;
+	unsigned long flags;
+
+	if (!eq)
+		return 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+
+	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_eq failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&eq->hw_queue);
+	kfree(eq);
+
+	return 0;
+}
+
+/**
+ * allocates memory for a queue and registers pages in phyp
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+			   int nr_pages, int wqe_size, int act_nr_sges,
+			   struct ehea_adapter *adapter, int h_call_q_selector)
+{
+	u64 hret, rpage;
+	int ret, cnt;
+	void *vpage;
+
+	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+	if (ret)
+		return ret;
+
+	for (cnt = 0; cnt < nr_pages; cnt++) {
+		vpage = hw_qpageit_get_inc(hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto exit0;
+		}
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, h_call_q_selector,
+					     qp->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_qp failed");
+			goto exit0;
+		}
+	}
+	hw_qeit_reset(hw_queue);
+	return 0;
+
+exit0:
+	hw_queue_dtor(hw_queue);
+	return -EIO;
+}
+
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+	return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+			       u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+	int ret;
+	u64 hret;
+	struct ehea_qp *qp;
+	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
+	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
+
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ehea_error("no mem for qp");
+		return NULL;
+	}
+
+	qp->adapter = adapter;
+
+	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
+					&qp->fw_handle, &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("ehea_h_alloc_resource_qp failed");
+		goto create_qp_exit1;
+	}
+
+	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+				     wqe_size_in_bytes_sq,
+				     init_attr->act_wqe_size_enc_sq, adapter,
+				     0);
+	if (ret) {
+		ehea_error("can't register for sq ret=%x", ret);
+		goto create_qp_exit2;
+	}
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+				     init_attr->nr_rq1_pages,
+				     wqe_size_in_bytes_rq1,
+				     init_attr->act_wqe_size_enc_rq1,
+				     adapter, 1);
+	if (ret) {
+		ehea_error("can't register for rq1 ret=%x", ret);
+		goto create_qp_exit3;
+	}
+
+	if (init_attr->rq_count > 1) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+					     init_attr->nr_rq2_pages,
+					     wqe_size_in_bytes_rq2,
+					     init_attr->act_wqe_size_enc_rq2,
+					     adapter, 2);
+		if (ret) {
+			ehea_error("can't register for rq2 ret=%x", ret);
+			goto create_qp_exit4;
+		}
+	}
+
+	if (init_attr->rq_count > 2) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+					     init_attr->nr_rq3_pages,
+					     wqe_size_in_bytes_rq3,
+					     init_attr->act_wqe_size_enc_rq3,
+					     adapter, 3);
+		if (ret) {
+			ehea_error("can't register for rq3 ret=%x", ret);
+			goto create_qp_exit5;
+		}
+	}
+
+	qp->init_attr = *init_attr;
+
+	return qp;
+
+create_qp_exit5:
+	hw_queue_dtor(&qp->hw_rqueue2);
+
+create_qp_exit4:
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+create_qp_exit3:
+	hw_queue_dtor(&qp->hw_squeue);
+
+create_qp_exit2:
+	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
+	ehea_h_free_resource(adapter->handle, qp->fw_handle);
+
+create_qp_exit1:
+	kfree(qp);
+	return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+
+	if (!qp)
+		return 0;
+
+	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_qp failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+	if (qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+	if (qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
+
+	return 0;
+}
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+	int i, k;
+	u64 hret, pt_abs, start, end, nr_pages;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	u64 *pt;
+
+	start = KERNELBASE;
+	end = (u64)high_memory;
+	nr_pages = (end - start) / PAGE_SIZE;
+
+	pt =  kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pt) {
+		ehea_error("no mem");
+		return -ENOMEM;
+	}
+	pt_abs = virt_to_abs(pt);
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+					acc_ctrl, adapter->pd,
+					&adapter->mr.handle, &adapter->mr.lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EIO;
+	}
+
+	adapter->mr.vaddr = KERNELBASE;
+	k = 0;
+
+	while (nr_pages > 0) {
+		if (nr_pages > 1) {
+			u64 num_pages = min(nr_pages, (u64)512);
+			for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void*)(((u64)start)
+							     + ((k++) *
+								PAGE_SIZE)));
+
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, (u64)pt_abs,
+							num_pages);
+			nr_pages -= num_pages;
+		} else {
+			u64 abs_adr = virt_to_abs((void *)(((u64)start)
+							   + (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr,1);
+			nr_pages--;
+		}
+
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource(adapter->handle,
+						adapter->mr.handle);
+			ehea_error("register_rpage_mr failed");
+			return -EIO;
+		}
+	}
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource(adapter->handle, adapter->mr.handle);
+		ehea_error("register_rpage_mr failed for last page");
+		return -EIO;
+	}
+	return 0;
+}
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter, struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages)
+{
+	u64 hret;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+	u64 pt_abs = virt_to_abs(pt);
+	u64 first_page = pt[0];
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start,
+					PAGE_SIZE * nr_pages, acc_ctrl,
+					adapter->pd, &mr->handle, &mr->lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EIO;
+	}
+
+	if (nr_pages > 1)
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, (u64)pt_abs, nr_pages);
+	else
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, first_page, 1);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource(adapter->handle, mr->handle);
+		ehea_error("register_rpage_mr failed");
+		return -EIO;
+	}
+	mr->vaddr = start;
+	return 0;
+}
--- linux-2.6.18-rc6-orig/drivers/net/ehea/ehea_qmr.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel/drivers/net/ehea/ehea_qmr.h	2006-09-04 11:41:17.000000000 +0200
@@ -0,0 +1,361 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.h
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT  12
+#define EHEA_PAGESIZE   4096UL
+
+/* Some abbreviations used here:
+ *
+ * WQE  - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE  - Completion Queue Entry
+ * EQE  - Event Queue Entry
+ * MR   - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE    0x1
+#define EHEA_SWQE3_TYPE    0x2
+#define EHEA_RWQE2_TYPE    0x3
+#define EHEA_RWQE3_TYPE    0x4
+#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
+
+struct ehea_vsgentry {
+	u64 vaddr;
+	u32 l_key;
+	u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES  	252
+#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
+#define SWQE3_MAX_IMM            	224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC                   0x8000
+#define EHEA_SWQE_IP_CHECKSUM           0x4000
+#define EHEA_SWQE_TCP_CHECKSUM          0x2000
+#define EHEA_SWQE_TSO                   0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
+#define EHEA_SWQE_VLAN_INSERT           0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
+#define EHEA_SWQE_WRAP_CTL_REC          0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
+#define EHEA_SWQE_BIND                  0x0020
+#define EHEA_SWQE_PURGE                 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE		32
+
+struct ehea_swqe {
+	u64 wr_id;
+	u16 tx_control;
+	u16 vlan_tag;
+	u8 reserved1;
+	u8 ip_start;
+	u8 ip_end;
+	u8 immediate_data_length;
+	u8 tcp_offset;
+	u8 reserved2;
+	u16 tcp_end;
+	u8 wrap_tag;
+	u8 descriptors;		/* number of valid descriptors in WQE */
+	u16 reserved3;
+	u16 reserved4;
+	u16 mss;
+	u32 reserved5;
+	union {
+		/*  Send WQE Format 1 */
+		struct {
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+		} no_immediate_data;
+
+		/*  Send WQE Format 2 */
+		struct {
+			struct ehea_vsgentry sg_entry;
+			/* 0x30 */
+			u8 immediate_data[SWQE2_MAX_IMM];
+			/* 0xd0 */
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+		} immdata_desc __attribute__ ((packed));
+
+		/*  Send WQE Format 3 */
+		struct {
+			u8 immediate_data[SWQE3_MAX_IMM];
+		} immdata_nodesc;
+	} u;
+};
+
+struct ehea_rwqe {
+	u64 wr_id;		/* work request ID */
+	u8 reserved1[5];
+	u8 data_segments;
+	u16 reserved2;
+	u64 reserved3;
+	u64 reserved4;
+	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
+
+#define EHEA_CQE_TYPE_RQ           0x60
+#define EHEA_CQE_STAT_ERR_MASK     0x721F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_TCP      0x4000
+
+struct ehea_cqe {
+	u64 wr_id;		/* work request ID from WQE */
+	u8 type;
+	u8 valid;
+	u16 status;
+	u16 reserved1;
+	u16 num_bytes_transfered;
+	u16 vlan_tag;
+	u16 inet_checksum_value;
+	u8 reserved2;
+	u8 header_length;
+	u16 reserved3;
+	u16 page_offset;
+	u16 wqe_count;
+	u32 qp_token;
+	u32 timestamp;
+	u32 reserved4;
+	u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+	u64 entry;
+};
+
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+	struct ehea_page *current_page;
+
+	if (q_offset >= queue->queue_length)
+		q_offset -= queue->queue_length;
+	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+	return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	hw_qeit_inc(queue);
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	u8 valid = retvalue->valid;
+	void *pref;
+
+	if ((valid >> 7) == (queue->toggle_state & 1)) {
+		/* this is a good one */
+		hw_qeit_inc(queue);
+		pref = hw_qeit_calc(queue, queue->current_q_offset);
+		prefetch(pref);
+		prefetch(pref + 128);
+	} else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	u8 valid;
+
+	pref = hw_qeit_calc(queue, queue->current_q_offset);
+	prefetch(pref);
+	prefetch(pref + 128);
+	prefetch(pref + 256);
+	valid = retvalue->valid;
+	if (!((valid >> 7) == (queue->toggle_state & 1)))
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+	void *retvalue;
+
+
+	retvalue = hw_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+	return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8 *) retvalue;
+	if ((qe >> 7) == (queue->toggle_state & 1))
+		hw_qeit_eq_get_inc(queue);
+	else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+						   int rq_nr)
+{
+	struct hw_queue *queue;
+
+	if (rq_nr == 1)
+		queue = &qp->hw_rqueue1;
+	else if (rq_nr == 2)
+		queue = &qp->hw_rqueue2;
+	else
+		queue = &qp->hw_rqueue3;
+
+	return (struct ehea_rwqe *)hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+					      int *wqe_index)
+{
+	struct hw_queue *queue = &my_qp->hw_squeue;
+	struct ehea_swqe *wqe_p;
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = (struct ehea_swqe *)hw_qeit_get_inc(&my_qp->hw_squeue);
+	return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+	iosync();
+	ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+	return hw_qeit_get_valid(queue);
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+	hw_qeit_inc(&qp->hw_rqueue1);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+	return hw_qeit_get_inc_valid(&my_cq->hw_queue);
+}
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+	EHEA_EQ = 0,		/* event queue              */
+	EHEA_NEQ		/* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       enum ehea_eq_type type,
+			       const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+			       u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+			       struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages);
+
+#endif	/* __EHEA_QMR_H__ */

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [2.6.19 PATCH 3/7] ehea: queue management
@ 2006-09-06 13:32 Jan-Bernd Themann
  0 siblings, 0 replies; 14+ messages in thread
From: Jan-Bernd Themann @ 2006-09-06 13:32 UTC (permalink / raw)
  To: netdev
  Cc: Thomas Klein, Jan-Bernd Themann, linux-kernel, linux-ppc,
	Christoph Raisch, Marcus Eder

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> 


 drivers/net/ehea/ehea_qmr.c |  604 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ehea/ehea_qmr.h |  362 ++++++++++++++++++++++++++
 2 files changed, 966 insertions(+)



--- linux-2.6.18-rc6-orig/drivers/net/ehea/ehea_qmr.c	1970-01-01 01:00:00.000000000 +0100
+++ kernel/drivers/net/ehea/ehea_qmr.c	2006-09-06 15:53:42.000000000 +0200
@@ -0,0 +1,604 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.c
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
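+/*
+ * hw_qpageit_get_inc - return the queue page at the current offset and
+ * advance the iterator by one hardware page. Returns NULL once the end of
+ * the queue is passed or if the page is not aligned to EHEA_PAGESIZE.
+ */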
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		retvalue = NULL;
+	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+		ehea_error("not on pageboundary");
+		retvalue = NULL;
+	}
+	return retvalue;
+}
+
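+/*
+ * hw_queue_ctor - allocate and initialize the backing store for a hardware
+ * queue: nr_of_pages queue pages of pagesize bytes each, carved out of
+ * whole zeroed kernel pages
+ */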
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+			  const u32 pagesize, const u32 qe_size)
+{
+	int pages_per_kpage = PAGE_SIZE / pagesize;
+	int i, k;
+
+	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+		ehea_error("pagesize conflict! kernel pagesize=%d, "
+			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+		return -EINVAL;
+	}
+
+	queue->queue_length = nr_of_pages * pagesize;
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	if (!queue->queue_pages) {
+		ehea_error("no mem for queue_pages");
+		return -ENOMEM;
+	}
+
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hea queue pages
+	 */
+	i = 0;
+	while (i < nr_of_pages) {
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		if (!kpage)
+			goto out_nomem;
+		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+			(queue->queue_pages)[i] = (struct ehea_page*)kpage;
+			kpage += pagesize;
+			i++;
+		}
+	}
+
+	queue->current_q_offset = 0;
+	queue->qe_size = qe_size;
+	queue->pagesize = pagesize;
+	queue->toggle_state = 1;
+
+	return 0;
+out_nomem:
+	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+		if (!(queue->queue_pages)[i])
+			break;
+		free_page((unsigned long)(queue->queue_pages)[i]);
+	}
+	kfree(queue->queue_pages);
+	return -ENOMEM;
+}
+
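+/*
+ * hw_queue_dtor - free the kernel pages backing a hardware queue as well
+ * as the page pointer array allocated by hw_queue_ctor
+ */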
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+	int pages_per_kpage;
+	int i, nr_pages;
+
+	if (!queue || !queue->queue_pages)
+		return;
+
+	pages_per_kpage = PAGE_SIZE / queue->pagesize;
+	nr_pages = queue->queue_length / queue->pagesize;
+
+	for (i = 0; i < nr_pages; i += pages_per_kpage)
+		free_page((unsigned long)(queue->queue_pages)[i]);
+
+	kfree(queue->queue_pages);
+}
+
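+/*
+ * ehea_create_cq - allocate a completion queue resource via the hypervisor,
+ * build its hardware queue and register every queue page with the firmware
+ */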
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+	struct ehea_cq *cq;
+	struct h_epa epa;
+	u64 *cq_handle_ref, hret, rpage;
+	u32 act_nr_of_entries, act_pages, counter;
+	int ret;
+	void *vpage;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		ehea_error("no mem for cq");
+		goto out_nomem;
+	}
+
+	cq->attr.max_nr_of_cqes = nr_of_cqe;
+	cq->attr.cq_token = cq_token;
+	cq->attr.eq_handle = eq_handle;
+
+	cq->adapter = adapter;
+
+	cq_handle_ref = &cq->fw_handle;
+	act_nr_of_entries = 0;
+	act_pages = 0;
+
+	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
+					&cq->fw_handle, &cq->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_cq failed");
+		goto out_freemem;
+	}
+
+	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+	if (ret)
+		goto out_freeres;
+
+	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+		vpage = hw_qpageit_get_inc(&cq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto out_kill_hwq;
+		}
+
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, EHEA_CQ_REGISTER_ORIG,
+					     cq->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_cq failed ehea_cq=%p "
+				   "hret=%lx counter=%i act_pages=%i",
+				   cq, hret, counter, cq->attr.nr_pages);
+			goto out_kill_hwq;
+		}
+
+		if (counter == (cq->attr.nr_pages - 1)) {
+			vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+			if ((hret != H_SUCCESS) || (vpage)) {
+				ehea_error("registration of pages not "
+					   "complete hret=%lx\n", hret);
+				goto out_kill_hwq;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				ehea_error("CQ: registration of page failed "
+					   "hret=%lx\n", hret);
+				goto out_kill_hwq;
+			}
+		}
+	}
+
+	hw_qeit_reset(&cq->hw_queue);
+	epa = cq->epas.kernel;
+	ehea_reset_cq_ep(cq);
+	ehea_reset_cq_n1(cq);
+
+	return cq;
+
+out_kill_hwq:
+	hw_queue_dtor(&cq->hw_queue);
+
+out_freeres:
+	ehea_h_free_resource(adapter->handle, cq->fw_handle);
+
+out_freemem:
+	kfree(cq);
+
+out_nomem:
+	return NULL;
+}
+
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+	u64 adapter_handle, hret;
+
+	if (!cq)
+		return 0;
+
+	adapter_handle = cq->adapter->handle;
+
+	/* deregister all previously registered pages */
+	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy CQ failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&cq->hw_queue);
+	kfree(cq);
+
+	return 0;
+}
+
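+/*
+ * ehea_create_eq - allocate an event queue resource via the hypervisor,
+ * build its hardware queue and register every queue page with the firmware
+ */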
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       const enum ehea_eq_type type,
+			       const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+	int ret, i;
+	u64 hret, rpage;
+	void *vpage;
+	struct ehea_eq *eq;
+
+	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+	if (!eq) {
+		ehea_error("no mem for eq");
+		return NULL;
+	}
+
+	eq->adapter = adapter;
+	eq->attr.type = type;
+	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+	eq->attr.eqe_gen = eqe_gen;
+	spin_lock_init(&eq->spinlock);
+
+	hret = ehea_h_alloc_resource_eq(adapter->handle,
+					&eq->attr, &eq->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_eq failed");
+		goto out_freemem;
+	}
+
+	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
+			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+	if (ret) {
+		ehea_error("can't allocate eq pages");
+		goto out_freeres;
+	}
+
+	for (i = 0; i < eq->attr.nr_pages; i++) {
+		vpage = hw_qpageit_get_inc(&eq->hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			hret = H_RESOURCE;
+			goto out_kill_hwq;
+		}
+
+		rpage = virt_to_abs(vpage);
+
+		hret = ehea_h_register_rpage(adapter->handle, 0,
+					     EHEA_EQ_REGISTER_ORIG,
+					     eq->fw_handle, rpage, 1);
+
+		if (i == (eq->attr.nr_pages - 1)) {
+			/* last page */
+			vpage = hw_qpageit_get_inc(&eq->hw_queue);
+			if ((hret != H_SUCCESS) || (vpage)) {
+				goto out_kill_hwq;
+			}
+		} else {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+				goto out_kill_hwq;
+			}
+		}
+	}
+
+	hw_qeit_reset(&eq->hw_queue);
+	return eq;
+
+out_kill_hwq:
+	hw_queue_dtor(&eq->hw_queue);
+
+out_freeres:
+	ehea_h_free_resource(adapter->handle, eq->fw_handle);
+
+out_freemem:
+	kfree(eq);
+	return NULL;
+}
+
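+/*
+ * ehea_poll_eq - return the next valid event queue entry, or NULL if none
+ * is pending; access is serialized by the EQ spinlock
+ */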
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+	struct ehea_eqe *eqe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	return eqe;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+	u64 hret;
+	unsigned long flags;
+
+	if (!eq)
+		return 0;
+
+	spin_lock_irqsave(&eq->spinlock, flags);
+
+	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle);
+	spin_unlock_irqrestore(&eq->spinlock, flags);
+
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_eq failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&eq->hw_queue);
+	kfree(eq);
+
+	return 0;
+}
+
+/**
+ * ehea_qp_alloc_register - allocate memory for one QP queue and register
+ * its pages with the hypervisor (phyp)
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+			   int nr_pages, int wqe_size, int act_nr_sges,
+			   struct ehea_adapter *adapter, int h_call_q_selector)
+{
+	u64 hret, rpage;
+	int ret, cnt;
+	void *vpage;
+
+	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+	if (ret)
+		return ret;
+
+	for (cnt = 0; cnt < nr_pages; cnt++) {
+		vpage = hw_qpageit_get_inc(hw_queue);
+		if (!vpage) {
+			ehea_error("hw_qpageit_get_inc failed");
+			goto out_kill_hwq;
+		}
+		rpage = virt_to_abs(vpage);
+		hret = ehea_h_register_rpage(adapter->handle,
+					     0, h_call_q_selector,
+					     qp->fw_handle, rpage, 1);
+		if (hret < H_SUCCESS) {
+			ehea_error("register_rpage_qp failed");
+			goto out_kill_hwq;
+		}
+	}
+	hw_qeit_reset(hw_queue);
+	return 0;
+
+out_kill_hwq:
+	hw_queue_dtor(hw_queue);
+	return -EIO;
+}
+
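+/*
+ * map_wqe_size - decode the encoded WQE size used by the hardware:
+ * a power-of-two multiple of 128 bytes
+ */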
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+	return 128 << wqe_enc_size;
+}
+
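+/*
+ * ehea_create_qp - allocate a queue pair resource via the hypervisor and
+ * set up its send queue and up to three receive queues, depending on
+ * the requested rq_count
+ */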
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+			       u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+	int ret;
+	u64 hret;
+	struct ehea_qp *qp;
+	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
+	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
+
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		ehea_error("no mem for qp");
+		return NULL;
+	}
+
+	qp->adapter = adapter;
+
+	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
+					&qp->fw_handle, &qp->epas);
+	if (hret != H_SUCCESS) {
+		ehea_error("ehea_h_alloc_resource_qp failed");
+		goto out_freemem;
+	}
+
+	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+				     wqe_size_in_bytes_sq,
+				     init_attr->act_wqe_size_enc_sq, adapter,
+				     0);
+	if (ret) {
+		ehea_error("can't register for sq ret=%x", ret);
+		goto out_freeres;
+	}
+
+	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+				     init_attr->nr_rq1_pages,
+				     wqe_size_in_bytes_rq1,
+				     init_attr->act_wqe_size_enc_rq1,
+				     adapter, 1);
+	if (ret) {
+		ehea_error("can't register for rq1 ret=%x", ret);
+		goto out_kill_hwsq;
+	}
+
+	if (init_attr->rq_count > 1) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+					     init_attr->nr_rq2_pages,
+					     wqe_size_in_bytes_rq2,
+					     init_attr->act_wqe_size_enc_rq2,
+					     adapter, 2);
+		if (ret) {
+			ehea_error("can't register for rq2 ret=%x", ret);
+			goto out_kill_hwr1q;
+		}
+	}
+
+	if (init_attr->rq_count > 2) {
+		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+					     init_attr->nr_rq3_pages,
+					     wqe_size_in_bytes_rq3,
+					     init_attr->act_wqe_size_enc_rq3,
+					     adapter, 3);
+		if (ret) {
+			ehea_error("can't register for rq3 ret=%x", ret);
+			goto out_kill_hwr2q;
+		}
+	}
+
+	qp->init_attr = *init_attr;
+
+	return qp;
+
+out_kill_hwr2q:
+	hw_queue_dtor(&qp->hw_rqueue2);
+
+out_kill_hwr1q:
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+out_kill_hwsq:
+	hw_queue_dtor(&qp->hw_squeue);
+
+out_freeres:
+	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
+	ehea_h_free_resource(adapter->handle, qp->fw_handle);
+
+out_freemem:
+	kfree(qp);
+	return NULL;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr;
+
+	if (!qp)
+		return 0;
+
+	qp_attr = &qp->init_attr;
+
+	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle);
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy_qp failed");
+		return -EIO;
+	}
+
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
+
+	if (qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+	if (qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
+
+	return 0;
+}
+
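+/*
+ * ehea_reg_mr_adapter - register the kernel address range from KERNELBASE
+ * up to high_memory as a single memory region, passing up to 512 page
+ * addresses to the hypervisor per call
+ */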
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
+{
+	int i, k;
+	u64 hret, pt_abs, start, end, nr_pages;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+	u64 *pt;
+
+	start = KERNELBASE;
+	end = (u64)high_memory;
+	nr_pages = (end - start) / PAGE_SIZE;
+
+	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!pt) {
+		ehea_error("no mem");
+		return -ENOMEM;
+	}
+	pt_abs = virt_to_abs(pt);
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+					acc_ctrl, adapter->pd,
+					&adapter->mr.handle, &adapter->mr.lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		kfree(pt);
+		return -EIO;
+	}
+
+	adapter->mr.vaddr = KERNELBASE;
+	k = 0;
+
+	while (nr_pages > 0) {
+		if (nr_pages > 1) {
+			u64 num_pages = min(nr_pages, (u64)512);
+			for (i = 0; i < num_pages; i++)
+				pt[i] = virt_to_abs((void*)(((u64)start)
+							     + ((k++) *
+								PAGE_SIZE)));
+
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, (u64)pt_abs,
+							num_pages);
+			nr_pages -= num_pages;
+		} else {
+			u64 abs_adr = virt_to_abs((void*)(((u64)start)
+							   + (k * PAGE_SIZE)));
+			hret = ehea_h_register_rpage_mr(adapter->handle,
+							adapter->mr.handle, 0,
+							0, abs_adr, 1);
+			nr_pages--;
+		}
+
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
+			ehea_h_free_resource(adapter->handle,
+						adapter->mr.handle);
+			ehea_error("register_rpage_mr failed");
+			kfree(pt);
+			return -EIO;
+		}
+	}
+
+	kfree(pt);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource(adapter->handle, adapter->mr.handle);
+		ehea_error("register_rpage_mr failed for last page");
+		return -EIO;
+	}
+
+	return 0;
+}
+
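+/*
+ * ehea_reg_mr_pages - register a memory region covering nr_pages pages
+ * whose absolute page addresses are listed in the page table pt
+ */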
+int ehea_reg_mr_pages(struct ehea_adapter *adapter, struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages)
+{
+	u64 hret;
+	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+	u64 pt_abs = virt_to_abs(pt);
+	u64 first_page = pt[0];
+
+	hret = ehea_h_alloc_resource_mr(adapter->handle, start,
+					PAGE_SIZE * nr_pages, acc_ctrl,
+					adapter->pd, &mr->handle, &mr->lkey);
+	if (hret != H_SUCCESS) {
+		ehea_error("alloc_resource_mr failed");
+		return -EIO;
+	}
+
+	if (nr_pages > 1)
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, (u64)pt_abs, nr_pages);
+	else
+		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle,
+						0, 0, first_page, 1);
+
+	if (hret != H_SUCCESS) {
+		ehea_h_free_resource(adapter->handle, mr->handle);
+		ehea_error("register_rpage_mr failed");
+		return -EIO;
+	}
+	mr->vaddr = start;
+	return 0;
+}
--- linux-2.6.18-rc6-orig/drivers/net/ehea/ehea_qmr.h	1970-01-01 01:00:00.000000000 +0100
+++ kernel/drivers/net/ehea/ehea_qmr.h	2006-09-06 15:53:42.000000000 +0200
@@ -0,0 +1,362 @@
+/*
+ *  linux/drivers/net/ehea/ehea_qmr.h
+ *
+ *  eHEA ethernet device driver for IBM eServer System p
+ *
+ *  (C) Copyright IBM Corp. 2006
+ *
+ *  Authors:
+ *       Christoph Raisch <raisch@de.ibm.com>
+ *       Jan-Bernd Themann <themann@de.ibm.com>
+ *       Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT  12
+#define EHEA_PAGESIZE   4096UL
+
+/* Some abbreviations used here:
+ *
+ * WQE  - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE  - Completion Queue Entry
+ * EQE  - Event Queue Entry
+ * MR   - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE    0x1
+#define EHEA_SWQE3_TYPE    0x2
+#define EHEA_RWQE2_TYPE    0x3
+#define EHEA_RWQE3_TYPE    0x4
+#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
+
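+/* virtual scatter/gather entry: virtual address, local key (l_key) and
+ * length of one data segment
+ */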
+struct ehea_vsgentry {
+	u64 vaddr;
+	u32 l_key;
+	u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES  	252
+#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
+#define SWQE3_MAX_IMM            	224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC                   0x8000
+#define EHEA_SWQE_IP_CHECKSUM           0x4000
+#define EHEA_SWQE_TCP_CHECKSUM          0x2000
+#define EHEA_SWQE_TSO                   0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
+#define EHEA_SWQE_VLAN_INSERT           0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
+#define EHEA_SWQE_WRAP_CTL_REC          0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
+#define EHEA_SWQE_BIND                  0x0020
+#define EHEA_SWQE_PURGE                 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE		32
+
+struct ehea_swqe {
+	u64 wr_id;
+	u16 tx_control;
+	u16 vlan_tag;
+	u8 reserved1;
+	u8 ip_start;
+	u8 ip_end;
+	u8 immediate_data_length;
+	u8 tcp_offset;
+	u8 reserved2;
+	u16 tcp_end;
+	u8 wrap_tag;
+	u8 descriptors;		/* number of valid descriptors in WQE */
+	u16 reserved3;
+	u16 reserved4;
+	u16 mss;
+	u32 reserved5;
+	union {
+		/*  Send WQE Format 1 */
+		struct {
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+		} no_immediate_data;
+
+		/*  Send WQE Format 2 */
+		struct {
+			struct ehea_vsgentry sg_entry;
+			/* 0x30 */
+			u8 immediate_data[SWQE2_MAX_IMM];
+			/* 0xd0 */
+			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+		} immdata_desc __attribute__ ((packed));
+
+		/*  Send WQE Format 3 */
+		struct {
+			u8 immediate_data[SWQE3_MAX_IMM];
+		} immdata_nodesc;
+	} u;
+};
+
+struct ehea_rwqe {
+	u64 wr_id;		/* work request ID */
+	u8 reserved1[5];
+	u8 data_segments;
+	u16 reserved2;
+	u64 reserved3;
+	u64 reserved4;
+	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
+
+#define EHEA_CQE_TYPE_RQ           0x60
+#define EHEA_CQE_STAT_ERR_MASK     0x721F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_TCP      0x4000
+
+struct ehea_cqe {
+	u64 wr_id;		/* work request ID from WQE */
+	u8 type;
+	u8 valid;
+	u16 status;
+	u16 reserved1;
+	u16 num_bytes_transfered;
+	u16 vlan_tag;
+	u16 inet_checksum_value;
+	u8 reserved2;
+	u8 header_length;
+	u16 reserved3;
+	u16 page_offset;
+	u16 wqe_count;
+	u32 qp_token;
+	u32 timestamp;
+	u32 reserved4;
+	u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
+
+struct ehea_eqe {
+	u64 entry;
+};
+
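+/*
+ * hw_qeit_*: inline iterators over the entries of a hardware queue.
+ * The iterator wraps around at queue_length and flips toggle_state on
+ * every wrap; an entry is considered valid when its valid bit matches
+ * the current toggle_state, i.e. it was written by hardware in the
+ * current pass through the queue.
+ */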
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+	struct ehea_page *current_page;
+
+	if (q_offset >= queue->queue_length)
+		q_offset -= queue->queue_length;
+	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+	return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+}
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	hw_qeit_inc(queue);
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	u8 valid = retvalue->valid;
+	void *pref;
+
+	if ((valid >> 7) == (queue->toggle_state & 1)) {
+		/* this is a good one */
+		hw_qeit_inc(queue);
+		pref = hw_qeit_calc(queue, queue->current_q_offset);
+		prefetch(pref);
+		prefetch(pref + 128);
+	} else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+	struct ehea_cqe *retvalue = hw_qeit_get(queue);
+	void *pref;
+	u8 valid;
+
+	pref = hw_qeit_calc(queue, queue->current_q_offset);
+	prefetch(pref);
+	prefetch(pref + 128);
+	prefetch(pref + 256);
+	valid = retvalue->valid;
+	if (!((valid >> 7) == (queue->toggle_state & 1)))
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+	void *retvalue;
+
+	retvalue = hw_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+	return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+	void *retvalue = hw_qeit_get(queue);
+	u32 qe = *(u8*)retvalue;
+	if ((qe >> 7) == (queue->toggle_state & 1))
+		hw_qeit_eq_get_inc(queue);
+	else
+		retvalue = NULL;
+	return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+						   int rq_nr)
+{
+	struct hw_queue *queue;
+
+	if (rq_nr == 1)
+		queue = &qp->hw_rqueue1;
+	else if (rq_nr == 2)
+		queue = &qp->hw_rqueue2;
+	else
+		queue = &qp->hw_rqueue3;
+
+	return hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+					      int *wqe_index)
+{
+	struct hw_queue *queue = &my_qp->hw_squeue;
+	struct ehea_swqe *wqe_p;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);
+
+	return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+	iosync();
+	ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+	struct hw_queue *queue = &qp->hw_rqueue1;
+
+	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+	return hw_qeit_get_valid(queue);
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+	hw_qeit_inc(&qp->hw_rqueue1);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+	return hw_qeit_get_inc_valid(&my_cq->hw_queue);
+}
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+	EHEA_EQ = 0,		/* event queue              */
+	EHEA_NEQ		/* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+			       enum ehea_eq_type type,
+			       const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+			       u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+			       struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+
+int ehea_reg_mr_pages(struct ehea_adapter *adapter,
+		      struct ehea_mr *mr,
+		      u64 start, u64 *pt, int nr_pages);
+
+#endif	/* __EHEA_QMR_H__ */

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2006-09-06 14:15 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-08-18 11:31 [2.6.19 PATCH 3/7] ehea: queue management Jan-Bernd Themann
2006-08-18 12:17 ` Arjan van de Ven
2006-08-18 13:25   ` Thomas Klein
2006-08-18 13:47     ` Arnd Bergmann
2006-08-18 14:24       ` Christoph Raisch
2006-08-18 16:16         ` Arnd Bergmann
2006-08-18 14:33     ` Jörn Engel
2006-08-18 14:40 ` Jörn Engel
  -- strict thread matches above, loose matches on Subject: below --
2006-08-22 12:53 Jan-Bernd Themann
2006-08-22 14:01 ` Arnd Bergmann
2006-08-22 13:51   ` Jan-Bernd Themann
2006-08-23  8:57 Jan-Bernd Themann
2006-09-04 10:39 Jan-Bernd Themann
2006-09-06 13:32 Jan-Bernd Themann

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).