linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2.6.36-rc7] infiniband: update workqueue usage
@ 2010-10-19 15:24 Tejun Heo
  2010-10-19 17:22 ` Ralph Campbell
  2010-10-19 18:40 ` Bart Van Assche
  0 siblings, 2 replies; 7+ messages in thread
From: Tejun Heo @ 2010-10-19 15:24 UTC (permalink / raw)
  To: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma, lkml

* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue.  All system workqueue usages
  including flush_scheduled_work() callers are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().

Signed-off-by: Tejun Heo <tj@kernel.org>
---
Hello,

I think this patch is safe but don't have any experience with or
access to infiniband stuff and it's only compile tested.  Also, while
looking through the code, I got curious about several things.

* Can any of the works in infiniband be used during memory reclaim?

* qib_cq_wq is a separate singlethread workqueue.  Does the queue
  require strict single thread execution ordering?  IOW, does each
  work have to be executed in the exact queued order and no two works
  should execute in parallel?  Or was the singlethreadedness chosen
  just to reduce the number of workers?

* The same question for ipoib_workqueue.

Thank you.

 drivers/infiniband/core/cache.c                |    4 +--
 drivers/infiniband/core/device.c               |   11 ++++++++--
 drivers/infiniband/core/sa_query.c             |    4 +--
 drivers/infiniband/core/umem.c                 |    2 -
 drivers/infiniband/hw/ipath/ipath_driver.c     |    2 -
 drivers/infiniband/hw/ipath/ipath_user_pages.c |    2 -
 drivers/infiniband/hw/qib/qib_iba7220.c        |    7 ++----
 drivers/infiniband/hw/qib/qib_iba7322.c        |   14 ++++++-------
 drivers/infiniband/hw/qib/qib_init.c           |   26 +++----------------------
 drivers/infiniband/hw/qib/qib_qsfp.c           |    9 +++-----
 drivers/infiniband/hw/qib/qib_verbs.h          |    5 +---
 drivers/infiniband/ulp/srp/ib_srp.c            |    4 +--
 include/rdma/ib_verbs.h                        |    3 ++
 13 files changed, 41 insertions(+), 52 deletions(-)

Index: work/drivers/infiniband/core/cache.c
===================================================================
--- work.orig/drivers/infiniband/core/cache.c
+++ work/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_eve
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
-			schedule_work(&work->work);
+			queue_work(ib_wq, &work->work);
 		}
 	}
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct
 	int p;

 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);

 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
Index: work/drivers/infiniband/core/device.c
===================================================================
--- work.orig/drivers/infiniband/core/device.c
+++ work/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>

 #include "core_priv.h"

@@ -52,6 +51,9 @@ struct ib_client_data {
 	void *            data;
 };

+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);

@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
 	int ret;

+	ib_wq = alloc_workqueue("infiniband", 0, 0);
+	if (!ib_wq)
+		return -ENOMEM;
+
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
+		destroy_workqueue(ib_wq);
 	}

 	return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	flush_scheduled_work();
+	destroy_workqueue(ib_wq);
 }

 module_init(ib_core_init);
Index: work/drivers/infiniband/core/sa_query.c
===================================================================
--- work.orig/drivers/infiniband/core/sa_query.c
+++ work/drivers/infiniband/core/sa_query.c
@@ -422,7 +422,7 @@ static void ib_sa_event(struct ib_event_
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);

-		schedule_work(&sa_dev->port[event->element.port_num -
+		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
 					    sa_dev->start_port].update_task);
 	}
 }
@@ -1068,7 +1068,7 @@ static void ib_sa_remove_one(struct ib_d

 	ib_unregister_event_handler(&sa_dev->event_handler);

-	flush_scheduled_work();
+	flush_workqueue(ib_wq);

 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
 		ib_unregister_mad_agent(sa_dev->port[i].agent);
Index: work/drivers/infiniband/core/umem.c
===================================================================
--- work.orig/drivers/infiniband/core/umem.c
+++ work/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *ume
 			umem->mm   = mm;
 			umem->diff = diff;

-			schedule_work(&umem->work);
+			queue_work(ib_wq, &umem->work);
 			return;
 		}
 	} else
Index: work/drivers/infiniband/hw/ipath/ipath_driver.c
===================================================================
--- work.orig/drivers/infiniband/hw/ipath/ipath_driver.c
+++ work/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -757,7 +757,7 @@ static void __devexit ipath_remove_one(s
 	 */
 	ipath_shutdown_device(dd);

-	flush_scheduled_work();
+	flush_workqueue(ib_wq);

 	if (dd->verbs_dev)
 		ipath_unregister_ib_device(dd->verbs_dev);
Index: work/drivers/infiniband/hw/ipath/ipath_user_pages.c
===================================================================
--- work.orig/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ work/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(s
 	work->mm = mm;
 	work->num_pages = num_pages;

-	schedule_work(&work->work);
+	queue_work(ib_wq, &work->work);
 	return;

 bail_mm:
Index: work/drivers/infiniband/hw/qib/qib_iba7220.c
===================================================================
--- work.orig/drivers/infiniband/hw/qib/qib_iba7220.c
+++ work/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);

 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_

 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }

 /*
Index: work/drivers/infiniband/hw/qib/qib_iba7322.c
===================================================================
--- work.orig/drivers/infiniband/hw/qib/qib_iba7322.c
+++ work/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2348,10 +2348,9 @@ static void qib_7322_mini_quiet_serdes(s
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2648,7 +2647,7 @@ static noinline void unknown_7322_gpio_i
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
@@ -4926,8 +4925,8 @@ static void try_7322_autoneg(struct qib_
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }

 /*
@@ -5057,7 +5056,8 @@ static void try_7322_ipg(struct qib_ppor
 		ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }

 /*
Index: work/drivers/infiniband/hw/qib/qib_init.c
===================================================================
--- work.orig/drivers/infiniband/hw/qib/qib_init.c
+++ work/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1)
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");

-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;

 static void verify_interrupt(unsigned long);
@@ -1045,24 +1044,10 @@ static int __init qlogic_ib_init(void)
 	if (ret)
 		goto bail;

-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed.  We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}

 	/*
@@ -1092,8 +1077,6 @@ bail_unit:
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
@@ -1117,7 +1100,6 @@ static void __exit qlogic_ib_cleanup(voi

 	pci_unregister_driver(&qib_driver);

-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);

 	qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct

 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(str

 	qib_stop_timers(dd);

-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);

 	ret = qibfs_remove(dd);
 	if (ret)
Index: work/drivers/infiniband/hw/qib/qib_qsfp.c
===================================================================
--- work.orig/drivers/infiniband/hw/qib/qib_qsfp.c
+++ work/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	schedule_work(&qd->work);
+	queue_work(ib_wq, &qd->work);
 bail:
 	return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now.  our
-	 * work is scheduled with schedule_work(), and
-	 * flush_scheduled_work() from remove_one will
-	 * block until all work ssetup with schedule_work()
+	 * There is nothing to do here for now.  our work is scheduled
+	 * with queue_work(), and flush_workqueue() from remove_one
+	 * will block until all work setup with queue_work()
 	 * completes.
 	 */
 }
Index: work/drivers/infiniband/hw/qib/qib_verbs.h
===================================================================
--- work.orig/drivers/infiniband/hw/qib/qib_verbs.h
+++ work/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib
 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }

-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;

 /*
@@ -815,10 +814,10 @@ static inline void qib_schedule_send(str
 {
 	if (qib_send_ok(qp)) {
 		if (qp->processor_id == smp_processor_id())
-			queue_work(qib_wq, &qp->s_work);
+			queue_work(ib_wq, &qp->s_work);
 		else
 			queue_work_on(qp->processor_id,
-				      qib_wq, &qp->s_work);
+				      ib_wq, &qp->s_work);
 	}
 }

Index: work/drivers/infiniband/ulp/srp/ib_srp.c
===================================================================
--- work.orig/drivers/infiniband/ulp/srp/ib_srp.c
+++ work/drivers/infiniband/ulp/srp/ib_srp.c
@@ -628,7 +628,7 @@ err:
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
-		schedule_work(&target->work);
+		queue_work(ib_wq, &target->work);
 	}
 	spin_unlock_irq(target->scsi_host->host_lock);

@@ -2129,7 +2129,7 @@ static void srp_remove_one(struct ib_dev
 		 * started before we marked our target ports as
 		 * removed, and any target port removal tasks.
 		 */
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);

 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
Index: work/include/rdma/ib_verbs.h
===================================================================
--- work.orig/include/rdma/ib_verbs.h
+++ work/include/rdma/ib_verbs.h
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>

 #include <asm/atomic.h>
 #include <asm/uaccess.h>

+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
 	u8	raw[16];
 	struct {

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-19 15:24 [PATCH v2.6.36-rc7] infiniband: update workqueue usage Tejun Heo
@ 2010-10-19 17:22 ` Ralph Campbell
  2010-10-20  8:37   ` Tejun Heo
  2010-10-19 18:40 ` Bart Van Assche
  1 sibling, 1 reply; 7+ messages in thread
From: Ralph Campbell @ 2010-10-19 17:22 UTC (permalink / raw)
  To: Tejun Heo
  Cc: Roland Dreier, Sean Hefty, Hal Rosenstock,
	linux-rdma@vger.kernel.org, lkml

On Tue, 2010-10-19 at 08:24 -0700, Tejun Heo wrote:

> * qib_cq_wq is a separate singlethread workqueue.  Does the queue
>   require strict single thread execution ordering?  IOW, does each
>   work have to be executed in the exact queued order and no two works
>   should execute in parallel?  Or was the singlethreadedness chosen
>   just to reduce the number of workers?

The work functions need to be called in-order and single threaded
or memory will be freed multiple times and other "bad things" will happen.


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-19 15:24 [PATCH v2.6.36-rc7] infiniband: update workqueue usage Tejun Heo
  2010-10-19 17:22 ` Ralph Campbell
@ 2010-10-19 18:40 ` Bart Van Assche
  2010-10-20  8:38   ` Tejun Heo
  1 sibling, 1 reply; 7+ messages in thread
From: Bart Van Assche @ 2010-10-19 18:40 UTC (permalink / raw)
  To: Tejun Heo; +Cc: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma, lkml

On Tue, Oct 19, 2010 at 5:24 PM, Tejun Heo <tj@kernel.org> wrote:
> [ ... ]
> This is to prepare for deprecation of flush_scheduled_work().
> [ ... ]
> Index: work/include/rdma/ib_verbs.h
> [ ... ]
> +extern struct workqueue_struct *ib_wq;
> [ ... ]

This patch adds a declaration of a global variable to a public header
file. That might be unavoidable, but it doesn't make me happy.

Bart.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-19 17:22 ` Ralph Campbell
@ 2010-10-20  8:37   ` Tejun Heo
  0 siblings, 0 replies; 7+ messages in thread
From: Tejun Heo @ 2010-10-20  8:37 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: Roland Dreier, Sean Hefty, Hal Rosenstock,
	linux-rdma@vger.kernel.org, lkml

Hello,

On 10/19/2010 07:22 PM, Ralph Campbell wrote:
> On Tue, 2010-10-19 at 08:24 -0700, Tejun Heo wrote:
> 
>> * qib_cq_wq is a separate singlethread workqueue.  Does the queue
>>   require strict single thread execution ordering?  IOW, does each
>>   work have to be executed in the exact queued order and no two works
>>   should execute in parallel?  Or was the singlethreadedness chosen
>>   just to reduce the number of workers?
> 
> The work functions need to be called in-order and single threaded
> or memory will be freed multiple times and other "bad things".

I see, so they'll need to be converted to alloc_ordered_workqueue()
once -rc1 merge window opens up.  I'll follow up with the conversion.

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-19 18:40 ` Bart Van Assche
@ 2010-10-20  8:38   ` Tejun Heo
  2010-10-20 10:21     ` Bart Van Assche
  0 siblings, 1 reply; 7+ messages in thread
From: Tejun Heo @ 2010-10-20  8:38 UTC (permalink / raw)
  To: Bart Van Assche
  Cc: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma, lkml

Hello,

On 10/19/2010 08:40 PM, Bart Van Assche wrote:
> On Tue, Oct 19, 2010 at 5:24 PM, Tejun Heo <tj@kernel.org> wrote:
>> [ ... ]
>> This is to prepare for deprecation of flush_scheduled_work().
>> [ ... ]
>> Index: work/include/rdma/ib_verbs.h
>> [ ... ]
>> +extern struct workqueue_struct *ib_wq;
>> [ ... ]
> 
> This patch adds a declaration of a global variable to a public header
> file. That might be unavoidable, but it doesn't make me happy.

Hmm... that's one very interesting reason to be unhappy.  Can you
please elaborate why addition of a global variable doesn't make you
happy?

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-20  8:38   ` Tejun Heo
@ 2010-10-20 10:21     ` Bart Van Assche
  2010-10-20 11:03       ` Tejun Heo
  0 siblings, 1 reply; 7+ messages in thread
From: Bart Van Assche @ 2010-10-20 10:21 UTC (permalink / raw)
  To: Tejun Heo; +Cc: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma, lkml

On Wed, Oct 20, 2010 at 10:38 AM, Tejun Heo <tj@kernel.org> wrote:
> Hello,
>
> On 10/19/2010 08:40 PM, Bart Van Assche wrote:
>> On Tue, Oct 19, 2010 at 5:24 PM, Tejun Heo <tj@kernel.org> wrote:
>>> [ ... ]
>>> This is to prepare for deprecation of flush_scheduled_work().
>>> [ ... ]
>>> Index: work/include/rdma/ib_verbs.h
>>> [ ... ]
>>> +extern struct workqueue_struct *ib_wq;
>>> [ ... ]
>>
>> This patch adds a declaration of a global variable to a public header
>> file. That might be unavoidable, but it doesn't make me happy.
>
> Hmm... that's one very interesting reason to be unhappy.  Can you
> please elaborate why addition of a global variable doesn't make you
> happy?

In the past every time I saw a global variable being added in a
software project that meant that some software concept was not being
abstracted properly. Which does not necessarily mean that that is the
case with this patch.

With the posted patch, just like with the current implementation, e.g.
the flush_workqueue() call in the ipath driver will make that driver
wait until work scheduled by the core/sa_query.c code finished and
vice versa. Is that necessary ? If not, using multiple work queues for
IB instead of one would allow to get rid of that global ib_wq
declaration.

Bart.

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2.6.36-rc7] infiniband: update workqueue usage
  2010-10-20 10:21     ` Bart Van Assche
@ 2010-10-20 11:03       ` Tejun Heo
  0 siblings, 0 replies; 7+ messages in thread
From: Tejun Heo @ 2010-10-20 11:03 UTC (permalink / raw)
  To: Bart Van Assche
  Cc: Roland Dreier, Sean Hefty, Hal Rosenstock, linux-rdma, lkml

Hello,

On 10/20/2010 12:21 PM, Bart Van Assche wrote:
>> Hmm... that's one very interesting reason to be unhappy.  Can you
>> please elaborate why addition of a global variable doesn't make you
>> happy?
> 
> In the past every time I saw a global variable being added in a
> software project that meant that some software concept was not being
> abstracted properly. Which does not necessarily mean that that is the
> case with this patch.

That's too wide-brushed a position to agree with, but, well, let's talk
about that some other time.  :-)

> With the posted patch, just like with the current implementation, e.g.
> the flush_workqueue() call in the ipath driver will make that driver
> wait until work scheduled by the core/sa_query.c code finished and
> vice versa. Is that necessary ? If not, using multiple work queues for
> IB instead of one would allow to get rid of that global ib_wq
> declaration.

Yeap, if breaking it down further makes sense, definitely.  If someone
is interested, the followings are the guidelines I've been following
in these conversions.

* If all the possibly pending works can be safely enumerated without
  too much difficulty and the works in question are not used during
  memory reclaim, use the system workqueue and use explicit
  flush/cancel_work[_sync]() instead of using flush_workqueue().
  Please note that flush_work_sync() isn't in mainline yet.  It's
  scheduled for the coming rc1 window.

* If works can be used during memory reclaim, there's no way around
  it.  A separate workqueue needs to be used.

* If works free themselves or are used to put the last reference of
  the containing object, they can't be flushed explicitly and thus
  need to be queued on a dedicated workqueue which serves as the
  flushing domain.

For this patch, I chose less intrusive path and just replaced system
wide workqueue with subsystem wide one mainly because I don't have
experience with or access to anything infiniband.  If someone wants to
push it further, I would be happy to help.

BTW, does the posted patch look safe to you?

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2010-10-20 11:04 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-10-19 15:24 [PATCH v2.6.36-rc7] infiniband: update workqueue usage Tejun Heo
2010-10-19 17:22 ` Ralph Campbell
2010-10-20  8:37   ` Tejun Heo
2010-10-19 18:40 ` Bart Van Assche
2010-10-20  8:38   ` Tejun Heo
2010-10-20 10:21     ` Bart Van Assche
2010-10-20 11:03       ` Tejun Heo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).