public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] staging: octeon: Convert create_singlethread_workqueue()
@ 2016-02-27 17:51 Bhaktipriya Shridhar
  2016-02-28 13:09 ` Tejun Heo
  0 siblings, 1 reply; 2+ messages in thread
From: Bhaktipriya Shridhar @ 2016-02-27 17:51 UTC (permalink / raw)
  To: gregkh, aaro.koskinen, cristina.moraru09, avid.daney, david.daney,
	ralf, paul.martin, arnd, joe, aybuke.147
  Cc: linux-kernel, devel, tj

With concurrency managed workqueues, use of dedicated workqueues can
be replaced by system_wq. Drop cvm_oct_poll_queue by using system_wq.

There are multiple work items per cvm_oct_poll_queue (viz.
cvm_oct_rx_refill_work, port_periodic_work) and different
cvm_oct_poll_queues need not be ordered. Hence, concurrency
can be increased by switching to system_wq.

All work items are sync canceled in cvm_oct_remove() so it
is guaranteed that no work is in flight by the time exit path runs.

Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
---
 drivers/staging/octeon/ethernet.c        | 21 ++++-----------------
 drivers/staging/octeon/octeon-ethernet.h |  1 -
 2 files changed, 4 insertions(+), 18 deletions(-)

diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 00adc52..c4d3f76 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -86,10 +86,6 @@ int rx_napi_weight = 32;
 module_param(rx_napi_weight, int, 0444);
 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

-/*
- * cvm_oct_poll_queue - Workqueue for polling operations.
- */
-struct workqueue_struct *cvm_oct_poll_queue;

 /*
  * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -121,8 +117,7 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work)
 	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

 	if (!atomic_read(&cvm_oct_poll_queue_stopping))
-		queue_delayed_work(cvm_oct_poll_queue,
-				   &cvm_oct_rx_refill_work, HZ);
+		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
 }

 static void cvm_oct_periodic_worker(struct work_struct *work)
@@ -138,8 +133,7 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
 						cvm_oct_device[priv->port]);

 	if (!atomic_read(&cvm_oct_poll_queue_stopping))
-		queue_delayed_work(cvm_oct_poll_queue,
-						&priv->port_periodic_work, HZ);
+		schedule_delayed_work(&priv->port_periodic_work, HZ);
 }

 static void cvm_oct_configure_common_hw(void)
@@ -666,11 +660,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}

-	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
-	if (!cvm_oct_poll_queue) {
-		pr_err("octeon-ethernet: Cannot create workqueue");
-		return -ENOMEM;
-	}

 	cvm_oct_configure_common_hw();

@@ -828,8 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
 				fau -=
 				    cvmx_pko_get_num_queues(priv->port) *
 				    sizeof(u32);
-				queue_delayed_work(cvm_oct_poll_queue,
-						&priv->port_periodic_work, HZ);
+				schedule_delayed_work(&priv->port_periodic_work, HZ);
 			}
 		}
 	}
@@ -842,7 +830,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
 	 */
 	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

-	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
+	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

 	return 0;
 }
@@ -885,7 +873,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
 		}
 	}

-	destroy_workqueue(cvm_oct_poll_queue);

 	cvmx_pko_shutdown();

diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 5b4fdd2..6275c15 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -76,7 +76,6 @@ extern int pow_send_group;
 extern int pow_receive_group;
 extern char pow_send_list[];
 extern struct net_device *cvm_oct_device[];
-extern struct workqueue_struct *cvm_oct_poll_queue;
 extern atomic_t cvm_oct_poll_queue_stopping;
 extern u64 cvm_oct_tx_poll_interval;

--
2.1.4

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH v3] staging: octeon: Convert create_singlethread_workqueue()
  2016-02-27 17:51 [PATCH v3] staging: octeon: Convert create_singlethread_workqueue() Bhaktipriya Shridhar
@ 2016-02-28 13:09 ` Tejun Heo
  0 siblings, 0 replies; 2+ messages in thread
From: Tejun Heo @ 2016-02-28 13:09 UTC (permalink / raw)
  To: Bhaktipriya Shridhar
  Cc: gregkh, aaro.koskinen, cristina.moraru09, avid.daney, david.daney,
	ralf, paul.martin, arnd, joe, aybuke.147, linux-kernel, devel

Hello,

On Sat, Feb 27, 2016 at 11:21:47PM +0530, Bhaktipriya Shridhar wrote:
> With concurrency managed workqueues, use of dedicated workqueues can
> be replaced by system_wq. Drop cvm_oct_poll_queue by using system_wq.
> 
> There are multiple work items per cvm_oct_poll_queue (viz.
> cvm_oct_rx_refill_work, port_periodic_work) and different
> cvm_oct_poll_queues need not be ordered. Hence, concurrency
> can be increased by switching to system_wq.
> 
> All work items are sync canceled in cvm_oct_remove() so it
> is guaranteed that no work is in flight by the time exit path runs.
> 
> Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>

For the workqueue part,

Acked-by: Tejun Heo <tj@kernel.org>

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2016-02-28 13:09 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-27 17:51 [PATCH v3] staging: octeon: Convert create_singlethread_workqueue() Bhaktipriya Shridhar
2016-02-28 13:09 ` Tejun Heo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox