* [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism
@ 2025-06-09  7:26 Jun Miao
  2025-06-09  9:05 ` Subbaraya Sundeep
  0 siblings, 1 reply; 5+ messages in thread
From: Jun Miao @ 2025-06-09  7:26 UTC (permalink / raw)
  To: oneukum, andrew+netdev, davem, edumazet, kuba
  Cc: netdev, linux-usb, linux-kernel, jun.miao

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all tasklet usage in the usbnet driver with the corresponding
workqueue APIs. This transition ensures compatibility with the latest
design and enhances performance.

Signed-off-by: Jun Miao <jun.miao@intel.com>
---
 drivers/net/usb/usbnet.c   | 36 ++++++++++++++++++------------------
 include/linux/usb/usbnet.h |  2 +-
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04e715a4c2a..566127b4e0ba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 
 	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule(&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 	spin_unlock(&dev->done.lock);
 	spin_unlock_irqrestore(&list->lock, flags);
 	return old_state;
@@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		default:
 			netif_dbg(dev, rx_err, dev->net,
 				  "rx submit, %d\n", retval);
-			tasklet_schedule (&dev->bh);
+			queue_work(system_bh_wq, &dev->bh_work);
 			break;
 		case 0:
 			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
@@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
 		num++;
 	}
 
-	tasklet_schedule(&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);
 
 	netif_dbg(dev, rx_status, dev->net,
 		  "paused rx queue disabled, %d skbs requeued\n", num);
@@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
 {
 	if (netif_running(dev->net)) {
 		(void) unlink_urbs (dev, &dev->rxq);
-		tasklet_schedule(&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 	}
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
 	/* deferred work (timer, softirq, task) must also stop */
 	dev->flags = 0;
 	timer_delete_sync(&dev->delay);
-	tasklet_kill(&dev->bh);
+	disable_work_sync(&dev->bh_work);
 	cancel_work_sync(&dev->kevent);
 
 	/* We have cyclic dependencies. Those calls are needed
 	 * to break a cycle. We cannot fall into the gaps because
 	 * we have a flag
 	 */
-	tasklet_kill(&dev->bh);
+	disable_work_sync(&dev->bh_work);
 	timer_delete_sync(&dev->delay);
 	cancel_work_sync(&dev->kevent);
 
@@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
 	clear_bit(EVENT_RX_KILL, &dev->flags);
 
 	// delay posting reads until we're fully open
-	tasklet_schedule (&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);
 	if (info->manage_power) {
 		retval = info->manage_power(dev, 1);
 		if (retval < 0) {
@@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
 		 */
 	} else {
 		/* submitting URBs for reading packets */
-		tasklet_schedule(&dev->bh);
+		queue_work(system_bh_wq, &dev->bh_work);
 	}
 
 	/* hard_mtu or rx_urb_size may change during link change */
@@ -1198,11 +1198,11 @@ usbnet_deferred_kevent (struct work_struct *work)
 		} else {
 			clear_bit (EVENT_RX_HALT, &dev->flags);
 			if (!usbnet_going_away(dev))
-				tasklet_schedule(&dev->bh);
+				queue_work(system_bh_wq, &dev->bh_work);
 		}
 	}
 
-	/* tasklet could resubmit itself forever if memory is tight */
+	/* workqueue could resubmit itself forever if memory is tight */
 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
 		struct urb	*urb = NULL;
 		int resched = 1;
@@ -1224,7 +1224,7 @@ usbnet_deferred_kevent (struct work_struct *work)
 fail_lowmem:
 			if (resched)
 				if (!usbnet_going_away(dev))
-					tasklet_schedule(&dev->bh);
+					queue_work(system_bh_wq, &dev->bh_work);
 		}
 	}
 
@@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
 	struct usbnet		*dev = netdev_priv(net);
 
 	unlink_urbs (dev, &dev->txq);
-	tasklet_schedule (&dev->bh);
+	queue_work(system_bh_wq, &dev->bh_work);
 	/* this needs to be handled individually because the generic layer
 	 * doesn't know what is sufficient and could not restore private
 	 * information if a remedy of an unconditional reset were used.
@@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
 
 /*-------------------------------------------------------------------------*/
 
-// tasklet (work deferred from completions, in_irq) or timer
+// workqueue (work deferred from completions, in_irq) or timer
 
 static void usbnet_bh (struct timer_list *t)
 {
@@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
 					  "rxqlen %d --> %d\n",
 					  temp, dev->rxq.qlen);
 			if (dev->rxq.qlen < RX_QLEN(dev))
-				tasklet_schedule (&dev->bh);
+				queue_work(system_bh_wq, &dev->bh_work);
 		}
 		if (dev->txq.qlen < TX_QLEN (dev))
 			netif_wake_queue (dev->net);
 	}
 }
 
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_workqueue(struct work_struct *work)
 {
-	struct usbnet *dev = from_tasklet(dev, t, bh);
+	struct usbnet *dev = from_work(dev, work, bh_work);
 
 	usbnet_bh(&dev->delay);
 }
@@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
 	skb_queue_head_init(&dev->rxq_pause);
-	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+	INIT_WORK (&dev->bh_work, usbnet_bh_workqueue);
 	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
 	init_usb_anchor(&dev->deferred);
 	timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
 
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);
-			tasklet_schedule (&dev->bh);
+			queue_work(system_bh_wq, &dev->bh_work);
 		}
 	}
 
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 0b9f1e598e3a..208682f77179 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -58,7 +58,7 @@ struct usbnet {
 	unsigned		interrupt_count;
 	struct mutex		interrupt_mutex;
 	struct usb_anchor	deferred;
-	struct tasklet_struct	bh;
+	struct work_struct	bh_work;
 
 	struct work_struct	kevent;
 	unsigned long		flags;
-- 
2.43.0
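
For reference, the API mapping applied throughout the patch can be summarized
as a minimal, self-contained sketch. The struct and function names below are
illustrative placeholders (not the usbnet ones), and it assumes a kernel recent
enough to provide system_bh_wq, from_work() and disable_work_sync(); only the
workqueue calls themselves are the ones actually used in the diff:

	#include <linux/workqueue.h>

	struct demo_priv {
		struct work_struct bh_work;	/* was: struct tasklet_struct bh */
	};

	/* was: a tasklet callback using from_tasklet() */
	static void demo_bh_work(struct work_struct *work)
	{
		struct demo_priv *p = from_work(p, work, bh_work);

		/* bottom-half processing for *p goes here */
	}

	static void demo_setup(struct demo_priv *p)
	{
		/* was: tasklet_setup(&p->bh, ...) */
		INIT_WORK(&p->bh_work, demo_bh_work);
	}

	static void demo_kick(struct demo_priv *p)
	{
		/* was: tasklet_schedule(&p->bh) */
		queue_work(system_bh_wq, &p->bh_work);
	}

	static void demo_shutdown(struct demo_priv *p)
	{
		/* was: tasklet_kill(&p->bh); waits for a running item and
		 * also keeps later queue_work() calls from re-arming it
		 * until enable_work() is called
		 */
		disable_work_sync(&p->bh_work);
	}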



* Re: [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism
  2025-06-09  7:26 [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism Jun Miao
@ 2025-06-09  9:05 ` Subbaraya Sundeep
  2025-06-09  9:53   ` Miao, Jun
  0 siblings, 1 reply; 5+ messages in thread
From: Subbaraya Sundeep @ 2025-06-09  9:05 UTC (permalink / raw)
  To: Jun Miao
  Cc: oneukum, andrew+netdev, davem, edumazet, kuba, netdev, linux-usb,
	linux-kernel

Hi,

On 2025-06-09 at 07:26:10, Jun Miao (jun.miao@intel.com) wrote:
> Migrate the tasklet API to the new bottom half workqueue mechanism,
> replacing all tasklet usage in the usbnet driver with the corresponding
> workqueue APIs. This transition ensures compatibility with the latest
> design and enhances performance.
> 
> Signed-off-by: Jun Miao <jun.miao@intel.com>
> ---
>  drivers/net/usb/usbnet.c   | 36 ++++++++++++++++++------------------
>  include/linux/usb/usbnet.h |  2 +-
>  2 files changed, 19 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
> index c04e715a4c2a..566127b4e0ba 100644
> --- a/drivers/net/usb/usbnet.c
> +++ b/drivers/net/usb/usbnet.c
> @@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
>  
>  	__skb_queue_tail(&dev->done, skb);
>  	if (dev->done.qlen == 1)
> -		tasklet_schedule(&dev->bh);
> +		queue_work(system_bh_wq, &dev->bh_work);
>  	spin_unlock(&dev->done.lock);
>  	spin_unlock_irqrestore(&list->lock, flags);
>  	return old_state;
> @@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
>  		default:
>  			netif_dbg(dev, rx_err, dev->net,
>  				  "rx submit, %d\n", retval);
> -			tasklet_schedule (&dev->bh);
> +			queue_work(system_bh_wq, &dev->bh_work);
>  			break;
>  		case 0:
>  			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
> @@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
>  		num++;
>  	}
>  
> -	tasklet_schedule(&dev->bh);
> +	queue_work(system_bh_wq, &dev->bh_work);
>  
>  	netif_dbg(dev, rx_status, dev->net,
>  		  "paused rx queue disabled, %d skbs requeued\n", num);
> @@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
>  {
>  	if (netif_running(dev->net)) {
>  		(void) unlink_urbs (dev, &dev->rxq);
> -		tasklet_schedule(&dev->bh);
> +		queue_work(system_bh_wq, &dev->bh_work);
>  	}
>  }
>  EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
> @@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
>  	/* deferred work (timer, softirq, task) must also stop */
>  	dev->flags = 0;
>  	timer_delete_sync(&dev->delay);
> -	tasklet_kill(&dev->bh);
> +	disable_work_sync(&dev->bh_work);
>  	cancel_work_sync(&dev->kevent);
>  
>  	/* We have cyclic dependencies. Those calls are needed
>  	 * to break a cycle. We cannot fall into the gaps because
>  	 * we have a flag
>  	 */
> -	tasklet_kill(&dev->bh);
> +	disable_work_sync(&dev->bh_work);
>  	timer_delete_sync(&dev->delay);
>  	cancel_work_sync(&dev->kevent);
>  
> @@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
>  	clear_bit(EVENT_RX_KILL, &dev->flags);
>  
>  	// delay posting reads until we're fully open
> -	tasklet_schedule (&dev->bh);
> +	queue_work(system_bh_wq, &dev->bh_work);
>  	if (info->manage_power) {
>  		retval = info->manage_power(dev, 1);
>  		if (retval < 0) {
> @@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
>  		 */
>  	} else {
>  		/* submitting URBs for reading packets */
> -		tasklet_schedule(&dev->bh);
> +		queue_work(system_bh_wq, &dev->bh_work);
>  	}
>  
>  	/* hard_mtu or rx_urb_size may change during link change */
> @@ -1198,11 +1198,11 @@ usbnet_deferred_kevent (struct work_struct *work)
>  		} else {
>  			clear_bit (EVENT_RX_HALT, &dev->flags);
>  			if (!usbnet_going_away(dev))
> -				tasklet_schedule(&dev->bh);
> +				queue_work(system_bh_wq, &dev->bh_work);
>  		}
>  	}
>  
> -	/* tasklet could resubmit itself forever if memory is tight */
> +	/* workqueue could resubmit itself forever if memory is tight */
>  	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
>  		struct urb	*urb = NULL;
>  		int resched = 1;
> @@ -1224,7 +1224,7 @@ usbnet_deferred_kevent (struct work_struct *work)
>  fail_lowmem:
>  			if (resched)
>  				if (!usbnet_going_away(dev))
> -					tasklet_schedule(&dev->bh);
> +					queue_work(system_bh_wq, &dev->bh_work);
>  		}
>  	}
>  
> @@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
>  	struct usbnet		*dev = netdev_priv(net);
>  
>  	unlink_urbs (dev, &dev->txq);
> -	tasklet_schedule (&dev->bh);
> +	queue_work(system_bh_wq, &dev->bh_work);
>  	/* this needs to be handled individually because the generic layer
>  	 * doesn't know what is sufficient and could not restore private
>  	 * information if a remedy of an unconditional reset were used.
> @@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
>  
>  /*-------------------------------------------------------------------------*/
>  
> -// tasklet (work deferred from completions, in_irq) or timer
> +// workqueue (work deferred from completions, in_irq) or timer
>  
>  static void usbnet_bh (struct timer_list *t)
>  {
> @@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
>  					  "rxqlen %d --> %d\n",
>  					  temp, dev->rxq.qlen);
>  			if (dev->rxq.qlen < RX_QLEN(dev))
> -				tasklet_schedule (&dev->bh);
> +				queue_work(system_bh_wq, &dev->bh_work);
Correct me if I am wrong.
Just above this code there is - if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK).
You can change it to GFP_KERNEL since this is not atomic context now.

Thanks,
Sundeep
>  		}
>  		if (dev->txq.qlen < TX_QLEN (dev))
>  			netif_wake_queue (dev->net);
>  	}
>  }
>  
> -static void usbnet_bh_tasklet(struct tasklet_struct *t)
> +static void usbnet_bh_workqueue(struct work_struct *work)
>  {
> -	struct usbnet *dev = from_tasklet(dev, t, bh);
> +	struct usbnet *dev = from_work(dev, work, bh_work);
>  
>  	usbnet_bh(&dev->delay);
>  }
> @@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
>  	skb_queue_head_init (&dev->txq);
>  	skb_queue_head_init (&dev->done);
>  	skb_queue_head_init(&dev->rxq_pause);
> -	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
> +	INIT_WORK (&dev->bh_work, usbnet_bh_workqueue);
>  	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
>  	init_usb_anchor(&dev->deferred);
>  	timer_setup(&dev->delay, usbnet_bh, 0);
> @@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
>  
>  			if (!(dev->txq.qlen >= TX_QLEN(dev)))
>  				netif_tx_wake_all_queues(dev->net);
> -			tasklet_schedule (&dev->bh);
> +			queue_work(system_bh_wq, &dev->bh_work);
>  		}
>  	}
>  
> diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
> index 0b9f1e598e3a..208682f77179 100644
> --- a/include/linux/usb/usbnet.h
> +++ b/include/linux/usb/usbnet.h
> @@ -58,7 +58,7 @@ struct usbnet {
>  	unsigned		interrupt_count;
>  	struct mutex		interrupt_mutex;
>  	struct usb_anchor	deferred;
> -	struct tasklet_struct	bh;
> +	struct work_struct	bh_work;
>  
>  	struct work_struct	kevent;
>  	unsigned long		flags;
> -- 
> 2.43.0
> 


* RE: [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism
  2025-06-09  9:05 ` Subbaraya Sundeep
@ 2025-06-09  9:53   ` Miao, Jun
  2025-06-10  9:53     ` Oliver Neukum
  0 siblings, 1 reply; 5+ messages in thread
From: Miao, Jun @ 2025-06-09  9:53 UTC (permalink / raw)
  To: Subbaraya Sundeep
  Cc: oneukum@suse.com, andrew+netdev@lunn.ch, davem@davemloft.net,
	edumazet@google.com, kuba@kernel.org, netdev@vger.kernel.org,
	linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org

>
>Hi,
>
>On 2025-06-09 at 07:26:10, Jun Miao (jun.miao@intel.com) wrote:
>> Migrate the tasklet API to the new bottom half workqueue mechanism,
>> replacing all tasklet usage in the usbnet driver with the corresponding
>> workqueue APIs. This transition ensures compatibility with the latest
>> design and enhances performance.
>>
>> Signed-off-by: Jun Miao <jun.miao@intel.com>
>> ---
>>  drivers/net/usb/usbnet.c   | 36 ++++++++++++++++++------------------
>>  include/linux/usb/usbnet.h |  2 +-
>>  2 files changed, 19 insertions(+), 19 deletions(-)
>>
>> diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
>> index c04e715a4c2a..566127b4e0ba 100644
>> --- a/drivers/net/usb/usbnet.c
>> +++ b/drivers/net/usb/usbnet.c
>> @@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
>>
>>  	__skb_queue_tail(&dev->done, skb);
>>  	if (dev->done.qlen == 1)
>> -		tasklet_schedule(&dev->bh);
>> +		queue_work(system_bh_wq, &dev->bh_work);
>>  	spin_unlock(&dev->done.lock);
>>  	spin_unlock_irqrestore(&list->lock, flags);
>>  	return old_state;
>> @@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
>>  		default:
>>  			netif_dbg(dev, rx_err, dev->net,
>>  				  "rx submit, %d\n", retval);
>> -			tasklet_schedule (&dev->bh);
>> +			queue_work(system_bh_wq, &dev->bh_work);
>>  			break;
>>  		case 0:
>>  			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
>> @@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
>>  		num++;
>>  	}
>>
>> -	tasklet_schedule(&dev->bh);
>> +	queue_work(system_bh_wq, &dev->bh_work);
>>
>>  	netif_dbg(dev, rx_status, dev->net,
>>  		  "paused rx queue disabled, %d skbs requeued\n", num);
>> @@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
>>  {
>>  	if (netif_running(dev->net)) {
>>  		(void) unlink_urbs (dev, &dev->rxq);
>> -		tasklet_schedule(&dev->bh);
>> +		queue_work(system_bh_wq, &dev->bh_work);
>>  	}
>>  }
>>  EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
>> @@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
>>  	/* deferred work (timer, softirq, task) must also stop */
>>  	dev->flags = 0;
>>  	timer_delete_sync(&dev->delay);
>> -	tasklet_kill(&dev->bh);
>> +	disable_work_sync(&dev->bh_work);
>>  	cancel_work_sync(&dev->kevent);
>>
>>  	/* We have cyclic dependencies. Those calls are needed
>>  	 * to break a cycle. We cannot fall into the gaps because
>>  	 * we have a flag
>>  	 */
>> -	tasklet_kill(&dev->bh);
>> +	disable_work_sync(&dev->bh_work);
>>  	timer_delete_sync(&dev->delay);
>>  	cancel_work_sync(&dev->kevent);
>>
>> @@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
>>  	clear_bit(EVENT_RX_KILL, &dev->flags);
>>
>>  	// delay posting reads until we're fully open
>> -	tasklet_schedule (&dev->bh);
>> +	queue_work(system_bh_wq, &dev->bh_work);
>>  	if (info->manage_power) {
>>  		retval = info->manage_power(dev, 1);
>>  		if (retval < 0) {
>> @@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
>>  		 */
>>  	} else {
>>  		/* submitting URBs for reading packets */
>> -		tasklet_schedule(&dev->bh);
>> +		queue_work(system_bh_wq, &dev->bh_work);
>>  	}
>>
>>  	/* hard_mtu or rx_urb_size may change during link change */
>> @@ -1198,11 +1198,11 @@ usbnet_deferred_kevent (struct work_struct *work)
>>  		} else {
>>  			clear_bit (EVENT_RX_HALT, &dev->flags);
>>  			if (!usbnet_going_away(dev))
>> -				tasklet_schedule(&dev->bh);
>> +				queue_work(system_bh_wq, &dev->bh_work);
>>  		}
>>  	}
>>
>> -	/* tasklet could resubmit itself forever if memory is tight */
>> +	/* workqueue could resubmit itself forever if memory is tight */
>>  	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
>>  		struct urb	*urb = NULL;
>>  		int resched = 1;
>> @@ -1224,7 +1224,7 @@ usbnet_deferred_kevent (struct work_struct *work)
>>  fail_lowmem:
>>  			if (resched)
>>  				if (!usbnet_going_away(dev))
>> -					tasklet_schedule(&dev->bh);
>> +					queue_work(system_bh_wq, &dev->bh_work);
>>  		}
>>  	}
>>
>> @@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
>>  	struct usbnet		*dev = netdev_priv(net);
>>
>>  	unlink_urbs (dev, &dev->txq);
>> -	tasklet_schedule (&dev->bh);
>> +	queue_work(system_bh_wq, &dev->bh_work);
>>  	/* this needs to be handled individually because the generic layer
>>  	 * doesn't know what is sufficient and could not restore private
>>  	 * information if a remedy of an unconditional reset were used.
>> @@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
>>
>>  /*-------------------------------------------------------------------------*/
>>
>> -// tasklet (work deferred from completions, in_irq) or timer
>> +// workqueue (work deferred from completions, in_irq) or timer
>>
>>  static void usbnet_bh (struct timer_list *t)
>>  {
>> @@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
>>  					  "rxqlen %d --> %d\n",
>>  					  temp, dev->rxq.qlen);
>>  			if (dev->rxq.qlen < RX_QLEN(dev))
>> -				tasklet_schedule (&dev->bh);
>> +				queue_work(system_bh_wq, &dev->bh_work);
>Correct me if I am wrong.
>Just above this code there is - if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK).
>You can change it to GFP_KERNEL since this is not atomic context now.
>

Thanks, the usbnet_bh() function is only called by usbnet_bh_workqueue(), which runs in a sleepable context.

---Jun Miao 

>Thanks,
>Sundeep
>>  		}
>>  		if (dev->txq.qlen < TX_QLEN (dev))
>>  			netif_wake_queue (dev->net);
>>  	}
>>  }
>>
>> -static void usbnet_bh_tasklet(struct tasklet_struct *t)
>> +static void usbnet_bh_workqueue(struct work_struct *work)
>>  {
>> -	struct usbnet *dev = from_tasklet(dev, t, bh);
>> +	struct usbnet *dev = from_work(dev, work, bh_work);
>>
>>  	usbnet_bh(&dev->delay);
>>  }
>> @@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
>>  	skb_queue_head_init (&dev->txq);
>>  	skb_queue_head_init (&dev->done);
>>  	skb_queue_head_init(&dev->rxq_pause);
>> -	tasklet_setup(&dev->bh, usbnet_bh_tasklet);
>> +	INIT_WORK (&dev->bh_work, usbnet_bh_workqueue);
>>  	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
>>  	init_usb_anchor(&dev->deferred);
>>  	timer_setup(&dev->delay, usbnet_bh, 0);
>> @@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
>>
>>  			if (!(dev->txq.qlen >= TX_QLEN(dev)))
>>  				netif_tx_wake_all_queues(dev->net);
>> -			tasklet_schedule (&dev->bh);
>> +			queue_work(system_bh_wq, &dev->bh_work);
>>  		}
>>  	}
>>
>> diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
>> index 0b9f1e598e3a..208682f77179 100644
>> --- a/include/linux/usb/usbnet.h
>> +++ b/include/linux/usb/usbnet.h
>> @@ -58,7 +58,7 @@ struct usbnet {
>>  	unsigned		interrupt_count;
>>  	struct mutex		interrupt_mutex;
>>  	struct usb_anchor	deferred;
>> -	struct tasklet_struct	bh;
>> +	struct work_struct	bh_work;
>>
>>  	struct work_struct	kevent;
>>  	unsigned long		flags;
>> --
>> 2.43.0
>>


* Re: [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism
  2025-06-09  9:53   ` Miao, Jun
@ 2025-06-10  9:53     ` Oliver Neukum
  2025-06-10 10:14       ` Miao, Jun
  0 siblings, 1 reply; 5+ messages in thread
From: Oliver Neukum @ 2025-06-10  9:53 UTC (permalink / raw)
  To: Miao, Jun, Subbaraya Sundeep
  Cc: oneukum@suse.com, andrew+netdev@lunn.ch, davem@davemloft.net,
	edumazet@google.com, kuba@kernel.org, netdev@vger.kernel.org,
	linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org

On 09.06.25 11:53, Miao, Jun wrote:

>> You can change it to GFP_KERNEL since this is not atomic context now.
>>
> 
> Thanks, the usbnet_bh() function is only called by usbnet_bh_workqueue(), which runs in a sleepable context.

Yes, but it can be waited on in usbnet_stop(), which in turn can
be called for a device reset. Hence this must be GFP_NOIO, not
GFP_KERNEL.

	Regards
		Oliver
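
For concreteness, the follow-up change being discussed here would amount to a
one-line hunk in usbnet_bh(), roughly along these lines (a sketch of the
suggestion only, not the actual v2):

-			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
+			if (rx_alloc_submit(dev, GFP_NOIO) == -ENOLINK)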



* RE: [PATCH] net: usb: Convert tasklet API to new bottom half workqueue mechanism
  2025-06-10  9:53     ` Oliver Neukum
@ 2025-06-10 10:14       ` Miao, Jun
  0 siblings, 0 replies; 5+ messages in thread
From: Miao, Jun @ 2025-06-10 10:14 UTC (permalink / raw)
  To: Oliver Neukum, Subbaraya Sundeep
  Cc: andrew+netdev@lunn.ch, davem@davemloft.net, edumazet@google.com,
	kuba@kernel.org, netdev@vger.kernel.org,
	linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org

>
>On 09.06.25 11:53, Miao, Jun wrote:
>
>>> You can change it to GFP_KERNEL since this is not atomic context now.
>>>
>>
>> Thanks, the usbnet_bh() function is only called by usbnet_bh_workqueue(), which runs in a sleepable context.
>
>Yes, but it can be waited on in usbnet_stop(), which in turn can be called for a
>device reset. Hence this must be GFP_NOIO, not GFP_KERNEL.

I will send a v2 with the Suggested-by tag added!

Thanks 
Jun Miao

>
>	Regards
>		Oliver


