* [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-23 13:33 [PATCH v2 0/3] gpio: mpsse: add support for bryx brik Mary Strodl
@ 2025-09-23 13:33 ` Mary Strodl
2025-09-29 8:46 ` Dan Carpenter
` (2 more replies)
2025-09-23 13:33 ` [PATCH v2 2/3] gpio: mpsse: add quirk support Mary Strodl
2025-09-23 13:33 ` [PATCH v2 3/3] gpio: mpsse: support bryx radio interface kit Mary Strodl
2 siblings, 3 replies; 14+ messages in thread
From: Mary Strodl @ 2025-09-23 13:33 UTC (permalink / raw)
To: linux-kernel; +Cc: linus.walleij, brgl, linux-gpio, Mary Strodl
When an IRQ worker is running, unplugging the device would cause a
crash. The sealevel hardware this driver was written for was not
hotpluggable, so I never realized it.
This change uses RCU to create a list of workers, which it tears down on
disconnect.
Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
---
drivers/gpio/gpio-mpsse.c | 119 +++++++++++++++++++++++++++++++++++---
1 file changed, 110 insertions(+), 9 deletions(-)
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index 9f42bb30b4ec..af9a9196ac9d 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -10,6 +10,7 @@
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/usb.h>
struct mpsse_priv {
@@ -17,8 +18,10 @@ struct mpsse_priv {
struct usb_device *udev; /* USB device encompassing all MPSSEs */
struct usb_interface *intf; /* USB interface for this MPSSE */
u8 intf_id; /* USB interface number for this MPSSE */
- struct work_struct irq_work; /* polling work thread */
+ struct list_head workers; /* polling work threads */
struct mutex irq_mutex; /* lock over irq_data */
+ struct mutex irq_race; /* race for polling worker teardown */
+ raw_spinlock_t irq_spin; /* protects worker list */
atomic_t irq_type[16]; /* pin -> edge detection type */
atomic_t irq_enabled;
int id;
@@ -34,6 +37,15 @@ struct mpsse_priv {
struct mutex io_mutex; /* sync I/O with disconnect */
};
+struct mpsse_worker {
+ struct mpsse_priv *priv;
+ struct work_struct work;
+ atomic_t cancelled;
+ struct list_head list; /* linked list */
+ struct list_head destroy; /* teardown linked list */
+ struct rcu_head rcu; /* synchronization */
+};
+
struct bulk_desc {
bool tx; /* direction of bulk transfer */
u8 *data; /* input (tx) or output (rx) */
@@ -261,9 +273,8 @@ static int gpio_mpsse_direction_input(struct gpio_chip *chip,
guard(mutex)(&priv->io_mutex);
priv->gpio_dir[bank] &= ~BIT(bank_offset);
- gpio_mpsse_set_bank(priv, bank);
- return 0;
+ return gpio_mpsse_set_bank(priv, bank);
}
static int gpio_mpsse_get_direction(struct gpio_chip *chip,
@@ -284,18 +295,55 @@ static int gpio_mpsse_get_direction(struct gpio_chip *chip,
return ret;
}
-static void gpio_mpsse_poll(struct work_struct *work)
+static void gpio_mpsse_stop(struct mpsse_worker *worker)
+{
+ cancel_work_sync(&worker->work);
+ kfree(worker);
+}
+
+static void gpio_mpsse_poll(struct work_struct *my_work)
{
unsigned long pin_mask, pin_states, flags;
int irq_enabled, offset, err, value, fire_irq,
irq, old_value[16], irq_type[16];
- struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
- irq_work);
+ struct mpsse_worker *worker;
+ struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
+ struct mpsse_priv *priv = my_worker->priv;
+ struct list_head destructors = LIST_HEAD_INIT(destructors);
for (offset = 0; offset < priv->gpio.ngpio; ++offset)
old_value[offset] = -1;
- while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+ /*
+ * We only want one worker. Workers race to acquire irq_race and tear
+ * down all other workers. This is a cond guard so that we don't deadlock
+ * trying to cancel a worker.
+ */
+ scoped_cond_guard(mutex_try, ;, &priv->irq_race) {
+ scoped_guard(rcu) {
+ list_for_each_entry_rcu(worker, &priv->workers, list) {
+ /* Don't stop ourselves */
+ if (worker == my_worker)
+ continue;
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_del_rcu(&worker->list);
+
+ /* Give worker a chance to terminate itself */
+ atomic_set(&worker->cancelled, 1);
+ /* Keep track of stuff to cancel */
+ INIT_LIST_HEAD(&worker->destroy);
+ list_add(&worker->destroy, &destructors);
+ }
+ }
+ /* Make sure list consumers are finished before we tear down */
+ synchronize_rcu();
+ list_for_each_entry(worker, &destructors, destroy)
+ gpio_mpsse_stop(worker);
+ }
+
+ while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
+ !atomic_read(&my_worker->cancelled)) {
usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
/* Cleanup will trigger at the end of the loop */
guard(mutex)(&priv->irq_mutex);
@@ -370,21 +418,41 @@ static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
static void gpio_mpsse_irq_disable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+
+ /* Can't actually do teardown in IRQ context (blocks...) */
+ scoped_guard(rcu)
+ list_for_each_entry_rcu(worker, &priv->workers, list)
+ atomic_set(&worker->cancelled, 1);
}
static void gpio_mpsse_irq_enable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
/* If no-one else was using the IRQ, enable it */
if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
- INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
- schedule_work(&priv->irq_work);
+ /*
+ * Can't be devm because it uses a non-raw spinlock (illegal in
+ * this context, where a raw spinlock is held by our caller)
+ */
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+ if (!worker)
+ return;
+
+ worker->priv = priv;
+ INIT_LIST_HEAD(&worker->list);
+ INIT_WORK(&worker->work, gpio_mpsse_poll);
+ schedule_work(&worker->work);
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_add_rcu(&worker->list, &priv->workers);
}
}
@@ -436,6 +504,12 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
if (err)
return err;
+ err = devm_mutex_init(dev, &priv->irq_race);
+ if (err)
+ return err;
+
+ raw_spin_lock_init(&priv->irq_spin);
+
priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
"gpio-mpsse.%d.%d",
priv->id, priv->intf_id);
@@ -504,7 +578,34 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
static void gpio_mpsse_disconnect(struct usb_interface *intf)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = usb_get_intfdata(intf);
+ struct list_head destructors = LIST_HEAD_INIT(destructors);
+
+ /*
+ * Lock prevents double-free of worker from here and the teardown
+ * step at the beginning of gpio_mpsse_poll
+ */
+ scoped_guard(mutex, &priv->irq_race) {
+ scoped_guard(rcu) {
+ list_for_each_entry_rcu(worker, &priv->workers, list) {
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_del_rcu(&worker->list);
+
+ /* Give worker a chance to terminate itself */
+ atomic_set(&worker->cancelled, 1);
+ /* Keep track of stuff to cancel */
+ INIT_LIST_HEAD(&worker->destroy);
+ list_add(&worker->destroy, &destructors);
+ }
+ }
+ /* Make sure list consumers are finished before we tear down */
+ synchronize_rcu();
+ list_for_each_entry(worker, &destructors, destroy)
+ gpio_mpsse_stop(worker);
+ }
+
+ rcu_barrier();
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
--
2.47.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-23 13:33 ` [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down Mary Strodl
@ 2025-09-29 8:46 ` Dan Carpenter
2025-09-29 8:48 ` Dan Carpenter
2025-10-01 7:15 ` Linus Walleij
2025-10-02 14:02 ` Tzung-Bi Shih
2 siblings, 1 reply; 14+ messages in thread
From: Dan Carpenter @ 2025-09-29 8:46 UTC (permalink / raw)
To: oe-kbuild, Mary Strodl, linux-kernel
Cc: lkp, oe-kbuild-all, linus.walleij, brgl, linux-gpio, Mary Strodl
Hi Mary,
kernel test robot noticed the following build warnings:
[If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Mary-Strodl/gpio-mpsse-use-rcu-to-ensure-worker-is-torn-down/20250923-214710
base: https://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git gpio/for-next
patch link: https://lore.kernel.org/r/20250923133304.273529-2-mstrodl%40csh.rit.edu
patch subject: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
config: i386-randconfig-141-20250929 (https://download.01.org/0day-ci/archive/20250929/202509290823.hreUi6Tp-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.4.0-5) 12.4.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202509290823.hreUi6Tp-lkp@intel.com/
New smatch warnings:
drivers/gpio/gpio-mpsse.c:341 gpio_mpsse_poll() error: dereferencing freed memory 'worker' (line 342)
drivers/gpio/gpio-mpsse.c:604 gpio_mpsse_disconnect() error: dereferencing freed memory 'worker' (line 605)
vim +/worker +341 drivers/gpio/gpio-mpsse.c
a14b0c5e3b0741 Mary Strodl 2025-09-23 304 static void gpio_mpsse_poll(struct work_struct *my_work)
c46a74ff05c0ac Mary Strodl 2024-10-09 305 {
c46a74ff05c0ac Mary Strodl 2024-10-09 306 unsigned long pin_mask, pin_states, flags;
c46a74ff05c0ac Mary Strodl 2024-10-09 307 int irq_enabled, offset, err, value, fire_irq,
c46a74ff05c0ac Mary Strodl 2024-10-09 308 irq, old_value[16], irq_type[16];
a14b0c5e3b0741 Mary Strodl 2025-09-23 309 struct mpsse_worker *worker;
a14b0c5e3b0741 Mary Strodl 2025-09-23 310 struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
a14b0c5e3b0741 Mary Strodl 2025-09-23 311 struct mpsse_priv *priv = my_worker->priv;
a14b0c5e3b0741 Mary Strodl 2025-09-23 312 struct list_head destructors = LIST_HEAD_INIT(destructors);
c46a74ff05c0ac Mary Strodl 2024-10-09 313
c46a74ff05c0ac Mary Strodl 2024-10-09 314 for (offset = 0; offset < priv->gpio.ngpio; ++offset)
c46a74ff05c0ac Mary Strodl 2024-10-09 315 old_value[offset] = -1;
c46a74ff05c0ac Mary Strodl 2024-10-09 316
a14b0c5e3b0741 Mary Strodl 2025-09-23 317 /*
a14b0c5e3b0741 Mary Strodl 2025-09-23 318 * We only want one worker. Workers race to acquire irq_race and tear
a14b0c5e3b0741 Mary Strodl 2025-09-23 319 * down all other workers. This is a cond guard so that we don't deadlock
a14b0c5e3b0741 Mary Strodl 2025-09-23 320 * trying to cancel a worker.
a14b0c5e3b0741 Mary Strodl 2025-09-23 321 */
a14b0c5e3b0741 Mary Strodl 2025-09-23 322 scoped_cond_guard(mutex_try, ;, &priv->irq_race) {
a14b0c5e3b0741 Mary Strodl 2025-09-23 323 scoped_guard(rcu) {
a14b0c5e3b0741 Mary Strodl 2025-09-23 324 list_for_each_entry_rcu(worker, &priv->workers, list) {
a14b0c5e3b0741 Mary Strodl 2025-09-23 325 /* Don't stop ourselves */
a14b0c5e3b0741 Mary Strodl 2025-09-23 326 if (worker == my_worker)
a14b0c5e3b0741 Mary Strodl 2025-09-23 327 continue;
a14b0c5e3b0741 Mary Strodl 2025-09-23 328
a14b0c5e3b0741 Mary Strodl 2025-09-23 329 scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
a14b0c5e3b0741 Mary Strodl 2025-09-23 330 list_del_rcu(&worker->list);
a14b0c5e3b0741 Mary Strodl 2025-09-23 331
a14b0c5e3b0741 Mary Strodl 2025-09-23 332 /* Give worker a chance to terminate itself */
a14b0c5e3b0741 Mary Strodl 2025-09-23 333 atomic_set(&worker->cancelled, 1);
a14b0c5e3b0741 Mary Strodl 2025-09-23 334 /* Keep track of stuff to cancel */
a14b0c5e3b0741 Mary Strodl 2025-09-23 335 INIT_LIST_HEAD(&worker->destroy);
a14b0c5e3b0741 Mary Strodl 2025-09-23 336 list_add(&worker->destroy, &destructors);
a14b0c5e3b0741 Mary Strodl 2025-09-23 337 }
a14b0c5e3b0741 Mary Strodl 2025-09-23 338 }
a14b0c5e3b0741 Mary Strodl 2025-09-23 339 /* Make sure list consumers are finished before we tear down */
a14b0c5e3b0741 Mary Strodl 2025-09-23 340 synchronize_rcu();
a14b0c5e3b0741 Mary Strodl 2025-09-23 @341 list_for_each_entry(worker, &destructors, destroy)
a14b0c5e3b0741 Mary Strodl 2025-09-23 @342 gpio_mpsse_stop(worker);
This needs to be list_for_each_entry_save() because gpio_mpsse_stop()
frees the worker. Or kfree_rcu() inside an rcu lock or something.
a14b0c5e3b0741 Mary Strodl 2025-09-23 343 }
a14b0c5e3b0741 Mary Strodl 2025-09-23 344
a14b0c5e3b0741 Mary Strodl 2025-09-23 345 while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-29 8:46 ` Dan Carpenter
@ 2025-09-29 8:48 ` Dan Carpenter
2025-10-02 14:36 ` Mary Strodl
0 siblings, 1 reply; 14+ messages in thread
From: Dan Carpenter @ 2025-09-29 8:48 UTC (permalink / raw)
To: oe-kbuild, Mary Strodl, linux-kernel
Cc: lkp, oe-kbuild-all, linus.walleij, brgl, linux-gpio
On Mon, Sep 29, 2025 at 11:46:24AM +0300, Dan Carpenter wrote:
> a14b0c5e3b0741 Mary Strodl 2025-09-23 339 /* Make sure list consumers are finished before we tear down */
> a14b0c5e3b0741 Mary Strodl 2025-09-23 340 synchronize_rcu();
> a14b0c5e3b0741 Mary Strodl 2025-09-23 @341 list_for_each_entry(worker, &destructors, destroy)
> a14b0c5e3b0741 Mary Strodl 2025-09-23 @342 gpio_mpsse_stop(worker);
>
> This needs to be list_for_each_entry_save() because gpio_mpsse_stop()
s/save/safe/. #CantType
regards,
dan carpenter
> frees the worker. Or kfree_rcu() inside an rcu lock or something.
>
> a14b0c5e3b0741 Mary Strodl 2025-09-23 343 }
> a14b0c5e3b0741 Mary Strodl 2025-09-23 344
> a14b0c5e3b0741 Mary Strodl 2025-09-23 345 while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
>
> --
> 0-DAY CI Kernel Test Service
> https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-29 8:48 ` Dan Carpenter
@ 2025-10-02 14:36 ` Mary Strodl
0 siblings, 0 replies; 14+ messages in thread
From: Mary Strodl @ 2025-10-02 14:36 UTC (permalink / raw)
To: Dan Carpenter
Cc: oe-kbuild, linux-kernel, lkp, oe-kbuild-all, linus.walleij, brgl,
linux-gpio
Hey Dan,
On Mon, Sep 29, 2025 at 11:48:46AM +0300, Dan Carpenter wrote:
> On Mon, Sep 29, 2025 at 11:46:24AM +0300, Dan Carpenter wrote:
> > a14b0c5e3b0741 Mary Strodl 2025-09-23 339 /* Make sure list consumers are finished before we tear down */
> > a14b0c5e3b0741 Mary Strodl 2025-09-23 340 synchronize_rcu();
> > a14b0c5e3b0741 Mary Strodl 2025-09-23 @341 list_for_each_entry(worker, &destructors, destroy)
> > a14b0c5e3b0741 Mary Strodl 2025-09-23 @342 gpio_mpsse_stop(worker);
> >
> > This needs to be list_for_each_entry_safe() because gpio_mpsse_stop()
> > frees the worker. Or kfree_rcu() inside an rcu lock or something.
Really good catch! The loop above this one has the same problem. Will be fixed next revision :)
Thank you!
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-23 13:33 ` [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down Mary Strodl
2025-09-29 8:46 ` Dan Carpenter
@ 2025-10-01 7:15 ` Linus Walleij
2025-10-01 15:07 ` Mary Strodl
2025-10-02 14:02 ` Tzung-Bi Shih
2 siblings, 1 reply; 14+ messages in thread
From: Linus Walleij @ 2025-10-01 7:15 UTC (permalink / raw)
To: Mary Strodl
Cc: linux-kernel, brgl, linux-gpio, Paul E. McKenney, Tzung-Bi Shih
On Tue, Sep 23, 2025 at 3:35 PM Mary Strodl <mstrodl@csh.rit.edu> wrote:
> When an IRQ worker is running, unplugging the device would cause a
> crash. The sealevel hardware this driver was written for was not
> hotpluggable, so I never realized it.
>
> This change uses RCU to create a list of workers, which it tears down on
> disconnect.
>
> Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
Oh this RCU thing is a bit terse and a lot of code.
Can you look if you can use the new revocable resource
management API? It uses RCU underneath.
https://lwn.net/Articles/1038523/
Yours,
Linus Walleij
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-10-01 7:15 ` Linus Walleij
@ 2025-10-01 15:07 ` Mary Strodl
2025-10-01 22:51 ` Linus Walleij
2025-10-02 14:03 ` Tzung-Bi Shih
0 siblings, 2 replies; 14+ messages in thread
From: Mary Strodl @ 2025-10-01 15:07 UTC (permalink / raw)
To: Linus Walleij
Cc: linux-kernel, brgl, linux-gpio, Paul E. McKenney, Tzung-Bi Shih
Hey Linus,
On Wed, Oct 01, 2025 at 09:15:14AM +0200, Linus Walleij wrote:
> Oh this RCU thing is a bit terse and a lot of code.
>
> Can you look if you can use the new revocable resource
> management API? It uses RCU underneath.
> https://lwn.net/Articles/1038523/
Yeah I'm very open to suggestions about how to do this nicer.
I can't read that article, because I don't subscribe to LWN (Maybe I should
though). Looks like it becomes free tomorrow, so I can take a look then. I did
find and skim through this, which I believe is the implementation of the API
you're talking about:
https://lore.kernel.org/chrome-platform/20250923075302.591026-2-tzungbi@kernel.org/
Based on this, it seems like:
* This uses sleepable RCU, which I don't think we can use in the IRQ callbacks
since they don't allow any blocking (this is the main reason for the complexity)
* We'd still need some sort of list primitive, because we could potentially have
multiple workers being torn down at a time, so we need to know what all to
revoke when the device is being torn down. Right now, I'm using the RCU lists
API to keep track of this. My instinct is to use devm, but that also isn't
technically safe for use in IRQ handlers.
I obviously haven't played with the revocable resource management API, so
maybe these limitations aren't as big of a deal as I think they are.
With that said, I think now that I've found spinlocks work, I could use those
to gate access to the list everywhere, and use the standard lists api rather
than the RCU lists api. Obviously teardown of the workers would happen outside
the spin lock critical section, guarded by a proper mutex.
Let me know what you think... Thank you!
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-10-01 15:07 ` Mary Strodl
@ 2025-10-01 22:51 ` Linus Walleij
2025-10-02 14:03 ` Tzung-Bi Shih
1 sibling, 0 replies; 14+ messages in thread
From: Linus Walleij @ 2025-10-01 22:51 UTC (permalink / raw)
To: Mary Strodl
Cc: linux-kernel, brgl, linux-gpio, Paul E. McKenney, Tzung-Bi Shih
On Wed, Oct 1, 2025 at 5:07 PM Mary Strodl <mstrodl@csh.rit.edu> wrote:
> With that said, I think now that I've found spinlocks work, I could use those
> to gate access to the list everywhere, and use the standard lists api rather
> than the RCU lists api. Obviously teardown of the workers would happen outside
> the spin lock critical section, guarded by a proper mutex.
Yeah RCU is for massive parallelism where you have a lot of (performance
sensitive) readers and a few writers to the struct.
If this isn't performance-sensitive and doesn't have a *lot* of simultaneous
readers, try to use simpler locking mechanisms if you can.
But if performance hampers, RCU is surely the way to go!
Yours,
Linus Walleij
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-10-01 15:07 ` Mary Strodl
2025-10-01 22:51 ` Linus Walleij
@ 2025-10-02 14:03 ` Tzung-Bi Shih
2025-10-02 14:34 ` Mary Strodl
1 sibling, 1 reply; 14+ messages in thread
From: Tzung-Bi Shih @ 2025-10-02 14:03 UTC (permalink / raw)
To: Mary Strodl
Cc: Linus Walleij, linux-kernel, brgl, linux-gpio, Paul E. McKenney
On Wed, Oct 01, 2025 at 11:07:00AM -0400, Mary Strodl wrote:
> Hey Linus,
>
> On Wed, Oct 01, 2025 at 09:15:14AM +0200, Linus Walleij wrote:
> > Oh this RCU thing is a bit terse and a lot of code.
> >
> > Can you look if you can use the new revocable resource
> > management API? It uses RCU underneath.
> > https://lwn.net/Articles/1038523/
>
> Yeah I'm very open to suggestions about how to do this nicer.
>
> I can't read that article, because I don't subscribe to LWN (Maybe I should
> though). Looks like it becomes free tomorrow, so I can take a look then. I did
> find and skim through this, which I believe is the implementation of the API
> you're talking about:
> https://lore.kernel.org/chrome-platform/20250923075302.591026-2-tzungbi@kernel.org/
>
> Based on this, it seems like:
> * This uses sleepable RCU, which I don't think we can use in the IRQ callbacks
> since they don't allow any blocking (this is the main reason for the complexity)
Good to know. It hints that maybe revocable should provide both RCU and
SRCU variants.
> * We'd still need some sort of list primitive, because we could potentially have
> multiple workers being torn down at a time, so we need to know what all to
> revoke when the device is being torn down. Right now, I'm using the RCU lists
> API to keep track of this. My instinct is to use devm, but that also isn't
> technically safe for use in IRQ handlers.
>
> I obviously haven't played with the revocable resource management API, so
> maybe these limitations aren't as big of a deal as I think they are.
To use the revocable API, we need to identify which resources should be
protected. It seems there are some UAF possibilities in gpio_mpsse_poll()
after unplugging the device.
Thanks for letting me know the use case. We are figuring out whether
revocable could provide subsystem-level helpers so that device drivers
don't need to play with primitive revocable APIs.
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-10-02 14:03 ` Tzung-Bi Shih
@ 2025-10-02 14:34 ` Mary Strodl
0 siblings, 0 replies; 14+ messages in thread
From: Mary Strodl @ 2025-10-02 14:34 UTC (permalink / raw)
To: Tzung-Bi Shih
Cc: Linus Walleij, linux-kernel, brgl, linux-gpio, Paul E. McKenney
Hello!
On Thu, Oct 02, 2025 at 10:03:33PM +0800, Tzung-Bi Shih wrote:
> To use revocable API, we need to identify what resources should protect. It
> seems there are some UAF possibilities in gpio_mpsse_poll() after unplugging
> the device.
Yeah, that's what the cancel_work_sync is supposed to protect. This case is a
little weird because the task itself is what could get destroyed on unplug,
but it could be cool to have some guarantees around accessing `priv` and other
resources in the worker?
I'm open to suggestions if you have ideas. Thanks!
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-09-23 13:33 ` [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down Mary Strodl
2025-09-29 8:46 ` Dan Carpenter
2025-10-01 7:15 ` Linus Walleij
@ 2025-10-02 14:02 ` Tzung-Bi Shih
2025-10-02 14:28 ` Mary Strodl
2 siblings, 1 reply; 14+ messages in thread
From: Tzung-Bi Shih @ 2025-10-02 14:02 UTC (permalink / raw)
To: Mary Strodl; +Cc: linux-kernel, linus.walleij, brgl, linux-gpio
On Tue, Sep 23, 2025 at 09:33:02AM -0400, Mary Strodl wrote:
> @@ -261,9 +273,8 @@ static int gpio_mpsse_direction_input(struct gpio_chip *chip,
>
> guard(mutex)(&priv->io_mutex);
> priv->gpio_dir[bank] &= ~BIT(bank_offset);
> - gpio_mpsse_set_bank(priv, bank);
>
> - return 0;
> + return gpio_mpsse_set_bank(priv, bank);
The change looks irrelevant to the patch.
> +static void gpio_mpsse_poll(struct work_struct *my_work)
> {
[...]
> + /*
> + * We only want one worker. Workers race to acquire irq_race and tear
> + * down all other workers. This is a cond guard so that we don't deadlock
> + * trying to cancel a worker.
> + */
> + scoped_cond_guard(mutex_try, ;, &priv->irq_race) {
> + scoped_guard(rcu) {
> + list_for_each_entry_rcu(worker, &priv->workers, list) {
I'm not sure: doesn't it need to use list_for_each_entry_safe() (or variants)
as elements may be removed in the loop?
> + /* Don't stop ourselves */
> + if (worker == my_worker)
> + continue;
> +
> + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> + list_del_rcu(&worker->list);
If RCU is using, does it still need to acquire the spinlock? Alternatively,
could it use the spinlock to protect the list so that it doesn't need RCU at
all?
> static void gpio_mpsse_irq_disable(struct irq_data *irqd)
> {
> + struct mpsse_worker *worker;
> struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
>
> atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
> gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
> +
> + /* Can't actually do teardown in IRQ context (blocks...) */
> + scoped_guard(rcu)
> + list_for_each_entry_rcu(worker, &priv->workers, list)
> + atomic_set(&worker->cancelled, 1);
> }
>
> static void gpio_mpsse_irq_enable(struct irq_data *irqd)
> {
> + struct mpsse_worker *worker;
> struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
>
> gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
> /* If no-one else was using the IRQ, enable it */
> if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
> - INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
> - schedule_work(&priv->irq_work);
> + /*
> + * Can't be devm because it uses a non-raw spinlock (illegal in
> + * this context, where a raw spinlock is held by our caller)
> + */
> + worker = kzalloc(sizeof(*worker), GFP_KERNEL);
I'm not sure: however it seems this function may be in IRQ context too (as
gpio_mpsse_irq_disable() does). GFP_KERNEL can sleep.
> + if (!worker)
> + return;
> +
> + worker->priv = priv;
> + INIT_LIST_HEAD(&worker->list);
> + INIT_WORK(&worker->work, gpio_mpsse_poll);
> + schedule_work(&worker->work);
> +
> + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> + list_add_rcu(&worker->list, &priv->workers);
Doesn't it need a synchronize_rcu()?
> static void gpio_mpsse_disconnect(struct usb_interface *intf)
> {
> + struct mpsse_worker *worker;
> struct mpsse_priv *priv = usb_get_intfdata(intf);
> + struct list_head destructors = LIST_HEAD_INIT(destructors);
> +
> + /*
> + * Lock prevents double-free of worker from here and the teardown
> + * step at the beginning of gpio_mpsse_poll
> + */
> + scoped_guard(mutex, &priv->irq_race) {
> + scoped_guard(rcu) {
> + list_for_each_entry_rcu(worker, &priv->workers, list) {
> + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> + list_del_rcu(&worker->list);
> +
> + /* Give worker a chance to terminate itself */
> + atomic_set(&worker->cancelled, 1);
> + /* Keep track of stuff to cancel */
> + INIT_LIST_HEAD(&worker->destroy);
> + list_add(&worker->destroy, &destructors);
> + }
> + }
> + /* Make sure list consumers are finished before we tear down */
> + synchronize_rcu();
> + list_for_each_entry(worker, &destructors, destroy)
> + gpio_mpsse_stop(worker);
> + }
The code block is very similar to block in gpio_mpsse_poll() above. Could
consider to use a function to prevent duplicate code.
^ permalink raw reply [flat|nested] 14+ messages in thread* Re: [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down
2025-10-02 14:02 ` Tzung-Bi Shih
@ 2025-10-02 14:28 ` Mary Strodl
0 siblings, 0 replies; 14+ messages in thread
From: Mary Strodl @ 2025-10-02 14:28 UTC (permalink / raw)
To: Tzung-Bi Shih; +Cc: linux-kernel, linus.walleij, brgl, linux-gpio
Hello!
On Thu, Oct 02, 2025 at 10:02:21PM +0800, Tzung-Bi Shih wrote:
> The change looks irrelevant to the patch.
I can put this in another patch in the series. I wasn't sure.
> I'm not sure: doesn't it need to use list_for_each_entry_safe() (or variants)
> as elements may be removed in the loop?
Absolutely! I noticed this too. Fix coming next revision :)
>
> > + /* Don't stop ourselves */
> > + if (worker == my_worker)
> > + continue;
> > +
> > + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> > + list_del_rcu(&worker->list);
>
> If RCU is using, does it still need to acquire the spinlock?
I believe so, yes. My understanding is RCU lists are safe against unprotected
reads, but you still need to protect list ops like add/remove:
https://www.kernel.org/doc/html/latest/RCU/listRCU.html#example-1-read-mostly-list-deferred-destruction
> Alternatively, could it use the spinlock to protect the list so that it doesn't
> need RCU at all?
Yes! That's what my next version will do.
> I'm not sure: however it seems this function may be in IRQ context too (as
> gpio_mpsse_irq_disable() does). GFP_KERNEL can sleep.
I worried about the same, but didn't actually follow up on it because I never
ran into it. My bad. I will make this GFP_NOWAIT in the next revision.
> > + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> > + list_add_rcu(&worker->list, &priv->workers);
>
> Doesn't it need a synchronize_rcu()?
My understanding was that synchronize_rcu was a grace period delay, so you
could be certain any readers after this point would get the new data.
In this case, we don't care what the readers get. Now that I'm thinking
about it though, maybe the irq loop should call synchronize_rcu first?
In any case though, this will be going away in my next version.
> > static void gpio_mpsse_disconnect(struct usb_interface *intf)
> > {
> > + struct mpsse_worker *worker;
> > struct mpsse_priv *priv = usb_get_intfdata(intf);
> > + struct list_head destructors = LIST_HEAD_INIT(destructors);
> > +
> > + /*
> > + * Lock prevents double-free of worker from here and the teardown
> > + * step at the beginning of gpio_mpsse_poll
> > + */
> > + scoped_guard(mutex, &priv->irq_race) {
> > + scoped_guard(rcu) {
> > + list_for_each_entry_rcu(worker, &priv->workers, list) {
> > + scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
> > + list_del_rcu(&worker->list);
> > +
> > + /* Give worker a chance to terminate itself */
> > + atomic_set(&worker->cancelled, 1);
> > + /* Keep track of stuff to cancel */
> > + INIT_LIST_HEAD(&worker->destroy);
> > + list_add(&worker->destroy, &destructors);
> > + }
> > + }
> > + /* Make sure list consumers are finished before we tear down */
> > + synchronize_rcu();
> > + list_for_each_entry(worker, &destructors, destroy)
> > + gpio_mpsse_stop(worker);
> > + }
>
> The code block is very similar to block in gpio_mpsse_poll() above. Could
> consider to use a function to prevent duplicate code.
Yeah I agree. I didn't really see a satisfying way to do it with the difference
in scoped_guard vs scoped_cond_guard, though. Now that I'm thinking about it
again though, I could just take everything inside the mutex guard and put
that into a function.
Thanks a lot for taking a look! It's hard doing a critical reading of your own
code, especially for concurrency/memory safety things :)
^ permalink raw reply [flat|nested] 14+ messages in thread
* [PATCH v2 2/3] gpio: mpsse: add quirk support
2025-09-23 13:33 [PATCH v2 0/3] gpio: mpsse: add support for bryx brik Mary Strodl
2025-09-23 13:33 ` [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down Mary Strodl
@ 2025-09-23 13:33 ` Mary Strodl
2025-09-23 13:33 ` [PATCH v2 3/3] gpio: mpsse: support bryx radio interface kit Mary Strodl
2 siblings, 0 replies; 14+ messages in thread
From: Mary Strodl @ 2025-09-23 13:33 UTC (permalink / raw)
To: linux-kernel; +Cc: linus.walleij, brgl, linux-gpio, Mary Strodl
Builds out a facility for specifying compatible line directions and
labels for MPSSE-based devices.
* dir_in/out are bitmasks of lines that can go in/out. 0 means
compatible, 1 means incompatible. This is convenient, because it means
if the struct is zeroed out, it supports all pins.
* names is an array of line names which will be exposed to userspace.
Also changes the chip label format to include some more useful
information about the device to help identify it from userspace.
Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
---
drivers/gpio/gpio-mpsse.c | 109 ++++++++++++++++++++++++++++++++++++--
1 file changed, 106 insertions(+), 3 deletions(-)
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index af9a9196ac9d..ad464914a19b 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -29,6 +29,9 @@ struct mpsse_priv {
u8 gpio_outputs[2]; /* Output states for GPIOs [L, H] */
u8 gpio_dir[2]; /* Directions for GPIOs [L, H] */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+
u8 *bulk_in_buf; /* Extra recv buffer to grab status bytes */
struct usb_endpoint_descriptor *bulk_in;
@@ -55,6 +58,14 @@ struct bulk_desc {
int timeout;
};
+#define MPSSE_NGPIO 16
+
+struct mpsse_quirk {
+ const char *names[MPSSE_NGPIO]; /* Pin names, if applicable */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+};
+
static const struct usb_device_id gpio_mpsse_table[] = {
{ USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
{ } /* Terminating entry */
@@ -172,6 +183,32 @@ static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
return buf;
}
+static int mpsse_ensure_supported(struct gpio_chip *chip,
+ unsigned long *mask, int direction)
+{
+ unsigned long supported, unsupported;
+ char *type = "input";
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ supported = priv->dir_in;
+ if (direction == GPIO_LINE_DIRECTION_OUT) {
+ supported = priv->dir_out;
+ type = "output";
+ }
+
+ /* An invalid bit was in the provided mask */
+ unsupported = *mask & supported;
+ if (unsupported) {
+ dev_err(&priv->udev->dev,
+ "mpsse: GPIO %d doesn't support %s\n",
+ (int)find_first_bit(&unsupported, sizeof(unsupported) * 8),
+ type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
@@ -179,6 +216,10 @@ static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, mask, GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -206,6 +247,10 @@ static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, mask, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -254,9 +299,15 @@ static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
static int gpio_mpsse_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ unsigned long mask = BIT(offset);
+
+ ret = mpsse_ensure_supported(chip, &mask, GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
scoped_guard(mutex, &priv->io_mutex)
priv->gpio_dir[bank] |= BIT(bank_offset);
@@ -267,9 +318,15 @@ static int gpio_mpsse_direction_output(struct gpio_chip *chip,
static int gpio_mpsse_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ unsigned long mask = BIT(offset);
+
+ ret = mpsse_ensure_supported(chip, &mask, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
guard(mutex)(&priv->io_mutex);
priv->gpio_dir[bank] &= ~BIT(bank_offset);
@@ -472,18 +529,50 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
+static int mpsse_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return -ENODEV;
+
+ /* If bit is set in both, set to 0 (NAND) */
+ *valid_mask = ~priv->dir_in | ~priv->dir_out;
+
+ return 0;
+}
+
+static void mpsse_irq_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return;
+
+ /* Can only use IRQ on input capable pins */
+ *valid_mask = ~priv->dir_in;
+}
+
static int gpio_mpsse_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct mpsse_priv *priv;
struct device *dev;
+ char *serial;
int err;
+ struct mpsse_quirk *quirk = (void *)id->driver_info;
dev = &interface->dev;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->workers);
+
priv->udev = usb_get_dev(interface_to_usbdev(interface));
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -510,9 +599,15 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
raw_spin_lock_init(&priv->irq_spin);
+ serial = priv->udev->serial;
+ if (!serial)
+ serial = "NONE";
+
priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
- "gpio-mpsse.%d.%d",
- priv->id, priv->intf_id);
+ "MPSSE%04x:%04x.%d.%d.%s",
+ id->idVendor, id->idProduct,
+ priv->intf_id, priv->id,
+ serial);
if (!priv->gpio.label)
return -ENOMEM;
@@ -526,10 +621,17 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
- priv->gpio.ngpio = 16;
+ priv->gpio.ngpio = MPSSE_NGPIO;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
priv->gpio.can_sleep = 1;
+ if (quirk) {
+ priv->dir_out = quirk->dir_out;
+ priv->dir_in = quirk->dir_in;
+ priv->gpio.names = quirk->names;
+ priv->gpio.init_valid_mask = mpsse_init_valid_mask;
+ }
+
err = usb_find_common_endpoints(interface->cur_altsetting,
&priv->bulk_in, &priv->bulk_out,
NULL, NULL);
@@ -568,6 +670,7 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.irq.parents = NULL;
priv->gpio.irq.default_type = IRQ_TYPE_NONE;
priv->gpio.irq.handler = handle_simple_irq;
+ priv->gpio.irq.init_valid_mask = mpsse_irq_init_valid_mask;
err = devm_gpiochip_add_data(dev, &priv->gpio, priv);
if (err)
--
2.47.0
^ permalink raw reply related [flat|nested] 14+ messages in thread* [PATCH v2 3/3] gpio: mpsse: support bryx radio interface kit
2025-09-23 13:33 [PATCH v2 0/3] gpio: mpsse: add support for bryx brik Mary Strodl
2025-09-23 13:33 ` [PATCH v2 1/3] gpio: mpsse: use rcu to ensure worker is torn down Mary Strodl
2025-09-23 13:33 ` [PATCH v2 2/3] gpio: mpsse: add quirk support Mary Strodl
@ 2025-09-23 13:33 ` Mary Strodl
2 siblings, 0 replies; 14+ messages in thread
From: Mary Strodl @ 2025-09-23 13:33 UTC (permalink / raw)
To: linux-kernel; +Cc: linus.walleij, brgl, linux-gpio, Mary Strodl
This device is powered by an FT232H, which is very similar to the
FT2232H this driver was written for. The key difference is it has only
one MPSSE instead of two. As a result, it presents only one USB
interface to the system, which conveniently "just works" out of the box
with this driver.
The brik exposes only two GPIO lines, which are hardware-limited to only
be useful in one direction. As a result, I've restricted things on the
driver side to refuse to configure any other lines.
This device, unlike the SeaLevel device I wrote this driver for
originally, is hotpluggable, which makes for all sorts of weird
edge cases. I've tried my best to stress-test the parts that could go
wrong, but given the new use case, more heads taking a critical look at
the teardown and synchronization bits of the driver as a whole would be
appreciated.
Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
---
drivers/gpio/gpio-mpsse.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index ad464914a19b..bef82685ea5f 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -66,8 +66,19 @@ struct mpsse_quirk {
unsigned long dir_out; /* Bitmask of valid output pins */
};
+static struct mpsse_quirk bryx_brik_quirk = {
+ .names = {
+ [3] = "Push to Talk",
+ [5] = "Channel Activity",
+ },
+ .dir_out = ~BIT(3), /* Push to Talk */
+ .dir_in = ~BIT(5), /* Channel Activity */
+};
+
static const struct usb_device_id gpio_mpsse_table[] = {
{ USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
+ { USB_DEVICE(0x0403, 0x6988), /* FTDI, assigned to Bryx */
+ .driver_info = (kernel_ulong_t)&bryx_brik_quirk},
{ } /* Terminating entry */
};
--
2.47.0
^ permalink raw reply related [flat|nested] 14+ messages in thread