Linux virtualization list
 help / color / mirror / Atom feed
From: "Lange Tang" <lange_tang@163.com>
To: "jasowang@redhat.com" <jasowang@redhat.com>
Cc: "mst@redhat.com" <mst@redhat.com>,
	"xuanzhuo@linux.alibaba.com" <xuanzhuo@linux.alibaba.com>,
	"edumazet@google.com" <edumazet@google.com>,
	"Tang Longjun" <tanglongjun@kylinos.cn>,
	"virtualization@lists.linux.dev" <virtualization@lists.linux.dev>
Subject: Re:Re: [PATCH 2/2] virtio_ring: return IRQ_HANDLED for stale interrupts when cb disabled
Date: Tue, 7 Apr 2026 15:28:35 +0800 (CST)	[thread overview]
Message-ID: <5f3c416e.7515.19d66d7d8ec.Coremail.lange_tang@163.com> (raw)
In-Reply-To: <CACGkMEv_VmzD-JthmP300Y4ChtqBhdf4bd3tHYBd3EbxzU1XTA@mail.gmail.com>

At 2026-04-03 10:55:58, "Jason Wang" <jasowang@redhat.com> wrote:
>On Tue, Mar 31, 2026 at 6:27 PM Longjun Tang <lange_tang@163.com> wrote:
>>
>> From: Longjun Tang <tanglongjun@kylinos.cn>
>>
>> In the vring_interrupt, if the used ring is empty, IRQ_NONE is returned.
>> However, sometimes — such as with busy-polling — buffers might be consumed
>> from the used ring before a stale interrupt notification arrives, which
>> leads the handler to return IRQ_NONE.
>>
>> The kernel's spurious-IRQ detector counts consecutive IRQ_NONE returns
>> and will permanently disable the interrupt line if 99,900 out of 100,000
>> interrupts go unhandled.
>>
>> Add is_cb_disabled() to virtqueue_ops and, when more_used() is false but
>> callbacks are suppressed, return IRQ_HANDLED instead of IRQ_NONE so the
>> spurious-interrupt counter does not accumulate.
>>
>> Signed-off-by: Longjun Tang <tanglongjun@kylinos.cn>
>> ---
>>  drivers/virtio/virtio_ring.c | 29 +++++++++++++++++++++++++++++
>>  1 file changed, 29 insertions(+)
>>
>> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
>> index 335692d41617..52df932fc4a2 100644
>> --- a/drivers/virtio/virtio_ring.c
>> +++ b/drivers/virtio/virtio_ring.c
>> @@ -185,6 +185,7 @@ struct virtqueue_ops {
>>                      unsigned int last_used_idx);
>>         void *(*detach_unused_buf)(struct vring_virtqueue *vq);
>>         bool (*more_used)(const struct vring_virtqueue *vq);
>> +       bool (*is_cb_disabled)(const struct vring_virtqueue *vq);
>>         int (*resize)(struct vring_virtqueue *vq, u32 num);
>>         void (*reset)(struct vring_virtqueue *vq);
>>  };
>> @@ -1063,6 +1064,12 @@ static void virtqueue_disable_cb_split(struct vring_virtqueue *vq)
>>         }
>>  }
>>
>> +static bool is_cb_disabled_split(const struct vring_virtqueue *vq)
>> +{
>> +       return !!(data_race(vq->split.avail_flags_shadow) &
>> +                 VRING_AVAIL_F_NO_INTERRUPT);
>> +}
>> +
>>  static unsigned int virtqueue_enable_cb_prepare_split(struct vring_virtqueue *vq)
>>  {
>>         u16 last_used_idx;
>> @@ -2227,6 +2234,12 @@ static void virtqueue_disable_cb_packed(struct vring_virtqueue *vq)
>>         }
>>  }
>>
>> +static bool is_cb_disabled_packed(const struct vring_virtqueue *vq)
>> +{
>> +       return data_race(vq->packed.event_flags_shadow) ==
>> +              VRING_PACKED_EVENT_FLAG_DISABLE;
>> +}
>> +
>>  static unsigned int virtqueue_enable_cb_prepare_packed(struct vring_virtqueue *vq)
>>  {
>>         START_USE(vq);
>> @@ -2644,6 +2657,7 @@ static const struct virtqueue_ops split_ops = {
>>         .poll = virtqueue_poll_split,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_split,
>>         .more_used = more_used_split,
>> +       .is_cb_disabled = is_cb_disabled_split,
>>         .resize = virtqueue_resize_split,
>>         .reset = virtqueue_reset_split,
>>  };
>> @@ -2658,6 +2672,7 @@ static const struct virtqueue_ops packed_ops = {
>>         .poll = virtqueue_poll_packed,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_packed,
>>         .more_used = more_used_packed,
>> +       .is_cb_disabled = is_cb_disabled_packed,
>>         .resize = virtqueue_resize_packed,
>>         .reset = virtqueue_reset_packed,
>>  };
>> @@ -2672,6 +2687,7 @@ static const struct virtqueue_ops split_in_order_ops = {
>>         .poll = virtqueue_poll_split,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_split,
>>         .more_used = more_used_split_in_order,
>> +       .is_cb_disabled = is_cb_disabled_split,
>>         .resize = virtqueue_resize_split,
>>         .reset = virtqueue_reset_split,
>>  };
>> @@ -2686,6 +2702,7 @@ static const struct virtqueue_ops packed_in_order_ops = {
>>         .poll = virtqueue_poll_packed,
>>         .detach_unused_buf = virtqueue_detach_unused_buf_packed,
>>         .more_used = more_used_packed_in_order,
>> +       .is_cb_disabled = is_cb_disabled_packed,
>>         .resize = virtqueue_resize_packed,
>>         .reset = virtqueue_reset_packed,
>>  };
>> @@ -3231,6 +3248,18 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
>>         struct vring_virtqueue *vq = to_vvq(_vq);
>>
>>         if (!more_used(vq)) {
>> +               /*
>> +                * Stale interrupt: the device posted this notification
>> +                * before it observed the callback suppression;
>> +                * When more_used returns empty, IRQ_HANDLED should be
>> +                * returned for stale interrupts.
>> +                */
>> +               if (VIRTQUEUE_CALL(vq, is_cb_disabled)) {
>> +                       if (vq->event)
>> +                               data_race(vq->event_triggered = true);
>
>Why event idx is special here?
>
>Btw, looking at the comment of virtqueue_disable_cb_split:
>
>                /*
>                 * If device triggered an event already it won't
>trigger one again:
>                 * no need to disable.
>                 */
>        if (vq->event_triggered)
>                        return;
>
>It makes sense only for event index.

Yes, I will remove this part in the next version.

>
>Thanks
>
>> +                       pr_debug("virtqueue stale interrupt (callbacks disabled) for %p\n", vq);
>> +                       return IRQ_HANDLED;
>> +               }
>>                 pr_debug("virtqueue interrupt with no work for %p\n", vq);
>>                 return IRQ_NONE;
>>         }
>> --
>> 2.43.0
>>

      reply	other threads:[~2026-04-07  7:28 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-31 10:26 [PATCH 0/2] fix virtio_net/virtio when in busy-polling Longjun Tang
2026-03-31 10:26 ` [PATCH 1/2] virtio_net: disable cb " Longjun Tang
2026-04-03  2:57   ` Jason Wang
2026-04-07  7:02     ` Lange Tang
2026-03-31 10:26 ` [PATCH 2/2] virtio_ring: return IRQ_HANDLED for stale interrupts when cb disabled Longjun Tang
2026-04-03  2:55   ` Jason Wang
2026-04-07  7:28     ` Lange Tang [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5f3c416e.7515.19d66d7d8ec.Coremail.lange_tang@163.com \
    --to=lange_tang@163.com \
    --cc=edumazet@google.com \
    --cc=jasowang@redhat.com \
    --cc=mst@redhat.com \
    --cc=tanglongjun@kylinos.cn \
    --cc=virtualization@lists.linux.dev \
    --cc=xuanzhuo@linux.alibaba.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox