qemu-devel.nongnu.org archive mirror
* [PATCH] hw/nvme: add trace events for ioeventfd
From: Klaus Jensen @ 2022-07-14  5:34 UTC
  To: qemu-devel
  Cc: Jinhao Fan, qemu-block, Keith Busch, Klaus Jensen, Klaus Jensen

From: Klaus Jensen <k.jensen@samsung.com>

While testing Jinhao's ioeventfd patch I found it useful to have a couple of
additional trace events, since with ioeventfd we no longer see the MMIO events.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
 hw/nvme/ctrl.c       | 8 ++++++++
 hw/nvme/trace-events | 4 ++++
 2 files changed, 12 insertions(+)

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 533ad14e7a61..09725ec49c5d 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -1346,6 +1346,8 @@ static void nvme_post_cqes(void *opaque)
     bool pending = cq->head != cq->tail;
     int ret;
 
+    trace_pci_nvme_post_cqes(cq->cqid);
+
     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
         NvmeSQueue *sq;
         hwaddr addr;
@@ -4238,6 +4240,8 @@ static void nvme_cq_notifier(EventNotifier *e)
     NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier);
     NvmeCtrl *n = cq->ctrl;
 
+    trace_pci_nvme_cq_notify(cq->cqid);
+
     event_notifier_test_and_clear(&cq->notifier);
 
     nvme_update_cq_head(cq);
@@ -4275,6 +4279,8 @@ static void nvme_sq_notifier(EventNotifier *e)
 {
     NvmeSQueue *sq = container_of(e, NvmeSQueue, notifier);
 
+    trace_pci_nvme_sq_notify(sq->sqid);
+
     event_notifier_test_and_clear(&sq->notifier);
 
     nvme_process_sq(sq);
@@ -6240,6 +6246,8 @@ static void nvme_process_sq(void *opaque)
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
 
+    trace_pci_nvme_process_sq(sq->sqid);
+
     uint16_t status;
     hwaddr addr;
     NvmeCmd cmd;
diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events
index fccb79f48973..45dd708bd2fa 100644
--- a/hw/nvme/trace-events
+++ b/hw/nvme/trace-events
@@ -104,6 +104,10 @@ pci_nvme_mmio_shutdown_set(void) "shutdown bit set"
 pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
 pci_nvme_shadow_doorbell_cq(uint16_t cqid, uint16_t new_shadow_doorbell) "cqid %"PRIu16" new_shadow_doorbell %"PRIu16""
 pci_nvme_shadow_doorbell_sq(uint16_t sqid, uint16_t new_shadow_doorbell) "sqid %"PRIu16" new_shadow_doorbell %"PRIu16""
+pci_nvme_sq_notify(uint16_t sqid) "sqid %"PRIu16""
+pci_nvme_cq_notify(uint16_t cqid) "cqid %"PRIu16""
+pci_nvme_process_sq(uint16_t sqid) "sqid %"PRIu16""
+pci_nvme_post_cqes(uint16_t cqid) "cqid %"PRIu16""
 pci_nvme_open_zone(uint64_t slba, uint32_t zone_idx, int all) "open zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
 pci_nvme_close_zone(uint64_t slba, uint32_t zone_idx, int all) "close zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
 pci_nvme_finish_zone(uint64_t slba, uint32_t zone_idx, int all) "finish zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
-- 
2.36.1
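
A quick usage note: once this patch is applied, the new tracepoints behave
like any other QEMU trace event and can be enabled from the command line. A
minimal sketch, assuming a build with a trace backend such as "log"; the
image path, drive id and serial number below are made up:

    # nvme.img, nvm and deadbeef are placeholder names, not from the patch
    qemu-system-x86_64 \
        -drive file=nvme.img,if=none,id=nvm \
        -device nvme,serial=deadbeef,drive=nvm \
        -trace pci_nvme_sq_notify \
        -trace pci_nvme_cq_notify \
        -trace pci_nvme_process_sq \
        -trace pci_nvme_post_cqes

A glob such as -trace "pci_nvme_*" should also work if you want every
hw/nvme event at once.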




* Re: [PATCH] hw/nvme: add trace events for ioeventfd
From: Jinhao Fan @ 2022-07-14 14:41 UTC
  To: Klaus Jensen; +Cc: qemu-devel, qemu-block, Keith Busch, Klaus Jensen

at 1:34 PM, Klaus Jensen <its@irrelevant.dk> wrote:

> From: Klaus Jensen <k.jensen@samsung.com>
> 
> While testing Jinhao's ioeventfd patch I found it useful to have a couple of
> additional trace events, since with ioeventfd we no longer see the MMIO events.
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> [...]

I agree on the addition of SQ and CQ notify trace events. But what is the
purpose of adding tracepoints for nvme_process_sq and nvme_post_cqes?



* Re: [PATCH] hw/nvme: add trace events for ioeventfd
From: Jinhao Fan @ 2022-07-20  2:47 UTC
  To: Klaus Jensen; +Cc: qemu-devel, qemu-block, Keith Busch, Klaus Jensen

at 10:41 PM, Jinhao Fan <fanjinhao21s@ict.ac.cn> wrote:

> [...]
> 
> I agree on the addition of SQ and CQ notify trace events. But what is the
> purpose of adding tracepoints for nvme_process_sq and nvme_post_cqes?

I realized these two events are useful when debugging iothread support. We
process SQEs and CQEs in batches in nvme_process_sq and nvme_post_cqes, so it
is important to mark the beginning of each batch.
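
For anyone reproducing this while debugging, the two batch-level events can
also be toggled at runtime from the monitor instead of on the command line.
A rough sketch from the HMP monitor; the exact command syntax here is from
memory, so treat it as an assumption and double-check with "help" on your
build:

    # HMP monitor session (syntax assumed, verify with "help trace-event")
    (qemu) trace-event pci_nvme_process_sq on
    (qemu) trace-event pci_nvme_post_cqes on
    (qemu) info trace-events

Enabling them only once the workload is running keeps the trace log quiet
until the queues are actually being exercised.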



* Re: [PATCH] hw/nvme: add trace events for ioeventfd
From: Klaus Jensen @ 2022-07-27  7:10 UTC
  To: Jinhao Fan; +Cc: qemu-devel, qemu-block, Keith Busch, Klaus Jensen


On Jul 20 10:47, Jinhao Fan wrote:
> at 10:41 PM, Jinhao Fan <fanjinhao21s@ict.ac.cn> wrote:
> 
> > [...]
> > 
> > I agree on the addition of SQ and CQ notify trace events. But what is the
> > purpose of adding tracepoints for nvme_process_sq and nvme_post_cqes?
> 
> I realized these two events are useful when debugging iothread support. We
> process SQEs and CQEs in batches in nvme_process_sq and nvme_post_cqes, so it
> is important to mark the beginning of each batch.

If you can add your Reviewed-by here, I can queue it up ;)



* Re: [PATCH] hw/nvme: add trace events for ioeventfd
From: Jinhao Fan @ 2022-07-27  7:48 UTC
  To: Klaus Jensen; +Cc: qemu-devel, qemu-block, Keith Busch, Klaus Jensen

at 3:10 PM, Klaus Jensen <its@irrelevant.dk> wrote:

> [...]
> 
> If you can add your Reviewed-by here, I can queue it up ;)

Of course :)

Reviewed-by: Jinhao Fan <fanjinhao21s@ict.ac.cn>


