qemu-devel.nongnu.org archive mirror
From: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
To: qemu-devel@nongnu.org, Lukas Stockner <lstockner@genesiscloud.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
	Marcel Apfelbaum <marcel.apfelbaum@gmail.com>,
	Lukas Stockner <lstockner@genesiscloud.com>
Subject: Re: [PATCH] pcie: Support PCIe Gen5/Gen6 link speeds
Date: Fri, 16 Feb 2024 09:52:32 +0200	[thread overview]
Message-ID: <8xv91.32qjtsyqi8qb@linaro.org> (raw)
In-Reply-To: <20240215012326.3272366-1-lstockner@genesiscloud.com>

On Thu, 15 Feb 2024 03:23, Lukas Stockner <lstockner@genesiscloud.com> wrote:
>This patch extends the PCIe link speed option so that slots can be
>configured as supporting 32GT/s (Gen5) or 64GT/s (Gen6) speeds.
>This is as simple as setting the appropriate bit in LnkCap2 and
>the appropriate value in LnkCap and LnkCtl2.
>
>Signed-off-by: Lukas Stockner <lstockner@genesiscloud.com>
>---
> hw/core/qdev-properties-system.c | 16 ++++++++++++++--
> hw/pci/pcie.c                    |  8 ++++++++
> include/hw/pci/pcie_regs.h       |  2 ++
> qapi/common.json                 |  6 +++++-
> 4 files changed, 29 insertions(+), 3 deletions(-)
>
>diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
>index 1a396521d5..106a31c233 100644
>--- a/hw/core/qdev-properties-system.c
>+++ b/hw/core/qdev-properties-system.c
>@@ -941,7 +941,7 @@ const PropertyInfo qdev_prop_off_auto_pcibar = {
>     .set_default_value = qdev_propinfo_set_default_value_enum,
> };
> 
>-/* --- PCIELinkSpeed 2_5/5/8/16 -- */
>+/* --- PCIELinkSpeed 2_5/5/8/16/32/64 -- */
> 
> static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
>                                    void *opaque, Error **errp)
>@@ -963,6 +963,12 @@ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
>     case QEMU_PCI_EXP_LNK_16GT:
>         speed = PCIE_LINK_SPEED_16;
>         break;
>+    case QEMU_PCI_EXP_LNK_32GT:
>+        speed = PCIE_LINK_SPEED_32;
>+        break;
>+    case QEMU_PCI_EXP_LNK_64GT:
>+        speed = PCIE_LINK_SPEED_64;
>+        break;
>     default:
>         /* Unreachable */
>         abort();
>@@ -996,6 +1002,12 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
>     case PCIE_LINK_SPEED_16:
>         *p = QEMU_PCI_EXP_LNK_16GT;
>         break;
>+    case PCIE_LINK_SPEED_32:
>+        *p = QEMU_PCI_EXP_LNK_32GT;
>+        break;
>+    case PCIE_LINK_SPEED_64:
>+        *p = QEMU_PCI_EXP_LNK_64GT;
>+        break;
>     default:
>         /* Unreachable */
>         abort();
>@@ -1004,7 +1016,7 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name,
> 
> const PropertyInfo qdev_prop_pcie_link_speed = {
>     .name = "PCIELinkSpeed",
>-    .description = "2_5/5/8/16",
>+    .description = "2_5/5/8/16/32/64",
>     .enum_table = &PCIELinkSpeed_lookup,
>     .get = get_prop_pcielinkspeed,
>     .set = set_prop_pcielinkspeed,
>diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
>index 6db0cf69cd..0b4817e144 100644
>--- a/hw/pci/pcie.c
>+++ b/hw/pci/pcie.c
>@@ -153,6 +153,14 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
>             pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
>                                        PCI_EXP_LNKCAP2_SLS_16_0GB);
>         }
>+        if (s->speed > QEMU_PCI_EXP_LNK_16GT) {
>+            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
>+                                       PCI_EXP_LNKCAP2_SLS_32_0GB);
>+        }
>+        if (s->speed > QEMU_PCI_EXP_LNK_32GT) {
>+            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
>+                                       PCI_EXP_LNKCAP2_SLS_64_0GB);
>+        }
>     }
> }
> 
>diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h
>index 4972106c42..9d3b6868dc 100644
>--- a/include/hw/pci/pcie_regs.h
>+++ b/include/hw/pci/pcie_regs.h
>@@ -39,6 +39,8 @@ typedef enum PCIExpLinkSpeed {
>     QEMU_PCI_EXP_LNK_5GT,
>     QEMU_PCI_EXP_LNK_8GT,
>     QEMU_PCI_EXP_LNK_16GT,
>+    QEMU_PCI_EXP_LNK_32GT,
>+    QEMU_PCI_EXP_LNK_64GT,
> } PCIExpLinkSpeed;
> 
> #define QEMU_PCI_EXP_LNKCAP_MLS(speed)  (speed)
>diff --git a/qapi/common.json b/qapi/common.json
>index f1bb841951..867a9ad9b0 100644
>--- a/qapi/common.json
>+++ b/qapi/common.json
>@@ -107,10 +107,14 @@
> #
> # @16: 16.0GT/s
> #
>+# @32: 32.0GT/s
>+#
>+# @64: 64.0GT/s
>+#
> # Since: 4.0
> ##
> { 'enum': 'PCIELinkSpeed',
>-  'data': [ '2_5', '5', '8', '16' ] }
>+  'data': [ '2_5', '5', '8', '16', '32', '64' ] }
> 
> ##
> # @PCIELinkWidth:
>-- 
>2.43.1
>
>
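
For anyone skimming the registers involved: LnkCap and LnkCtl2 carry the
speed as a plain number (1 = 2.5GT/s through 6 = 64GT/s), while LnkCap2
carries a cumulative bit vector of all supported speeds. Below is a
minimal, self-contained C sketch of that encoding. It is not QEMU code;
the enum names and constants are my own reading of the PCIe spec, so
treat them as assumptions rather than the project's definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical local constants mirroring the PCIe link-speed
 * encodings; not the QEMU definitions, just the spec values as
 * I understand them.
 */
enum link_speed {
    LNK_2_5GT = 1,   /* LnkCap/LnkCtl2 value for 2.5GT/s (Gen1) */
    LNK_5GT,         /* 2 = 5GT/s   (Gen2) */
    LNK_8GT,         /* 3 = 8GT/s   (Gen3) */
    LNK_16GT,        /* 4 = 16GT/s  (Gen4) */
    LNK_32GT,        /* 5 = 32GT/s  (Gen5) */
    LNK_64GT,        /* 6 = 64GT/s  (Gen6) */
};

/*
 * LnkCap2 "Supported Link Speeds" vector: bit N set means the speed
 * whose LnkCap value is N is supported, so the mask is cumulative.
 * That is why pcie_cap_fill_slot_lnk() keeps adding one more bit for
 * every threshold the configured speed exceeds.
 */
static uint32_t lnkcap2_sls_mask(enum link_speed max)
{
    uint32_t mask = 0;

    for (int s = LNK_2_5GT; s <= max; s++) {
        mask |= 1u << s;
    }
    return mask;
}

int main(void)
{
    /* A 32GT/s (Gen5) slot advertises bits 1..5: 0x3e. */
    printf("LnkCap2 SLS @ 32GT/s: 0x%02x\n", lnkcap2_sls_mask(LNK_32GT));
    /* A 64GT/s (Gen6) slot advertises bits 1..6: 0x7e. */
    printf("LnkCap2 SLS @ 64GT/s: 0x%02x\n", lnkcap2_sls_mask(LNK_64GT));
    return 0;
}

If the property names from the existing Gen4 support still apply
(x-speed/x-width on pcie-root-port, an assumption worth double-checking),
a Gen5 slot would then be configured with something like
x-speed=32,x-width=16.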

Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>


Thread overview: 4+ messages
2024-02-15  1:23 [PATCH] pcie: Support PCIe Gen5/Gen6 link speeds Lukas Stockner
2024-02-16  7:52 ` Manos Pitsidianakis [this message]
2024-03-12 15:19 ` Michael S. Tsirkin
2024-03-13  6:41   ` Markus Armbruster
