From: <ankita@nvidia.com>
To: <ankita@nvidia.com>, <jgg@nvidia.com>,
<alex.williamson@redhat.com>, <yishaih@nvidia.com>,
<skolothumtho@nvidia.com>, <kevin.tian@intel.com>,
<yi.l.liu@intel.com>, <zhiw@nvidia.com>
Cc: <aniketa@nvidia.com>, <cjia@nvidia.com>, <kwankhede@nvidia.com>,
<targupta@nvidia.com>, <vsethi@nvidia.com>, <acurrid@nvidia.com>,
<apopple@nvidia.com>, <jhubbard@nvidia.com>, <danw@nvidia.com>,
<anuaggarwal@nvidia.com>, <mochs@nvidia.com>, <kjaju@nvidia.com>,
<dnigam@nvidia.com>, <kvm@vger.kernel.org>,
<linux-kernel@vger.kernel.org>
Subject: [RFC 03/14] vfio/nvgrace-gpu: track GPUs associated with the EGM regions
Date: Thu, 4 Sep 2025 04:08:17 +0000 [thread overview]
Message-ID: <20250904040828.319452-4-ankita@nvidia.com> (raw)
In-Reply-To: <20250904040828.319452-1-ankita@nvidia.com>
From: Ankit Agrawal <ankita@nvidia.com>
Grace Blackwell systems could have multiple GPUs on a socket and
thus are associated with the corresponding EGM region for that
socket. Track the GPUs as a list.
On the device probe, the device pci_dev struct is added to a
linked list of the appropriate EGM region.
Similarly on device remove, the pci_dev struct for the GPU
is removed from the EGM region.
Since the GPUs on a socket share the same EGM region, they also
share the same set of EGM region information. Skip the EGM
region information fetch if it was already done through a different
GPU on the same socket.
Signed-off-by: Ankit Agrawal <ankita@nvidia.com>
---
drivers/vfio/pci/nvgrace-gpu/egm_dev.c | 29 ++++++++++++++++++++++
drivers/vfio/pci/nvgrace-gpu/egm_dev.h | 4 +++
drivers/vfio/pci/nvgrace-gpu/main.c | 34 +++++++++++++++++++++++---
include/linux/nvgrace-egm.h | 6 +++++
4 files changed, 70 insertions(+), 3 deletions(-)
diff --git a/drivers/vfio/pci/nvgrace-gpu/egm_dev.c b/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
index f4e27dadf1ef..28cfd29eda56 100644
--- a/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
+++ b/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
@@ -17,6 +17,33 @@ int nvgrace_gpu_has_egm_property(struct pci_dev *pdev, u64 *pegmpxm)
pegmpxm);
}
+/*
+ * add_gpu() - link a GPU's pci_dev onto the EGM region's GPU list.
+ * @egm_dev: EGM region the GPU belongs to (list head must be initialized).
+ * @pdev: the GPU being probed.
+ *
+ * Returns 0 on success, -ENOMEM if the list node cannot be allocated.
+ *
+ * NOTE(review): no locking is visible here; presumably probe/remove are
+ * serialized by the driver core — please confirm, or document the rule.
+ */
+int add_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev)
+{
+	struct gpu_node *node;
+
+	/*
+	 * Small fixed-size allocation: plain kzalloc() is the right tool.
+	 * The kvmalloc family is meant for possibly-large sizes that may
+	 * need a vmalloc fallback, which cannot happen for sizeof(*node).
+	 * (kvfree() in remove_gpu() handles kmalloc'd memory fine.)
+	 */
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->pdev = pdev;
+
+	list_add_tail(&node->list, &egm_dev->gpus);
+
+	return 0;
+}
+
+/*
+ * remove_gpu() - unlink a GPU's pci_dev from the EGM region's GPU list.
+ * @egm_dev: EGM region the GPU was registered with.
+ * @pdev: the GPU being removed.
+ *
+ * Uses the _safe iterator because matching nodes are deleted and freed
+ * during the walk. The loop deliberately continues after a match, so any
+ * duplicate entries for @pdev are also removed.
+ *
+ * NOTE(review): if add_gpu() guarantees at most one node per pdev, a
+ * break after the first hit would suffice — confirm the invariant.
+ * NOTE(review): no locking is visible here; presumably probe/remove are
+ * serialized by the caller — confirm.
+ */
+void remove_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev)
+{
+	struct gpu_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, &egm_dev->gpus, list) {
+		if (node->pdev == pdev) {
+			list_del(&node->list);
+			kvfree(node);
+		}
+	}
+}
+
static void nvgrace_gpu_release_aux_device(struct device *device)
{
struct auxiliary_device *aux_dev = container_of(device, struct auxiliary_device, dev);
@@ -37,6 +64,8 @@ nvgrace_gpu_create_aux_device(struct pci_dev *pdev, const char *name,
goto create_err;
egm_dev->egmpxm = egmpxm;
+ INIT_LIST_HEAD(&egm_dev->gpus);
+
egm_dev->aux_dev.id = egmpxm;
egm_dev->aux_dev.name = name;
egm_dev->aux_dev.dev.release = nvgrace_gpu_release_aux_device;
diff --git a/drivers/vfio/pci/nvgrace-gpu/egm_dev.h b/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
index c00f5288f4e7..1635753c9e50 100644
--- a/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
+++ b/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
@@ -10,6 +10,10 @@
int nvgrace_gpu_has_egm_property(struct pci_dev *pdev, u64 *pegmpxm);
+int add_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev);
+
+void remove_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev);
+
struct nvgrace_egm_dev *
nvgrace_gpu_create_aux_device(struct pci_dev *pdev, const char *name,
u64 egmphys);
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index 2cf851492990..436f0ac17332 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -66,9 +66,10 @@ static struct list_head egm_dev_list;
static int nvgrace_gpu_create_egm_aux_device(struct pci_dev *pdev)
{
- struct nvgrace_egm_dev_entry *egm_entry;
+ struct nvgrace_egm_dev_entry *egm_entry = NULL;
u64 egmpxm;
int ret = 0;
+ bool is_new_region = false;
/*
* EGM is an optional feature enabled in SBIOS. If disabled, there
@@ -79,6 +80,19 @@ static int nvgrace_gpu_create_egm_aux_device(struct pci_dev *pdev)
if (nvgrace_gpu_has_egm_property(pdev, &egmpxm))
goto exit;
+	list_for_each_entry(egm_entry, &egm_dev_list, list) {
+		/*
+		 * A system could have multiple GPUs associated with an
+		 * EGM region and will have the same set of EGM region
+		 * information. Skip the EGM region information fetch if
+		 * already done through a different GPU on the same socket.
+		 */
+		if (egm_entry->egm_dev->egmpxm == egmpxm)
+			goto add_gpu;
+	}
+
+ is_new_region = true;
+
egm_entry = kvzalloc(sizeof(*egm_entry), GFP_KERNEL);
if (!egm_entry)
return -ENOMEM;
@@ -87,13 +101,23 @@ static int nvgrace_gpu_create_egm_aux_device(struct pci_dev *pdev)
nvgrace_gpu_create_aux_device(pdev, NVGRACE_EGM_DEV_NAME,
egmpxm);
if (!egm_entry->egm_dev) {
- kvfree(egm_entry);
ret = -EINVAL;
+ goto free_egm_entry;
+ }
+
+add_gpu:
+ ret = add_gpu(egm_entry->egm_dev, pdev);
+ if (!ret) {
+ if (is_new_region)
+ list_add_tail(&egm_entry->list, &egm_dev_list);
goto exit;
}
- list_add_tail(&egm_entry->list, &egm_dev_list);
+ if (is_new_region)
+ auxiliary_device_destroy(&egm_entry->egm_dev->aux_dev);
+free_egm_entry:
+ kvfree(egm_entry);
exit:
return ret;
}
@@ -112,6 +136,10 @@ static void nvgrace_gpu_destroy_egm_aux_device(struct pci_dev *pdev)
* device.
*/
if (egm_entry->egm_dev->egmpxm == egmpxm) {
+ remove_gpu(egm_entry->egm_dev, pdev);
+ if (!list_empty(&egm_entry->egm_dev->gpus))
+ break;
+
auxiliary_device_destroy(&egm_entry->egm_dev->aux_dev);
list_del(&egm_entry->list);
kvfree(egm_entry);
diff --git a/include/linux/nvgrace-egm.h b/include/linux/nvgrace-egm.h
index 9575d4ad4338..e42494a2b1a6 100644
--- a/include/linux/nvgrace-egm.h
+++ b/include/linux/nvgrace-egm.h
@@ -10,9 +10,15 @@
#define NVGRACE_EGM_DEV_NAME "egm"
+/*
+ * One GPU attached to an EGM region; chained on nvgrace_egm_dev::gpus.
+ * The pdev pointer is not reference-counted here — NOTE(review): confirm
+ * the entry is always removed before the pci_dev can go away.
+ */
+struct gpu_node {
+	struct list_head list;
+	struct pci_dev *pdev;
+};
+
struct nvgrace_egm_dev {
struct auxiliary_device aux_dev;
u64 egmpxm;
+ struct list_head gpus;
};
struct nvgrace_egm_dev_entry {
--
2.34.1
next prev parent reply other threads:[~2025-09-04 4:08 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-04 4:08 [RFC 00/14] cover-letter: Add virtualization support for EGM ankita
2025-09-04 4:08 ` [RFC 01/14] vfio/nvgrace-gpu: Expand module_pci_driver to allow custom module init ankita
2025-09-04 4:08 ` [RFC 02/14] vfio/nvgrace-gpu: Create auxiliary device for EGM ankita
2025-09-15 6:56 ` Shameer Kolothum
2025-09-04 4:08 ` ankita [this message]
2025-09-15 7:19 ` [RFC 03/14] vfio/nvgrace-gpu: track GPUs associated with the EGM regions Shameer Kolothum
2025-09-04 4:08 ` [RFC 04/14] vfio/nvgrace-gpu: Introduce functions to fetch and save EGM info ankita
2025-09-04 4:08 ` [RFC 05/14] vfio/nvgrace-egm: Introduce module to manage EGM ankita
2025-09-05 13:26 ` Jason Gunthorpe
2025-09-15 7:47 ` Shameer Kolothum
2025-09-04 4:08 ` [RFC 06/14] vfio/nvgrace-egm: Introduce egm class and register char device numbers ankita
2025-09-04 4:08 ` [RFC 07/14] vfio/nvgrace-egm: Register auxiliary driver ops ankita
2025-09-05 13:31 ` Jason Gunthorpe
2025-09-04 4:08 ` [RFC 08/14] vfio/nvgrace-egm: Expose EGM region as char device ankita
2025-09-05 13:34 ` Jason Gunthorpe
2025-09-15 8:36 ` Shameer Kolothum
2025-09-04 4:08 ` [RFC 09/14] vfio/nvgrace-egm: Add chardev ops for EGM management ankita
2025-09-05 13:36 ` Jason Gunthorpe
2025-09-04 4:08 ` [RFC 10/14] vfio/nvgrace-egm: Clear Memory before handing out to VM ankita
2025-09-05 13:39 ` Jason Gunthorpe
2025-09-15 8:45 ` Shameer Kolothum
2025-09-04 4:08 ` [RFC 11/14] vfio/nvgrace-egm: Fetch EGM region retired pages list ankita
2025-09-15 9:21 ` Shameer Kolothum
2025-09-04 4:08 ` [RFC 12/14] vfio/nvgrace-egm: Introduce ioctl to share retired pages ankita
2025-09-04 4:08 ` [RFC 13/14] vfio/nvgrace-egm: expose the egm size through sysfs ankita
2025-09-04 4:08 ` [RFC 14/14] vfio/nvgrace-gpu: Add link from pci to EGM ankita
2025-09-05 13:42 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250904040828.319452-4-ankita@nvidia.com \
--to=ankita@nvidia.com \
--cc=acurrid@nvidia.com \
--cc=alex.williamson@redhat.com \
--cc=aniketa@nvidia.com \
--cc=anuaggarwal@nvidia.com \
--cc=apopple@nvidia.com \
--cc=cjia@nvidia.com \
--cc=danw@nvidia.com \
--cc=dnigam@nvidia.com \
--cc=jgg@nvidia.com \
--cc=jhubbard@nvidia.com \
--cc=kevin.tian@intel.com \
--cc=kjaju@nvidia.com \
--cc=kvm@vger.kernel.org \
--cc=kwankhede@nvidia.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mochs@nvidia.com \
--cc=skolothumtho@nvidia.com \
--cc=targupta@nvidia.com \
--cc=vsethi@nvidia.com \
--cc=yi.l.liu@intel.com \
--cc=yishaih@nvidia.com \
--cc=zhiw@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox