public inbox for intel-xe@lists.freedesktop.org
 help / color / mirror / Atom feed
From: Soham Purkait <soham.purkait@intel.com>
To: intel-xe@lists.freedesktop.org, riana.tauro@intel.com,
	anshuman.gupta@intel.com, aravind.iddamsetty@linux.intel.com,
	badal.nilawar@intel.com, raag.jadav@intel.com,
	ravi.kishore.koppuravuri@intel.com, mallesh.koujalagi@intel.com
Cc: soham.purkait@intel.com, anoop.c.vijay@intel.com
Subject: [PATCH v1 2/2] drm/xe/xe_ras: Add RAS support for GPU health indicator
Date: Thu, 16 Apr 2026 15:06:10 +0530	[thread overview]
Message-ID: <20260416093610.4085667-3-soham.purkait@intel.com> (raw)
In-Reply-To: <20260416093610.4085667-1-soham.purkait@intel.com>

GPU health indicator exposes a single sysfs interface, gpu_health,
at the device level, allowing administrators and management tools to
query the GPU health status. The interface permits both read and write
operations on PF and native functions, while on VFs it is exposed as
read-only.

v1:
 - gpu_health is read-write on PFs and native functions. It is read-only
   on VFs. VF write attempts are rejected.

Signed-off-by: Soham Purkait <soham.purkait@intel.com>
---
 drivers/gpu/drm/xe/Makefile    |   1 +
 drivers/gpu/drm/xe/xe_device.c |   3 +
 drivers/gpu/drm/xe/xe_ras.c    | 181 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_ras.h    |  13 +++
 4 files changed, 198 insertions(+)
 create mode 100644 drivers/gpu/drm/xe/xe_ras.c
 create mode 100644 drivers/gpu/drm/xe/xe_ras.h

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e42e582aca5c..4bf98c3c9b25 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -112,6 +112,7 @@ xe-y += xe_bb.o \
 	xe_pxp_debugfs.o \
 	xe_pxp_submit.o \
 	xe_query.o \
+	xe_ras.o \
 	xe_range_fence.o \
 	xe_reg_sr.o \
 	xe_reg_whitelist.o \
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4b45b617a039..cb5484712f1c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -62,6 +62,7 @@
 #include "xe_psmi.h"
 #include "xe_pxp.h"
 #include "xe_query.h"
+#include "xe_ras.h"
 #include "xe_shrinker.h"
 #include "xe_soc_remapper.h"
 #include "xe_survivability_mode.h"
@@ -1067,6 +1068,8 @@ int xe_device_probe(struct xe_device *xe)
 
 	xe_vsec_init(xe);
 
+	xe_ras_init(xe);
+
 	err = xe_sriov_init_late(xe);
 	if (err)
 		goto err_unregister_display;
diff --git a/drivers/gpu/drm/xe/xe_ras.c b/drivers/gpu/drm/xe/xe_ras.c
new file mode 100644
index 000000000000..925ef7738e6b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ras.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_printk.h"
+#include "xe_ras.h"
+#include "xe_ras_types.h"
+#include "xe_sriov.h"
+#include "xe_sysctrl_mailbox.h"
+#include "xe_sysctrl_mailbox_types.h"
+
+/* Valid health states, indexed by the value returned by the sysctrl firmware. */
+static const char * const gpu_health_states[] = { "ok", "warning", "critical" };
+/*
+ * sysfs read formats, indexed by the current health state: all three
+ * states are printed and the active one is wrapped in brackets,
+ * e.g. "ok [warning] critical". Must stay in sync with gpu_health_states.
+ */
+static const char * const gpu_health_fmt[] = {
+	"[%s] %s %s\n",
+	"%s [%s] %s\n",
+	"%s %s [%s]\n",
+};
+
+/*
+ * prepare_sysctrl_command - fill a sysctrl mailbox command descriptor
+ * @command: command descriptor to populate
+ * @cmd_mask: command id to encode into the application header
+ * @request: input payload buffer
+ * @request_len: size of @request in bytes
+ * @response: output payload buffer
+ * @response_len: size of @response in bytes
+ *
+ * Encodes the GFSP group id and @cmd_mask into the application message
+ * header and attaches the caller's request/response buffers. Only the
+ * pointers are stored, so both buffers must remain valid until the
+ * command has been sent.
+ */
+static void prepare_sysctrl_command(struct xe_sysctrl_mailbox_command *command,
+				    u32 cmd_mask, void *request, size_t request_len,
+				    void *response, size_t response_len)
+{
+	struct xe_sysctrl_app_msg_hdr hdr = {0};
+	u32 req_hdr;
+
+	req_hdr = FIELD_PREP(APP_HDR_GROUP_ID_MASK, XE_SYSCTRL_GROUP_GFSP) |
+		  FIELD_PREP(APP_HDR_COMMAND_MASK, cmd_mask);
+
+	hdr.data = req_hdr;
+	command->header = hdr;
+	command->data_in = request;
+	command->data_in_len = request_len;
+	command->data_out = response;
+	command->data_out_len = response_len;
+}
+
+/*
+ * gpu_health_show - sysfs read handler for the gpu_health attribute
+ *
+ * Queries the current GPU health state from the sysctrl firmware via the
+ * GFSP mailbox and prints all known states with the active one in
+ * brackets (see gpu_health_fmt).
+ *
+ * Return: number of bytes written to @buf on success, -EIO on mailbox
+ * failure, unexpected response length, or an out-of-range health value.
+ */
+static ssize_t gpu_health_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct xe_device *xe = kdev_to_xe_device(dev);
+	struct xe_sysctrl_mailbox_command command = {0};
+	struct xe_ras_health_get_response response = {0};
+	struct xe_ras_health_get_input request = {0};
+	u8 health;
+	int ret;
+	size_t rlen = 0;
+
+	prepare_sysctrl_command(&command, XE_SYSCTRL_CMD_GET_HEALTH, &request,
+				sizeof(request), &response, sizeof(response));
+	ret = xe_sysctrl_send_command(&xe->sc, &command, &rlen);
+	if (ret) {
+		xe_err(xe, "[RAS]: Sysctrl error ret %d\n", ret);
+		return -EIO;
+	}
+	/* The firmware reply must be exactly one response struct. */
+	if (rlen != sizeof(response)) {
+		xe_err(xe,
+		       "[RAS]: invalid Sysctrl response length %zu (expected %zu)\n",
+		       rlen, sizeof(response));
+		return -EIO;
+	}
+	/* Guard against out-of-range values before indexing the tables below. */
+	if (response.current_health >= ARRAY_SIZE(gpu_health_states)) {
+		xe_err(xe, "[RAS]: invalid health state %u from Sysctrl\n",
+		       response.current_health);
+		return -EIO;
+	}
+
+	health = response.current_health;
+
+	xe_dbg(xe, "[RAS]: current GPU health state = %d (%s)\n",
+	       health, gpu_health_states[health]);
+
+	return sysfs_emit(buf, gpu_health_fmt[health],
+			  gpu_health_states[0],
+			  gpu_health_states[1],
+			  gpu_health_states[2]);
+}
+
+/*
+ * gpu_health_store - sysfs write handler for the gpu_health attribute
+ *
+ * Parses one of the strings in gpu_health_states from @buf and asks the
+ * sysctrl firmware to set that health state. Writes from a VF are
+ * rejected; only PF/native functions may change the state. The value
+ * echoed back by the firmware is validated but only logged.
+ *
+ * Return: @count on success, -EPERM on a VF, -EINVAL for an unknown
+ * state string, -EIO on mailbox failure or a malformed response.
+ */
+static ssize_t gpu_health_store(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct xe_device *xe = kdev_to_xe_device(dev);
+	struct xe_sysctrl_mailbox_command command = {0};
+	struct xe_ras_health_set_input request = {0};
+	struct xe_ras_health_set_response response = {0};
+	u8 health;
+	int ret;
+	size_t rlen = 0;
+	int state;
+
+	/* Defense in depth: VFs get the read-only attribute, but reject here too. */
+	if (IS_SRIOV_VF(xe)) {
+		xe_dbg(xe, "[RAS]: GPU health state update rejected on VF\n");
+		return -EPERM;
+	}
+
+	state = sysfs_match_string(gpu_health_states,
+				   buf);
+	if (state < 0)
+		return -EINVAL;
+
+	request.new_health = (xe_ras_health_status_t)state;
+
+	prepare_sysctrl_command(&command, XE_SYSCTRL_CMD_SET_HEALTH, &request,
+				sizeof(request), &response, sizeof(response));
+	ret = xe_sysctrl_send_command(&xe->sc, &command, &rlen);
+	if (ret) {
+		xe_err(xe, "[RAS]: Sysctrl error ret %d\n", ret);
+		return -EIO;
+	}
+	/* The firmware reply must be exactly one response struct. */
+	if (rlen != sizeof(response)) {
+		xe_err(xe,
+		       "[RAS]: invalid Sysctrl response length %zu (expected %zu)\n",
+		       rlen, sizeof(response));
+		return -EIO;
+	}
+	/* Validate the echoed state before using it to index the table below. */
+	if (response.current_health >= ARRAY_SIZE(gpu_health_states)) {
+		xe_err(xe, "[RAS]: invalid health state %u from Sysctrl\n",
+		       response.current_health);
+		return -EIO;
+	}
+
+	health = response.current_health;
+
+	xe_dbg(xe, "[RAS]: current GPU health state=%d (%s)\n",
+	       health, gpu_health_states[health]);
+
+	return count;
+}
+
+/* PF/native functions: root may read and write the health state (0600). */
+static struct device_attribute dev_attr_gpu_health_rw =
+	__ATTR_RW_MODE(gpu_health, 0600);
+
+/* VFs: the health state is read-only for root (0400). */
+static struct device_attribute dev_attr_gpu_health_ro =
+	__ATTR_RO_MODE(gpu_health, 0400);
+
+/* Select the attribute variant matching the function type: RO on VFs. */
+static struct device_attribute *gpu_health_attr(struct xe_device *xe)
+{
+	if (IS_SRIOV_VF(xe))
+		return &dev_attr_gpu_health_ro;
+
+	return &dev_attr_gpu_health_rw;
+}
+
+/* devm teardown action: remove whichever gpu_health attribute was created. */
+static void gpu_health_sysfs_fini(void *arg)
+{
+	struct device *dev = arg;
+
+	device_remove_file(dev, gpu_health_attr(kdev_to_xe_device(dev)));
+}
+
+/*
+ * gpu_health_indicator_sysfs_init - create the gpu_health sysfs attribute
+ *
+ * Creates the RW (PF/native) or RO (VF) gpu_health file on the device and
+ * registers a devm action to remove it on driver teardown. Failures are
+ * logged but not propagated; the rest of the driver works without the
+ * indicator. On devm_add_action_or_reset() failure the action runs
+ * immediately, so the just-created file is removed.
+ */
+static void gpu_health_indicator_sysfs_init(struct xe_device *xe)
+{
+	struct device *dev = xe->drm.dev;
+	int err;
+
+	err = device_create_file(dev, gpu_health_attr(xe));
+	if (err)
+		goto err;
+
+	err = devm_add_action_or_reset(dev, gpu_health_sysfs_fini, dev);
+	if (err)
+		goto err;
+
+	return;
+
+err:
+	xe_err(xe, "[RAS]: failed to initialize GPU health sysfs, err=%d\n", err);
+}
+
+/**
+ * xe_ras_init - Initialize Xe RAS
+ * @xe: xe device instance
+ *
+ * Set up the RAS sysfs interfaces. Devices without system controller
+ * (sysctrl) support expose nothing.
+ */
+void xe_ras_init(struct xe_device *xe)
+{
+	if (xe->info.has_sysctrl)
+		gpu_health_indicator_sysfs_init(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_ras.h b/drivers/gpu/drm/xe/xe_ras.h
new file mode 100644
index 000000000000..14cb973603e7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ras.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#ifndef _XE_RAS_H_
+#define _XE_RAS_H_
+
+struct xe_device;
+
+void xe_ras_init(struct xe_device *xe);
+
+#endif /* _XE_RAS_H_ */
-- 
2.34.1


  parent reply	other threads:[~2026-04-16  9:49 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-16  9:36 [PATCH v1 0/2] drm/xe: Add support for GPU health indicator Soham Purkait
2026-04-16  9:36 ` [PATCH v1 1/2] drm/xe/xe_ras: Add structures and commands for RAS " Soham Purkait
2026-04-16 11:39   ` Andi Shyti
2026-04-17 14:45   ` Rodrigo Vivi
2026-04-16  9:36 ` Soham Purkait [this message]
2026-04-16 11:54   ` [PATCH v1 2/2] drm/xe/xe_ras: Add RAS support for " Andi Shyti
2026-04-17 14:51     ` Rodrigo Vivi
2026-04-20 15:26       ` Andi Shyti
2026-04-20 19:51         ` Rodrigo Vivi
2026-04-21 12:56           ` Andi Shyti
2026-04-21 13:21             ` Rodrigo Vivi
2026-04-22  6:05           ` Purkait, Soham
2026-04-20 16:19     ` Purkait, Soham
2026-04-20 17:35       ` Andi Shyti
2026-04-16  9:55 ` ✗ CI.checkpatch: warning for drm/xe: Add " Patchwork
2026-04-16  9:56 ` ✓ CI.KUnit: success " Patchwork
2026-04-16 10:58 ` ✓ Xe.CI.BAT: " Patchwork
2026-04-16 12:01 ` ✗ Xe.CI.FULL: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260416093610.4085667-3-soham.purkait@intel.com \
    --to=soham.purkait@intel.com \
    --cc=anoop.c.vijay@intel.com \
    --cc=anshuman.gupta@intel.com \
    --cc=aravind.iddamsetty@linux.intel.com \
    --cc=badal.nilawar@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=mallesh.koujalagi@intel.com \
    --cc=raag.jadav@intel.com \
    --cc=ravi.kishore.koppuravuri@intel.com \
    --cc=riana.tauro@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox