From: Aravind Iddamsetty <aravind.iddamsetty@linux.intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>,
intel-xe@lists.freedesktop.org
Subject: Re: [Intel-xe] [PATCH 08/11] drm/xe: Support SOC NONFATAL error handling for PVC.
Date: Wed, 11 Oct 2023 11:37:43 +0530
Message-ID: <70345afb-9b70-a526-9791-16733aa69976@linux.intel.com>
In-Reply-To: <20230927114627.136925-9-himal.prasad.ghimiray@intel.com>
On 27/09/23 17:16, Himal Prasad Ghimiray wrote:
> Report the SOC nonfatal hardware errors and update the counters, which
> will increment in case of error.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_hw_error.c | 118 ++++++++++++++++++++++++++-----
> drivers/gpu/drm/xe/xe_hw_error.h | 42 +++++++++++
> 2 files changed, 143 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_hw_error.c b/drivers/gpu/drm/xe/xe_hw_error.c
> index fa05bad5e684..aeece9e705dc 100644
> --- a/drivers/gpu/drm/xe/xe_hw_error.c
> +++ b/drivers/gpu/drm/xe/xe_hw_error.c
> @@ -276,6 +276,67 @@ static const struct err_msg_cntr_pair soc_mstr_lcl_err_reg_fatal[] = {
> [14 ... 31] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_FATAL},
> };
>
> +static const struct err_msg_cntr_pair soc_mstr_glbl_err_reg_nonfatal[] = {
> + [0] = {"MASTER LOCAL Reported", XE_SOC_HW_ERR_MSTR_LCL_NONFATAL},
> + [1] = {"SLAVE GLOBAL Reported", XE_SOC_HW_ERR_SLAVE_GLBL_NONFATAL},
Same as mentioned in the earlier patch: no need to count these.
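An untested sketch of what I mean; XE_SOC_HW_ERR_SKIP_CNTR is a made-up
name, use whatever fits:

    /* Sentinel counter index for bits that only say "go look at the
     * other IEH register"; the logger skips the increment for these.
     */
    #define XE_SOC_HW_ERR_SKIP_CNTR    U32_MAX

    [0] = {"MASTER LOCAL Reported", XE_SOC_HW_ERR_SKIP_CNTR},
    [1] = {"SLAVE GLOBAL Reported", XE_SOC_HW_ERR_SKIP_CNTR},

    /* and early in xe_soc_log_err_update_cntr(): */
    if (indx == XE_SOC_HW_ERR_SKIP_CNTR)
        return;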
> + [2] = {"HBM SS0: Channel0", XE_SOC_HW_ERR_HBM0_CHNL0_NONFATAL},
> + [3] = {"HBM SS0: Channel1", XE_SOC_HW_ERR_HBM0_CHNL1_NONFATAL},
> + [4] = {"HBM SS0: Channel2", XE_SOC_HW_ERR_HBM0_CHNL2_NONFATAL},
> + [5] = {"HBM SS0: Channel3", XE_SOC_HW_ERR_HBM0_CHNL3_NONFATAL},
> + [6] = {"HBM SS0: Channel4", XE_SOC_HW_ERR_HBM0_CHNL4_NONFATAL},
> + [7] = {"HBM SS0: Channel5", XE_SOC_HW_ERR_HBM0_CHNL5_NONFATAL},
> + [8] = {"HBM SS0: Channel6", XE_SOC_HW_ERR_HBM0_CHNL6_NONFATAL},
> + [9] = {"HBM SS0: Channel7", XE_SOC_HW_ERR_HBM0_CHNL7_NONFATAL},
> + [10] = {"HBM SS1: Channel0", XE_SOC_HW_ERR_HBM1_CHNL0_NONFATAL},
> + [11] = {"HBM SS1: Channel1", XE_SOC_HW_ERR_HBM1_CHNL1_NONFATAL},
> + [12] = {"HBM SS1: Channel2", XE_SOC_HW_ERR_HBM1_CHNL2_NONFATAL},
> + [13] = {"HBM SS1: Channel3", XE_SOC_HW_ERR_HBM1_CHNL3_NONFATAL},
> + [14] = {"HBM SS1: Channel4", XE_SOC_HW_ERR_HBM1_CHNL4_NONFATAL},
> + [15] = {"HBM SS1: Channel5", XE_SOC_HW_ERR_HBM1_CHNL5_NONFATAL},
> + [16] = {"HBM SS1: Channel6", XE_SOC_HW_ERR_HBM1_CHNL6_NONFATAL},
> + [17] = {"HBM SS1: Channel7", XE_SOC_HW_ERR_HBM1_CHNL7_NONFATAL},
> +	[18 ... 31] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> +};
> +
> +static const struct err_msg_cntr_pair soc_slave_glbl_err_reg_nonfatal[] = {
> + [0] = {"SLAVE LOCAL Reported", XE_SOC_HW_ERR_SLAVE_LCL_NONFATAL},
same here
> + [1] = {"HBM SS2: Channel0", XE_SOC_HW_ERR_HBM2_CHNL0_NONFATAL},
> + [2] = {"HBM SS2: Channel1", XE_SOC_HW_ERR_HBM2_CHNL1_NONFATAL},
> + [3] = {"HBM SS2: Channel2", XE_SOC_HW_ERR_HBM2_CHNL2_NONFATAL},
> + [4] = {"HBM SS2: Channel3", XE_SOC_HW_ERR_HBM2_CHNL3_NONFATAL},
> + [5] = {"HBM SS2: Channel4", XE_SOC_HW_ERR_HBM2_CHNL4_NONFATAL},
> + [6] = {"HBM SS2: Channel5", XE_SOC_HW_ERR_HBM2_CHNL5_NONFATAL},
> + [7] = {"HBM SS2: Channel6", XE_SOC_HW_ERR_HBM2_CHNL6_NONFATAL},
> + [8] = {"HBM SS2: Channel7", XE_SOC_HW_ERR_HBM2_CHNL7_NONFATAL},
> + [9] = {"HBM SS3: Channel0", XE_SOC_HW_ERR_HBM3_CHNL0_NONFATAL},
> + [10] = {"HBM SS3: Channel1", XE_SOC_HW_ERR_HBM3_CHNL1_NONFATAL},
> + [11] = {"HBM SS3: Channel2", XE_SOC_HW_ERR_HBM3_CHNL2_NONFATAL},
> + [12] = {"HBM SS3: Channel3", XE_SOC_HW_ERR_HBM3_CHNL3_NONFATAL},
> + [13] = {"HBM SS3: Channel4", XE_SOC_HW_ERR_HBM3_CHNL4_NONFATAL},
> + [14] = {"HBM SS3: Channel5", XE_SOC_HW_ERR_HBM3_CHNL5_NONFATAL},
> + [15] = {"HBM SS3: Channel6", XE_SOC_HW_ERR_HBM3_CHNL6_NONFATAL},
> + [16] = {"HBM SS3: Channel7", XE_SOC_HW_ERR_HBM3_CHNL7_NONFATAL},
> + [18] = {"ANR MDFI", XE_SOC_HW_ERR_ANR_MDFI_NONFATAL},
> + [17] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> + [19 ... 31] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_FATAL},
> +};
> +
> +static const struct err_msg_cntr_pair soc_slave_lcl_err_reg_nonfatal[] = {
> + [0 ... 31] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> +};
> +
> +static const struct err_msg_cntr_pair soc_mstr_lcl_err_reg_nonfatal[] = {
> + [0 ... 3] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> + [4] = {"Base Die MDFI T2T", XE_SOC_HW_ERR_MDFI_T2T_NONFATAL},
> + [5] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> + [6] = {"Base Die MDFI T2C", XE_SOC_HW_ERR_MDFI_T2C_NONFATAL},
> + [7] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> + [8] = {"Invalid CSC PSF Command Parity", XE_SOC_HW_ERR_CSC_PSF_CMD_NONFATAL},
> + [9] = {"Invalid CSC PSF Unexpected Completion", XE_SOC_HW_ERR_CSC_PSF_CMP_NONFATAL},
> + [10] = {"Invalid CSC PSF Unsupported Request", XE_SOC_HW_ERR_CSC_PSF_REQ_NONFATAL},
> +	[11 ... 31] = {"Undefined", XE_SOC_HW_ERR_UNKNOWN_NONFATAL},
> +};
> +
> static void xe_assign_hw_err_regs(struct xe_device *xe)
> {
> const struct err_msg_cntr_pair **dev_err_stat = xe->hw_err_regs.dev_err_stat;
> @@ -521,18 +582,20 @@ xe_gsc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
> }
>
> static void
> -xe_soc_log_err_update_cntr(struct xe_tile *tile,
> +xe_soc_log_err_update_cntr(struct xe_tile *tile, const enum hardware_error hw_err,
> u32 errbit, const struct err_msg_cntr_pair *reg_info)
> {
> const char *errmsg;
> u32 indx;
>
> + const char *hwerr_to_str = hardware_error_type_to_str(hw_err);
> +
> errmsg = reg_info[errbit].errmsg;
> indx = reg_info[errbit].cntr_indx;
>
> drm_err_ratelimited(&tile_to_xe(tile)->drm, HW_ERR
> - "Tile%d %s SOC FATAL error, bit[%d] is set\n",
> - tile->id, errmsg, errbit);
> + "Tile%d %s SOC %s error, bit[%d] is set\n",
> + tile->id, hwerr_to_str, errmsg, errbit);
In the prints as well, let's keep the same order: reporting source, error
category, error name. Also, let's have a meaningful message like
"Tile0 reported SOC NONFATAL <errorname>". There's no need to print "error"
again at the end, as HW_ERR already prepends "HARDWARE_ERROR". And does the
bit ID add any value, given that we already print the registers?
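Something along these lines (untested):

    drm_err_ratelimited(&tile_to_xe(tile)->drm, HW_ERR
                        "Tile%d reported SOC %s %s\n",
                        tile->id, hwerr_to_str, errmsg);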
> tile->errors.count[indx]++;
> }
>
> @@ -540,15 +603,34 @@ static void
> xe_soc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
> {
> unsigned long mst_glb_errstat, slv_glb_errstat, lcl_errstat;
> +
> + const struct err_msg_cntr_pair *soc_mstr_glbl_err_reg;
> + const struct err_msg_cntr_pair *soc_mstr_lcl_err_reg;
> + const struct err_msg_cntr_pair *soc_slave_glbl_err_reg;
> + const struct err_msg_cntr_pair *soc_slave_lcl_err_reg;
> u32 errbit, base, slave_base;
> int i;
> +
> + const char *hwerr_to_str = hardware_error_type_to_str(hw_err);
> struct xe_gt *gt = tile->primary_gt;
>
> lockdep_assert_held(&tile_to_xe(tile)->irq.lock);
>
> - if ((tile_to_xe(tile)->info.platform != XE_PVC) && hw_err != HARDWARE_ERROR_FATAL)
> + if ((tile_to_xe(tile)->info.platform != XE_PVC) && hw_err == HARDWARE_ERROR_CORRECTABLE)
> return;
>
> + if (hw_err == HARDWARE_ERROR_FATAL) {
> + soc_mstr_glbl_err_reg = soc_mstr_glbl_err_reg_fatal;
> + soc_mstr_lcl_err_reg = soc_mstr_lcl_err_reg_fatal;
> + soc_slave_glbl_err_reg = soc_slave_glbl_err_reg_fatal;
> + soc_slave_lcl_err_reg = soc_slave_lcl_err_reg_fatal;
> + } else if (hw_err == HARDWARE_ERROR_NONFATAL) {
> + soc_mstr_glbl_err_reg = soc_mstr_glbl_err_reg_nonfatal;
> + soc_mstr_lcl_err_reg = soc_mstr_lcl_err_reg_nonfatal;
> + soc_slave_glbl_err_reg = soc_slave_glbl_err_reg_nonfatal;
> + soc_slave_lcl_err_reg = soc_slave_lcl_err_reg_nonfatal;
> + }
I guess we agreed to do this selection once, like we did for the GT errors.
Also note that on PVC a CORRECTABLE error gets past the early return above,
but neither branch here assigns these pointers, so they would be used
uninitialized below.
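A rough sketch of the one-time assignment, assuming hw_err_regs grows
per-severity SOC table pointers (the field names here are made up):

    /* in xe_assign_hw_err_regs(), done once at init time: */
    err_regs->soc_mstr_glbl[HARDWARE_ERROR_FATAL] = soc_mstr_glbl_err_reg_fatal;
    err_regs->soc_mstr_glbl[HARDWARE_ERROR_NONFATAL] = soc_mstr_glbl_err_reg_nonfatal;
    err_regs->soc_mstr_lcl[HARDWARE_ERROR_FATAL] = soc_mstr_lcl_err_reg_fatal;
    err_regs->soc_mstr_lcl[HARDWARE_ERROR_NONFATAL] = soc_mstr_lcl_err_reg_nonfatal;
    /* ...same for the slave tables... */

    /* the handler then just indexes by severity: */
    soc_mstr_glbl_err_reg = xe->hw_err_regs.soc_mstr_glbl[hw_err];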
> +
> base = SOC_PVC_BASE;
> slave_base = SOC_PVC_SLAVE_BASE;
>
> @@ -564,33 +646,34 @@ xe_soc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
>
> mst_glb_errstat = xe_mmio_read32(gt, SOC_GLOBAL_ERR_STAT_MASTER_REG(base, hw_err));
> drm_info(&tile_to_xe(tile)->drm, HW_ERR
> - "Tile%d SOC_GLOBAL_ERR_STAT_MASTER_REG_FATAL:0x%08lx\n",
> - tile->id, mst_glb_errstat);
> + "Tile%d SOC_GLOBAL_ERR_STAT_MASTER_REG_%s:0x%08lx\n",
> + tile->id, hwerr_to_str, mst_glb_errstat);
For the register dumps, let's use drm_dbg.
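i.e. (untested):

    drm_dbg(&tile_to_xe(tile)->drm, HW_ERR
            "Tile%d SOC_GLOBAL_ERR_STAT_MASTER_REG_%s:0x%08lx\n",
            tile->id, hwerr_to_str, mst_glb_errstat);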
>
> if (mst_glb_errstat & REG_BIT(SOC_SLAVE_IEH)) {
> slv_glb_errstat = xe_mmio_read32(gt,
> SOC_GLOBAL_ERR_STAT_SLAVE_REG(slave_base, hw_err));
> drm_info(&tile_to_xe(tile)->drm, HW_ERR
> - "Tile%d SOC_GLOBAL_ERR_STAT_SLAVE_REG_FATAL:0x%08lx\n",
> - tile->id, slv_glb_errstat);
> + "Tile%d SOC_GLOBAL_ERR_STAT_SLAVE_REG_%s:0x%08lx\n",
> + tile->id, hwerr_to_str, slv_glb_errstat);
>
> if (slv_glb_errstat & REG_BIT(SOC_IEH1_LOCAL_ERR_STATUS)) {
> lcl_errstat = xe_mmio_read32(gt, SOC_LOCAL_ERR_STAT_SLAVE_REG(slave_base,
> hw_err));
> drm_info(&tile_to_xe(tile)->drm, HW_ERR
> - "Tile%d SOC_LOCAL_ERR_STAT_SLAVE_REG_FATAL:0x%08lx\n",
> - tile->id, lcl_errstat);
> + "Tile%d SOC_LOCAL_ERR_STAT_SLAVE_REG_%s:0x%08lx\n",
> + tile->id, hwerr_to_str, lcl_errstat);
>
> for_each_set_bit(errbit, &lcl_errstat, 32)
Define what 32 is.
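e.g. something like this (the macro name is just a suggestion):

    /* IEH error status registers are 32 bits wide */
    #define XE_HW_ERR_STAT_BITS    32

    for_each_set_bit(errbit, &lcl_errstat, XE_HW_ERR_STAT_BITS)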
> - xe_soc_log_err_update_cntr(tile, errbit,
> - soc_slave_lcl_err_reg_fatal);
> + xe_soc_log_err_update_cntr(tile, hw_err, errbit,
> + soc_slave_lcl_err_reg);
>
> xe_mmio_write32(gt, SOC_LOCAL_ERR_STAT_SLAVE_REG(slave_base, hw_err),
> lcl_errstat);
> }
>
> for_each_set_bit(errbit, &slv_glb_errstat, 32)
> - xe_soc_log_err_update_cntr(tile, errbit, soc_slave_glbl_err_reg_fatal);
> +			xe_soc_log_err_update_cntr(tile, hw_err, errbit,
> + soc_slave_glbl_err_reg);
>
> xe_mmio_write32(gt, SOC_GLOBAL_ERR_STAT_SLAVE_REG(slave_base, hw_err),
> slv_glb_errstat);
> @@ -598,17 +681,18 @@ xe_soc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
>
> if (mst_glb_errstat & REG_BIT(SOC_IEH0_LOCAL_ERR_STATUS)) {
> lcl_errstat = xe_mmio_read32(gt, SOC_LOCAL_ERR_STAT_MASTER_REG(base, hw_err));
> - drm_info(&tile_to_xe(tile)->drm, HW_ERR "SOC_LOCAL_ERR_STAT_MASTER_REG_FATAL:0x%08lx\n",
> - lcl_errstat);
> + drm_info(&tile_to_xe(tile)->drm, HW_ERR "Tile%d SOC_LOCAL_ERR_STAT_MASTER_REG_%s:0x%08lx\n",
> + tile->id, hwerr_to_str, lcl_errstat);
>
> for_each_set_bit(errbit, &lcl_errstat, 32)
> - xe_soc_log_err_update_cntr(tile, errbit, soc_mstr_lcl_err_reg_fatal);
> + xe_soc_log_err_update_cntr(tile, hw_err, errbit,
> + soc_mstr_lcl_err_reg);
>
> xe_mmio_write32(gt, SOC_LOCAL_ERR_STAT_MASTER_REG(base, hw_err), lcl_errstat);
> }
>
> for_each_set_bit(errbit, &mst_glb_errstat, 32)
> - xe_soc_log_err_update_cntr(tile, errbit, soc_mstr_glbl_err_reg_fatal);
> +		xe_soc_log_err_update_cntr(tile, hw_err, errbit, soc_mstr_glbl_err_reg);
>
> xe_mmio_write32(gt, SOC_GLOBAL_ERR_STAT_MASTER_REG(base, hw_err),
> mst_glb_errstat);
> diff --git a/drivers/gpu/drm/xe/xe_hw_error.h b/drivers/gpu/drm/xe/xe_hw_error.h
> index 05838e082abd..a458a90b34a2 100644
> --- a/drivers/gpu/drm/xe/xe_hw_error.h
> +++ b/drivers/gpu/drm/xe/xe_hw_error.h
> @@ -115,6 +115,48 @@ enum xe_tile_hw_errors {
> XE_SOC_HW_ERR_PCIE_PSF_CMD_FATAL,
> XE_SOC_HW_ERR_PCIE_PSF_CMP_FATAL,
> XE_SOC_HW_ERR_PCIE_PSF_REQ_FATAL,
> + XE_SOC_HW_ERR_MSTR_LCL_NONFATAL,
> + XE_SOC_HW_ERR_SLAVE_GLBL_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL0_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL1_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL2_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL3_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL4_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL5_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL6_NONFATAL,
> + XE_SOC_HW_ERR_HBM0_CHNL7_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL0_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL1_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL2_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL3_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL4_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL5_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL6_NONFATAL,
> + XE_SOC_HW_ERR_HBM1_CHNL7_NONFATAL,
> + XE_SOC_HW_ERR_UNKNOWN_NONFATAL,
> + XE_SOC_HW_ERR_SLAVE_LCL_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL0_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL1_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL2_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL3_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL4_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL5_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL6_NONFATAL,
> + XE_SOC_HW_ERR_HBM2_CHNL7_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL0_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL1_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL2_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL3_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL4_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL5_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL6_NONFATAL,
> + XE_SOC_HW_ERR_HBM3_CHNL7_NONFATAL,
> + XE_SOC_HW_ERR_ANR_MDFI_NONFATAL,
> + XE_SOC_HW_ERR_MDFI_T2T_NONFATAL,
> + XE_SOC_HW_ERR_MDFI_T2C_NONFATAL,
> + XE_SOC_HW_ERR_CSC_PSF_CMD_NONFATAL,
> + XE_SOC_HW_ERR_CSC_PSF_CMP_NONFATAL,
> + XE_SOC_HW_ERR_CSC_PSF_REQ_NONFATAL,
> XE_TILE_HW_ERROR_MAX,
> };
Thanks,
Aravind.
>