* [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error
2024-11-12 13:54 [PATCH v2 0/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered Shuai Xue
@ 2024-11-12 13:54 ` Shuai Xue
2025-01-23 4:53 ` Sathyanarayanan Kuppuswamy
2024-11-12 13:54 ` [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered Shuai Xue
2024-12-24 11:03 ` [PATCH v2 0/2] " Shuai Xue
2 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2024-11-12 13:54 UTC (permalink / raw)
To: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy, xueshuai
The current implementation of pcie_do_recovery() assumes that the
recovery process is executed on the device that detected the error.
However, the DPC driver currently passes the error port that experienced
the DPC event to pcie_do_recovery().
Use the DPC Error Source ID register to correctly identify the device that
detected the error. By passing this error device to pcie_do_recovery(),
subsequent patches will be able to accurately access the AER status of the
error device.
Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
---
drivers/pci/pci.h | 2 +-
drivers/pci/pcie/dpc.c | 30 ++++++++++++++++++++++++------
drivers/pci/pcie/edr.c | 35 ++++++++++++++++++-----------------
3 files changed, 43 insertions(+), 24 deletions(-)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 14d00ce45bfa..0866f79aec54 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -521,7 +521,7 @@ struct rcec_ea {
void pci_save_dpc_state(struct pci_dev *dev);
void pci_restore_dpc_state(struct pci_dev *dev);
void pci_dpc_init(struct pci_dev *pdev);
-void dpc_process_error(struct pci_dev *pdev);
+struct pci_dev *dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
#else
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 2b6ef7efa3c1..62a68cde4364 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -257,10 +257,17 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
return 1;
}
-void dpc_process_error(struct pci_dev *pdev)
+/**
+ * dpc_process_error - handle the DPC error status
+ * @pdev: the port that experienced the containment event
+ *
+ * Return the device that experienced the error.
+ */
+struct pci_dev *dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
struct aer_err_info info;
+ struct pci_dev *err_dev = NULL;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
@@ -283,6 +290,13 @@ void dpc_process_error(struct pci_dev *pdev)
"software trigger" :
"reserved error");
+ if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE ||
+ reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE)
+ err_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ PCI_BUS_NUM(source), source & 0xff);
+ else
+ err_dev = pci_dev_get(pdev);
+
/* show RP PIO error detail information */
if (pdev->dpc_rp_extensions &&
reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
@@ -295,6 +309,8 @@ void dpc_process_error(struct pci_dev *pdev)
pci_aer_clear_nonfatal_status(pdev);
pci_aer_clear_fatal_status(pdev);
}
+
+ return err_dev;
}
static void pci_clear_surpdn_errors(struct pci_dev *pdev)
@@ -350,21 +366,23 @@ static bool dpc_is_surprise_removal(struct pci_dev *pdev)
static irqreturn_t dpc_handler(int irq, void *context)
{
- struct pci_dev *pdev = context;
+ struct pci_dev *err_port = context, *err_dev = NULL;
/*
* According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
* of async removal and should be ignored by software.
*/
- if (dpc_is_surprise_removal(pdev)) {
- dpc_handle_surprise_removal(pdev);
+ if (dpc_is_surprise_removal(err_port)) {
+ dpc_handle_surprise_removal(err_port);
return IRQ_HANDLED;
}
- dpc_process_error(pdev);
+ err_dev = dpc_process_error(err_port);
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
+ pcie_do_recovery(err_dev, pci_channel_io_frozen, dpc_reset_link);
+
+ pci_dev_put(err_dev);
return IRQ_HANDLED;
}
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index e86298dbbcff..6ac95e5e001b 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -150,7 +150,7 @@ static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev,
static void edr_handle_event(acpi_handle handle, u32 event, void *data)
{
- struct pci_dev *pdev = data, *edev;
+ struct pci_dev *pdev = data, *err_port, *err_dev = NULL;
pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
u16 status;
@@ -169,36 +169,36 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
* may be that port or a parent of it (PCI Firmware r3.3, sec
* 4.6.13).
*/
- edev = acpi_dpc_port_get(pdev);
- if (!edev) {
+ err_port = acpi_dpc_port_get(pdev);
+ if (!err_port) {
pci_err(pdev, "Firmware failed to locate DPC port\n");
return;
}
- pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev));
+ pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(err_port));
/* If port does not support DPC, just send the OST */
- if (!edev->dpc_cap) {
- pci_err(edev, FW_BUG "This device doesn't support DPC\n");
+ if (!err_port->dpc_cap) {
+ pci_err(err_port, FW_BUG "This device doesn't support DPC\n");
goto send_ost;
}
/* Check if there is a valid DPC trigger */
- pci_read_config_word(edev, edev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
+ pci_read_config_word(err_port, err_port->dpc_cap + PCI_EXP_DPC_STATUS, &status);
if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
- pci_err(edev, "Invalid DPC trigger %#010x\n", status);
+ pci_err(err_port, "Invalid DPC trigger %#010x\n", status);
goto send_ost;
}
- dpc_process_error(edev);
- pci_aer_raw_clear_status(edev);
+ err_dev = dpc_process_error(err_port);
+ pci_aer_raw_clear_status(err_port);
/*
* Irrespective of whether the DPC event is triggered by ERR_FATAL
* or ERR_NONFATAL, since the link is already down, use the FATAL
* error recovery path for both cases.
*/
- estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link);
+ estate = pcie_do_recovery(err_dev, pci_channel_io_frozen, dpc_reset_link);
send_ost:
@@ -207,15 +207,16 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
* to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81).
*/
if (estate == PCI_ERS_RESULT_RECOVERED) {
- pci_dbg(edev, "DPC port successfully recovered\n");
- pcie_clear_device_status(edev);
- acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
+ pci_dbg(err_port, "DPC port successfully recovered\n");
+ pcie_clear_device_status(err_port);
+ acpi_send_edr_status(pdev, err_port, EDR_OST_SUCCESS);
} else {
- pci_dbg(edev, "DPC port recovery failed\n");
- acpi_send_edr_status(pdev, edev, EDR_OST_FAILED);
+ pci_dbg(err_port, "DPC port recovery failed\n");
+ acpi_send_edr_status(pdev, err_port, EDR_OST_FAILED);
}
- pci_dev_put(edev);
+ pci_dev_put(err_port);
+ pci_dev_put(err_dev);
}
void pci_acpi_add_edr_notifier(struct pci_dev *pdev)
--
2.39.3
* Re: [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error
2024-11-12 13:54 ` [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error Shuai Xue
@ 2025-01-23 4:53 ` Sathyanarayanan Kuppuswamy
2025-01-23 7:03 ` Shuai Xue
0 siblings, 1 reply; 17+ messages in thread
From: Sathyanarayanan Kuppuswamy @ 2025-01-23 4:53 UTC (permalink / raw)
To: Shuai Xue, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch
Cc: mahesh, oohall
On 11/12/24 5:54 AM, Shuai Xue wrote:
> The current implementation of pcie_do_recovery() assumes that the
> recovery process is executed on the device that detected the error.
> However, the DPC driver currently passes the error port that experienced
> the DPC event to pcie_do_recovery().
>
> Use the SOURCE ID register to correctly identify the device that detected the
> error. By passing this error device to pcie_do_recovery(), subsequent
> patches will be able to accurately access AER status of the error device.
When passing the error device, I assume pcie_do_recovery() will find the
upstream bridge and run the recovery logic.
>
> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
> ---
IMO, moving the "err_port" rename to a separate patch will make this change
more clear. But it is up to you.
> drivers/pci/pci.h | 2 +-
> drivers/pci/pcie/dpc.c | 30 ++++++++++++++++++++++++------
> drivers/pci/pcie/edr.c | 35 ++++++++++++++++++-----------------
> 3 files changed, 43 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
> index 14d00ce45bfa..0866f79aec54 100644
> --- a/drivers/pci/pci.h
> +++ b/drivers/pci/pci.h
> @@ -521,7 +521,7 @@ struct rcec_ea {
> void pci_save_dpc_state(struct pci_dev *dev);
> void pci_restore_dpc_state(struct pci_dev *dev);
> void pci_dpc_init(struct pci_dev *pdev);
> -void dpc_process_error(struct pci_dev *pdev);
> +struct pci_dev *dpc_process_error(struct pci_dev *pdev);
> pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
> bool pci_dpc_recovered(struct pci_dev *pdev);
> #else
> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
> index 2b6ef7efa3c1..62a68cde4364 100644
> --- a/drivers/pci/pcie/dpc.c
> +++ b/drivers/pci/pcie/dpc.c
> @@ -257,10 +257,17 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
> return 1;
> }
>
> -void dpc_process_error(struct pci_dev *pdev)
> +/**
> + * dpc_process_error - handle the DPC error status
Handling the DPC error status has nothing to do with finding
the error source. Why not add a new helper function?
> + * @pdev: the port that experienced the containment event
> + *
> + * Return the device that experienced the error.
detected the error?
> + */
> +struct pci_dev *dpc_process_error(struct pci_dev *pdev)
> {
> u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
> struct aer_err_info info;
> + struct pci_dev *err_dev = NULL;
I don't think you need NULL initialization here.
>
> pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
> pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
> @@ -283,6 +290,13 @@ void dpc_process_error(struct pci_dev *pdev)
> "software trigger" :
> "reserved error");
>
> + if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE ||
> + reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE)
> + err_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
> + PCI_BUS_NUM(source), source & 0xff);
> + else
> + err_dev = pci_dev_get(pdev);
> +
> /* show RP PIO error detail information */
> if (pdev->dpc_rp_extensions &&
> reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
> @@ -295,6 +309,8 @@ void dpc_process_error(struct pci_dev *pdev)
> pci_aer_clear_nonfatal_status(pdev);
> pci_aer_clear_fatal_status(pdev);
> }
> +
> + return err_dev;
> }
>
> static void pci_clear_surpdn_errors(struct pci_dev *pdev)
> @@ -350,21 +366,23 @@ static bool dpc_is_surprise_removal(struct pci_dev *pdev)
>
> static irqreturn_t dpc_handler(int irq, void *context)
> {
> - struct pci_dev *pdev = context;
> + struct pci_dev *err_port = context, *err_dev = NULL;
NULL initialization is not needed.
>
> /*
> * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
> * of async removal and should be ignored by software.
> */
> - if (dpc_is_surprise_removal(pdev)) {
> - dpc_handle_surprise_removal(pdev);
> + if (dpc_is_surprise_removal(err_port)) {
> + dpc_handle_surprise_removal(err_port);
> return IRQ_HANDLED;
> }
>
> - dpc_process_error(pdev);
> + err_dev = dpc_process_error(err_port);
>
> /* We configure DPC so it only triggers on ERR_FATAL */
> - pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
> + pcie_do_recovery(err_dev, pci_channel_io_frozen, dpc_reset_link);
> +
> + pci_dev_put(err_dev);
>
> return IRQ_HANDLED;
> }
> diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
> index e86298dbbcff..6ac95e5e001b 100644
> --- a/drivers/pci/pcie/edr.c
> +++ b/drivers/pci/pcie/edr.c
> @@ -150,7 +150,7 @@ static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev,
>
> static void edr_handle_event(acpi_handle handle, u32 event, void *data)
> {
> - struct pci_dev *pdev = data, *edev;
> + struct pci_dev *pdev = data, *err_port, *err_dev = NULL;
> pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
> u16 status;
>
> @@ -169,36 +169,36 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
> * may be that port or a parent of it (PCI Firmware r3.3, sec
> * 4.6.13).
> */
> - edev = acpi_dpc_port_get(pdev);
> - if (!edev) {
> + err_port = acpi_dpc_port_get(pdev);
> + if (!err_port) {
> pci_err(pdev, "Firmware failed to locate DPC port\n");
> return;
> }
>
> - pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev));
> + pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(err_port));
>
> /* If port does not support DPC, just send the OST */
> - if (!edev->dpc_cap) {
> - pci_err(edev, FW_BUG "This device doesn't support DPC\n");
> + if (!err_port->dpc_cap) {
> + pci_err(err_port, FW_BUG "This device doesn't support DPC\n");
> goto send_ost;
> }
>
> /* Check if there is a valid DPC trigger */
> - pci_read_config_word(edev, edev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
> + pci_read_config_word(err_port, err_port->dpc_cap + PCI_EXP_DPC_STATUS, &status);
> if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
> - pci_err(edev, "Invalid DPC trigger %#010x\n", status);
> + pci_err(err_port, "Invalid DPC trigger %#010x\n", status);
> goto send_ost;
> }
>
> - dpc_process_error(edev);
> - pci_aer_raw_clear_status(edev);
> + err_dev = dpc_process_error(err_port);
> + pci_aer_raw_clear_status(err_port);
>
> /*
> * Irrespective of whether the DPC event is triggered by ERR_FATAL
> * or ERR_NONFATAL, since the link is already down, use the FATAL
> * error recovery path for both cases.
> */
> - estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link);
> + estate = pcie_do_recovery(err_dev, pci_channel_io_frozen, dpc_reset_link);
>
> send_ost:
>
> @@ -207,15 +207,16 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
> * to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81).
> */
> if (estate == PCI_ERS_RESULT_RECOVERED) {
> - pci_dbg(edev, "DPC port successfully recovered\n");
> - pcie_clear_device_status(edev);
> - acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
> + pci_dbg(err_port, "DPC port successfully recovered\n");
> + pcie_clear_device_status(err_port);
> + acpi_send_edr_status(pdev, err_port, EDR_OST_SUCCESS);
> } else {
> - pci_dbg(edev, "DPC port recovery failed\n");
> - acpi_send_edr_status(pdev, edev, EDR_OST_FAILED);
> + pci_dbg(err_port, "DPC port recovery failed\n");
> + acpi_send_edr_status(pdev, err_port, EDR_OST_FAILED);
> }
>
> - pci_dev_put(edev);
> + pci_dev_put(err_port);
> + pci_dev_put(err_dev);
> }
>
> void pci_acpi_add_edr_notifier(struct pci_dev *pdev)
--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer
* Re: [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error
2025-01-23 4:53 ` Sathyanarayanan Kuppuswamy
@ 2025-01-23 7:03 ` Shuai Xue
0 siblings, 0 replies; 17+ messages in thread
From: Shuai Xue @ 2025-01-23 7:03 UTC (permalink / raw)
To: Sathyanarayanan Kuppuswamy, linux-pci, linux-kernel, linuxppc-dev,
bhelgaas, kbusch
Cc: mahesh, oohall
On 2025/1/23 12:53, Sathyanarayanan Kuppuswamy wrote:
>
> On 11/12/24 5:54 AM, Shuai Xue wrote:
>> The current implementation of pcie_do_recovery() assumes that the
>> recovery process is executed on the device that detected the error.
>> However, the DPC driver currently passes the error port that experienced
>> the DPC event to pcie_do_recovery().
>>
>> Use the SOURCE ID register to correctly identify the device that detected the
>> error. By passing this error device to pcie_do_recovery(), subsequent
>> patches will be able to accurately access AER status of the error device.
>
> When passing the error device, I assume pcie_do_recovery() will find the
> upstream bride and run the recovery logic .
>
Yes, pcie_do_recovery() will find the upstream bridge and walk the bridges
potentially affected by the AER error.
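For reference, the bridge selection at the top of pcie_do_recovery() looks
roughly like this (a paraphrased sketch of drivers/pci/pcie/err.c, not an
exact quote):

	/* Ports handle recovery for their own subtree; endpoints and
	 * RCiEPs defer to their upstream bridge. */
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_DOWNSTREAM ||
	    type == PCI_EXP_TYPE_RC_EC ||
	    type == PCI_EXP_TYPE_RC_END)
		bridge = dev;
	else
		bridge = pci_upstream_bridge(dev);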
>>
>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>> ---
>
> IMO, moving the "err_port" rename to a separate patch will make this change
> more clear. But it is up to you.
I see, I will add a separate patch.
>
>> drivers/pci/pci.h | 2 +-
>> drivers/pci/pcie/dpc.c | 30 ++++++++++++++++++++++++------
>> drivers/pci/pcie/edr.c | 35 ++++++++++++++++++-----------------
>> 3 files changed, 43 insertions(+), 24 deletions(-)
>>
>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>> index 14d00ce45bfa..0866f79aec54 100644
>> --- a/drivers/pci/pci.h
>> +++ b/drivers/pci/pci.h
>> @@ -521,7 +521,7 @@ struct rcec_ea {
>> void pci_save_dpc_state(struct pci_dev *dev);
>> void pci_restore_dpc_state(struct pci_dev *dev);
>> void pci_dpc_init(struct pci_dev *pdev);
>> -void dpc_process_error(struct pci_dev *pdev);
>> +struct pci_dev *dpc_process_error(struct pci_dev *pdev);
>> pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
>> bool pci_dpc_recovered(struct pci_dev *pdev);
>> #else
>> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
>> index 2b6ef7efa3c1..62a68cde4364 100644
>> --- a/drivers/pci/pcie/dpc.c
>> +++ b/drivers/pci/pcie/dpc.c
>> @@ -257,10 +257,17 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
>> return 1;
>> }
>> -void dpc_process_error(struct pci_dev *pdev)
>> +/**
>> + * dpc_process_error - handle the DPC error status
>
> Handling the DPC error status has nothing to do with finding
> the error source. Why not add a new helper function?
As the PCIe spec says,

DPC Error Source ID - When the DPC Trigger Reason field indicates that DPC
was triggered due to the reception of an ERR_NONFATAL or ERR_FATAL, this
register contains the Requester ID of the received Message. Otherwise, the
value of this register is undefined.

To find the error source, we need to
- check the trigger reason from PCI_EXP_DPC_STATUS,
- identify the error device by PCI_EXP_DPC_SOURCE_ID for the ERR_NONFATAL and
ERR_FATAL reasons.
That code would duplicate dpc_process_error(), so I reused dpc_process_error()
directly.
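For illustration, the lookup a separate helper would end up duplicating is
essentially the hunk above (shown standalone here only as a sketch):

	/* The Error Source ID is a Requester ID: bits [15:8] bus, [7:0] devfn.
	 * It is only defined for the ERR_NONFATAL/ERR_FATAL trigger reasons;
	 * otherwise fall back to the port that triggered containment. */
	if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE ||
	    reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE)
		err_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
						      PCI_BUS_NUM(source),
						      source & 0xff);
	else
		err_dev = pci_dev_get(pdev);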
>
>> + * @pdev: the port that experienced the containment event
>> + *
>> + * Return the device that experienced the error.
> detected the error?
Will change it.
>> + */
>> +struct pci_dev *dpc_process_error(struct pci_dev *pdev)
>> {
>> u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
>> struct aer_err_info info;
>> + struct pci_dev *err_dev = NULL;
>
> I don't think you need NULL initialization here.
Will remove it.
>
>> pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
>> pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
>> @@ -283,6 +290,13 @@ void dpc_process_error(struct pci_dev *pdev)
>> "software trigger" :
>> "reserved error");
>> + if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE ||
>> + reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE)
>> + err_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
>> + PCI_BUS_NUM(source), source & 0xff);
>> + else
>> + err_dev = pci_dev_get(pdev);
>> +
>> /* show RP PIO error detail information */
>> if (pdev->dpc_rp_extensions &&
>> reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
>> @@ -295,6 +309,8 @@ void dpc_process_error(struct pci_dev *pdev)
>> pci_aer_clear_nonfatal_status(pdev);
>> pci_aer_clear_fatal_status(pdev);
>> }
>> +
>> + return err_dev;
>> }
>> static void pci_clear_surpdn_errors(struct pci_dev *pdev)
>> @@ -350,21 +366,23 @@ static bool dpc_is_surprise_removal(struct pci_dev *pdev)
>> static irqreturn_t dpc_handler(int irq, void *context)
>> {
>> - struct pci_dev *pdev = context;
>> + struct pci_dev *err_port = context, *err_dev = NULL;
>
> NULL initialization is not needed.
Will remove it.
Thanks for valuable comments.
Best Regards,
Shuai
* [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-12 13:54 [PATCH v2 0/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered Shuai Xue
2024-11-12 13:54 ` [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error Shuai Xue
@ 2024-11-12 13:54 ` Shuai Xue
2024-11-15 9:06 ` Lukas Wunner
` (2 more replies)
2024-12-24 11:03 ` [PATCH v2 0/2] " Shuai Xue
2 siblings, 3 replies; 17+ messages in thread
From: Shuai Xue @ 2024-11-12 13:54 UTC (permalink / raw)
To: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy, xueshuai
The AER driver has historically avoided reading the configuration space of
an endpoint or RCiEP that reported a fatal error, considering the link to
that device unreliable. Consequently, when a fatal error occurs, the AER
and DPC drivers do not report specific error types, resulting in logs like:
pcieport 0000:30:03.0: EDR: EDR event received
pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
pcieport 0000:30:03.0: DPC: ERR_FATAL detected
pcieport 0000:30:03.0: AER: broadcast error_detected message
nvme nvme0: frozen state error detected, reset controller
nvme 0000:34:00.0: ready 0ms after DPC
pcieport 0000:30:03.0: AER: broadcast slot_reset message
AER status registers are sticky and Write-1-to-clear. If the link recovered
after a hot reset, we can still safely access the AER status of the error
device. In that case, report the fatal errors, which helps to figure out the
root cause of the error.
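As a sketch of the sticky/W1C behaviour this relies on (illustrative only,
not code from this series): a read after the reset still returns the bits
latched when the error occurred, and writing those bits back clears them:

	u32 status;

	/* sticky: the status survives the hot reset done for containment */
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
	/* W1C: writing the set bits back clears them */
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);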
After this patch, the logs look like:
pcieport 0000:30:03.0: EDR: EDR event received
pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
pcieport 0000:30:03.0: DPC: ERR_FATAL detected
pcieport 0000:30:03.0: AER: broadcast error_detected message
nvme nvme0: frozen state error detected, reset controller
pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
nvme 0000:34:00.0: ready 0ms after DPC
nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
nvme 0000:34:00.0: [ 4] DLP (First)
pcieport 0000:30:03.0: AER: broadcast slot_reset message
Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
---
drivers/pci/pci.h | 3 ++-
drivers/pci/pcie/aer.c | 11 +++++++----
drivers/pci/pcie/dpc.c | 2 +-
drivers/pci/pcie/err.c | 9 +++++++++
4 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 0866f79aec54..6f827c313639 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -504,7 +504,8 @@ struct aer_err_info {
struct pcie_tlp_log tlp; /* TLP Header */
};
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
+ bool link_healthy);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 13b8586924ea..97ec1c17b6f4 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
* aer_get_device_error_info - read error status from dev and store it to info
* @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
+ * @link_healthy: link is healthy or not
*
* Return 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
+ bool link_healthy)
{
int type = pci_pcie_type(dev);
int aer = dev->aer_cap;
@@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
} else if (type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC ||
type == PCI_EXP_TYPE_DOWNSTREAM ||
- info->severity == AER_NONFATAL) {
+ info->severity == AER_NONFATAL ||
+ (info->severity == AER_FATAL && link_healthy)) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
@@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all before handle them, not to lost records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info, false))
aer_print_error(e_info->dev[i], e_info);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info->dev[i], e_info, false))
handle_error_source(e_info->dev[i], e_info);
}
}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 62a68cde4364..b3f157a00405 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -304,7 +304,7 @@ struct pci_dev *dpc_process_error(struct pci_dev *pdev)
dpc_process_rp_pio_error(pdev);
else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
dpc_get_aer_uncorrect_severity(pdev, &info) &&
- aer_get_device_error_info(pdev, &info)) {
+ aer_get_device_error_info(pdev, &info, false)) {
aer_print_error(pdev, &info);
pci_aer_clear_nonfatal_status(pdev);
pci_aer_clear_fatal_status(pdev);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 31090770fffc..462577b8d75a 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
struct pci_dev *bridge;
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+ struct aer_err_info info;
/*
* If the error was detected by a Root Port, Downstream Port, RCEC,
@@ -223,6 +224,13 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_warn(bridge, "subordinate device reset failed\n");
goto failed;
}
+
+ info.severity = AER_FATAL;
+ /* Link recovered, report fatal errors of RCiEP or EP */
+ if ((type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_RC_END) &&
+ aer_get_device_error_info(dev, &info, true))
+ aer_print_error(dev, &info);
} else {
pci_walk_bridge(bridge, report_normal_detected, &status);
}
@@ -259,6 +267,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
if (host->native_aer || pcie_ports_native) {
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
+ pci_aer_clear_fatal_status(dev);
}
pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
--
2.39.3
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-12 13:54 ` [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered Shuai Xue
@ 2024-11-15 9:06 ` Lukas Wunner
2024-11-15 9:22 ` Shuai Xue
2024-11-15 20:20 ` Bowman, Terry
2025-01-23 20:10 ` Sathyanarayanan Kuppuswamy
2 siblings, 1 reply; 17+ messages in thread
From: Lukas Wunner @ 2024-11-15 9:06 UTC (permalink / raw)
To: Shuai Xue
Cc: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch, mahesh,
oohall, sathyanarayanan.kuppuswamy
On Tue, Nov 12, 2024 at 09:54:19PM +0800, Shuai Xue wrote:
> The AER driver has historically avoided reading the configuration space of
> an endpoint or RCiEP that reported a fatal error, considering the link to
> that device unreliable.
It would be good if you could mention the relevant commit here:
9d938ea53b26 ("PCI/AER: Don't read upstream ports below fatal errors")
Thanks,
Lukas
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-15 9:06 ` Lukas Wunner
@ 2024-11-15 9:22 ` Shuai Xue
0 siblings, 0 replies; 17+ messages in thread
From: Shuai Xue @ 2024-11-15 9:22 UTC (permalink / raw)
To: Lukas Wunner
Cc: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch, mahesh,
oohall, sathyanarayanan.kuppuswamy
On 2024/11/15 17:06, Lukas Wunner wrote:
> On Tue, Nov 12, 2024 at 09:54:19PM +0800, Shuai Xue wrote:
>> The AER driver has historically avoided reading the configuration space of
>> an endpoint or RCiEP that reported a fatal error, considering the link to
>> that device unreliable.
>
> It would be good if you could mention the relevant commit here:
>
> 9d938ea53b26 ("PCI/AER: Don't read upstream ports below fatal errors")
>
> Thanks,
>
> Lukas
Sure, will add it.
Thank you.
Best Regards,
Shuai
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-12 13:54 ` [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered Shuai Xue
2024-11-15 9:06 ` Lukas Wunner
@ 2024-11-15 20:20 ` Bowman, Terry
2024-11-16 12:44 ` Shuai Xue
2025-01-23 20:10 ` Sathyanarayanan Kuppuswamy
2 siblings, 1 reply; 17+ messages in thread
From: Bowman, Terry @ 2024-11-15 20:20 UTC (permalink / raw)
To: Shuai Xue, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch, Lukas Wunner
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
Hi Shuai,
On 11/12/2024 7:54 AM, Shuai Xue wrote:
> The AER driver has historically avoided reading the configuration space of
> an endpoint or RCiEP that reported a fatal error, considering the link to
> that device unreliable. Consequently, when a fatal error occurs, the AER
> and DPC drivers do not report specific error types, resulting in logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> nvme 0000:34:00.0: ready 0ms after DPC
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> AER status registers are sticky and Write-1-to-clear. If the link recovered
> after hot reset, we can still safely access AER status of the error device.
> In such case, report fatal errors which helps to figure out the error root
> case.
>
> After this patch, the logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
> nvme 0000:34:00.0: ready 0ms after DPC
> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
> nvme 0000:34:00.0: [ 4] DLP (First)
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
> ---
> drivers/pci/pci.h | 3 ++-
> drivers/pci/pcie/aer.c | 11 +++++++----
> drivers/pci/pcie/dpc.c | 2 +-
> drivers/pci/pcie/err.c | 9 +++++++++
> 4 files changed, 19 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
> index 0866f79aec54..6f827c313639 100644
> --- a/drivers/pci/pci.h
> +++ b/drivers/pci/pci.h
> @@ -504,7 +504,8 @@ struct aer_err_info {
> struct pcie_tlp_log tlp; /* TLP Header */
> };
>
> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
> + bool link_healthy);
> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
> #endif /* CONFIG_PCIEAER */
>
> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
> index 13b8586924ea..97ec1c17b6f4 100644
> --- a/drivers/pci/pcie/aer.c
> +++ b/drivers/pci/pcie/aer.c
> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
> * aer_get_device_error_info - read error status from dev and store it to info
> * @dev: pointer to the device expected to have a error record
> * @info: pointer to structure to store the error record
> + * @link_healthy: link is healthy or not
> *
> * Return 1 on success, 0 on error.
> *
> * Note that @info is reused among all error devices. Clear fields properly.
> */
> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
> + bool link_healthy)
> {
> int type = pci_pcie_type(dev);
> int aer = dev->aer_cap;
> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
> type == PCI_EXP_TYPE_RC_EC ||
> type == PCI_EXP_TYPE_DOWNSTREAM ||
> - info->severity == AER_NONFATAL) {
> + info->severity == AER_NONFATAL ||
> + (info->severity == AER_FATAL && link_healthy)) {
>
> /* Link is still healthy for IO reads */
> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>
> /* Report all before handle them, not to lost records by reset etc. */
> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
> - if (aer_get_device_error_info(e_info->dev[i], e_info))
> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
> aer_print_error(e_info->dev[i], e_info);
> }
Would it be reasonable to detect whether the link is intact and set the
aer_get_device_error_info() function's 'link_healthy' parameter accordingly?
I was thinking the Link Status register in the upstream port's PCIe capability
could be used to indicate the link viability.
Regards,
Terry
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-15 20:20 ` Bowman, Terry
@ 2024-11-16 12:44 ` Shuai Xue
2024-11-17 13:36 ` Shuai Xue
0 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2024-11-16 12:44 UTC (permalink / raw)
To: Bowman, Terry, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch, Lukas Wunner
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
On 2024/11/16 04:20, Bowman, Terry wrote:
> Hi Shuai,
>
>
> On 11/12/2024 7:54 AM, Shuai Xue wrote:
>> The AER driver has historically avoided reading the configuration space of
>> an endpoint or RCiEP that reported a fatal error, considering the link to
>> that device unreliable. Consequently, when a fatal error occurs, the AER
>> and DPC drivers do not report specific error types, resulting in logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> nvme 0000:34:00.0: ready 0ms after DPC
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>> after hot reset, we can still safely access AER status of the error device.
>> In such case, report fatal errors which helps to figure out the error root
>> case.
>>
>> After this patch, the logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>> nvme 0000:34:00.0: ready 0ms after DPC
>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>> nvme 0000:34:00.0: [ 4] DLP (First)
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>> ---
>> drivers/pci/pci.h | 3 ++-
>> drivers/pci/pcie/aer.c | 11 +++++++----
>> drivers/pci/pcie/dpc.c | 2 +-
>> drivers/pci/pcie/err.c | 9 +++++++++
>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>> index 0866f79aec54..6f827c313639 100644
>> --- a/drivers/pci/pci.h
>> +++ b/drivers/pci/pci.h
>> @@ -504,7 +504,8 @@ struct aer_err_info {
>> struct pcie_tlp_log tlp; /* TLP Header */
>> };
>>
>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>> + bool link_healthy);
>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>> #endif /* CONFIG_PCIEAER */
>>
>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>> index 13b8586924ea..97ec1c17b6f4 100644
>> --- a/drivers/pci/pcie/aer.c
>> +++ b/drivers/pci/pcie/aer.c
>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>> * aer_get_device_error_info - read error status from dev and store it to info
>> * @dev: pointer to the device expected to have a error record
>> * @info: pointer to structure to store the error record
>> + * @link_healthy: link is healthy or not
>> *
>> * Return 1 on success, 0 on error.
>> *
>> * Note that @info is reused among all error devices. Clear fields properly.
>> */
>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>> + bool link_healthy)
>> {
>> int type = pci_pcie_type(dev);
>> int aer = dev->aer_cap;
>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>> type == PCI_EXP_TYPE_RC_EC ||
>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>> - info->severity == AER_NONFATAL) {
>> + info->severity == AER_NONFATAL ||
>> + (info->severity == AER_FATAL && link_healthy)) {
>>
>> /* Link is still healthy for IO reads */
>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>>
>> /* Report all before handle them, not to lost records by reset etc. */
>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>> aer_print_error(e_info->dev[i], e_info);
>> }
>
> Would it be reasonable to detect if the link is intact and set the aer_get_device_error_info()
> function's 'link_healthy' parameter accordingly? I was thinking the port upstream capability
> link status register could be used to indicate the link viability.
>
> Regards,
> Terry
Good idea. I think pciehp_check_link_active is a good implementation to check
link_healthy in aer_get_device_error_info().
int pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
int ret;
ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
return -ENODEV;
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
return ret;
}
Thank you for valuable comments.
Best Regards
Shuai
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-16 12:44 ` Shuai Xue
@ 2024-11-17 13:36 ` Shuai Xue
2024-11-25 5:43 ` Shuai Xue
0 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2024-11-17 13:36 UTC (permalink / raw)
To: Bowman, Terry, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch, Lukas Wunner
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
On 2024/11/16 20:44, Shuai Xue wrote:
>
>
> On 2024/11/16 04:20, Bowman, Terry wrote:
>> Hi Shuai,
>>
>>
>> On 11/12/2024 7:54 AM, Shuai Xue wrote:
>>> The AER driver has historically avoided reading the configuration space of
>>> an endpoint or RCiEP that reported a fatal error, considering the link to
>>> that device unreliable. Consequently, when a fatal error occurs, the AER
>>> and DPC drivers do not report specific error types, resulting in logs like:
>>>
>>> pcieport 0000:30:03.0: EDR: EDR event received
>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>> nvme nvme0: frozen state error detected, reset controller
>>> nvme 0000:34:00.0: ready 0ms after DPC
>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>
>>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>>> after hot reset, we can still safely access AER status of the error device.
>>> In such case, report fatal errors which helps to figure out the error root
>>> case.
>>>
>>> After this patch, the logs like:
>>>
>>> pcieport 0000:30:03.0: EDR: EDR event received
>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>> nvme nvme0: frozen state error detected, reset controller
>>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>>> nvme 0000:34:00.0: ready 0ms after DPC
>>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>>> nvme 0000:34:00.0: [ 4] DLP (First)
>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>
>>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>>> ---
>>> drivers/pci/pci.h | 3 ++-
>>> drivers/pci/pcie/aer.c | 11 +++++++----
>>> drivers/pci/pcie/dpc.c | 2 +-
>>> drivers/pci/pcie/err.c | 9 +++++++++
>>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>>> index 0866f79aec54..6f827c313639 100644
>>> --- a/drivers/pci/pci.h
>>> +++ b/drivers/pci/pci.h
>>> @@ -504,7 +504,8 @@ struct aer_err_info {
>>> struct pcie_tlp_log tlp; /* TLP Header */
>>> };
>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>> + bool link_healthy);
>>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>>> #endif /* CONFIG_PCIEAER */
>>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>>> index 13b8586924ea..97ec1c17b6f4 100644
>>> --- a/drivers/pci/pcie/aer.c
>>> +++ b/drivers/pci/pcie/aer.c
>>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>>> * aer_get_device_error_info - read error status from dev and store it to info
>>> * @dev: pointer to the device expected to have a error record
>>> * @info: pointer to structure to store the error record
>>> + * @link_healthy: link is healthy or not
>>> *
>>> * Return 1 on success, 0 on error.
>>> *
>>> * Note that @info is reused among all error devices. Clear fields properly.
>>> */
>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>> + bool link_healthy)
>>> {
>>> int type = pci_pcie_type(dev);
>>> int aer = dev->aer_cap;
>>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>>> type == PCI_EXP_TYPE_RC_EC ||
>>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>>> - info->severity == AER_NONFATAL) {
>>> + info->severity == AER_NONFATAL ||
>>> + (info->severity == AER_FATAL && link_healthy)) {
>>> /* Link is still healthy for IO reads */
>>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>>> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>>> /* Report all before handle them, not to lost records by reset etc. */
>>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>>> aer_print_error(e_info->dev[i], e_info);
>>> }
>>
>> Would it be reasonable to detect if the link is intact and set the aer_get_device_error_info()
>> function's 'link_healthy' parameter accordingly? I was thinking the port upstream capability
>> link status register could be used to indicate the link viability.
>>
>> Regards,
>> Terry
>
> Good idea. I think pciehp_check_link_active is a good implementation to check
> link_healthy in aer_get_device_error_info().
>
> int pciehp_check_link_active(struct controller *ctrl)
> {
> struct pci_dev *pdev = ctrl_dev(ctrl);
> u16 lnk_status;
> int ret;
> ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
> if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
> return -ENODEV;
> ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
> ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
> return ret;
> }
>
> Thank you for valuable comments.
>
> Best Regards
> Shuai
Hi, Bowman,
After diving into the code details, I found that both dpc_reset_link() and
aer_root_reset() use pci_bridge_wait_for_secondary_bus() to wait for the
secondary bus to become accessible. IMHO, pci_bridge_wait_for_secondary_bus()
is more robust than a function like pciehp_check_link_active(). So I think
reset_subordinates() is a good boundary for delineating whether a link is
accessible.
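A minimal sketch of that boundary, reusing the names from this series
(illustrative only, not an additional patch):

	struct aer_err_info info = { .severity = AER_FATAL };
	bool link_healthy = false;

	/* dpc_reset_link()/aer_root_reset() wait for the secondary bus here */
	if (reset_subordinates(bridge) == PCI_ERS_RESULT_RECOVERED)
		link_healthy = true;

	if (aer_get_device_error_info(dev, &info, link_healthy))
		aer_print_error(dev, &info);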
Besides, for the DPC driver, the link of the upstream port, e.g. the Root
Port, is inactive when DPC is triggered and only becomes active again once
dpc_reset_link() succeeds. But for the AER driver, the link is active both
before and after aer_root_reset(). As a result, the AER status will be
reported twice.
Best Regards,
Shuai
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-17 13:36 ` Shuai Xue
@ 2024-11-25 5:43 ` Shuai Xue
2024-11-25 19:47 ` Bowman, Terry
0 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2024-11-25 5:43 UTC (permalink / raw)
To: Bowman, Terry, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch, Lukas Wunner
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
On 2024/11/17 21:36, Shuai Xue wrote:
>
>
> On 2024/11/16 20:44, Shuai Xue wrote:
>>
>>
>> On 2024/11/16 04:20, Bowman, Terry wrote:
>>> Hi Shuai,
>>>
>>>
>>> On 11/12/2024 7:54 AM, Shuai Xue wrote:
>>>> The AER driver has historically avoided reading the configuration space of
>>>> an endpoint or RCiEP that reported a fatal error, considering the link to
>>>> that device unreliable. Consequently, when a fatal error occurs, the AER
>>>> and DPC drivers do not report specific error types, resulting in logs like:
>>>>
>>>> pcieport 0000:30:03.0: EDR: EDR event received
>>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>>> nvme nvme0: frozen state error detected, reset controller
>>>> nvme 0000:34:00.0: ready 0ms after DPC
>>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>>
>>>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>>>> after hot reset, we can still safely access AER status of the error device.
>>>> In such case, report fatal errors which helps to figure out the error root
>>>> case.
>>>>
>>>> After this patch, the logs like:
>>>>
>>>> pcieport 0000:30:03.0: EDR: EDR event received
>>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>>> nvme nvme0: frozen state error detected, reset controller
>>>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>>>> nvme 0000:34:00.0: ready 0ms after DPC
>>>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>>>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>>>> nvme 0000:34:00.0: [ 4] DLP (First)
>>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>>
>>>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>>>> ---
>>>> drivers/pci/pci.h | 3 ++-
>>>> drivers/pci/pcie/aer.c | 11 +++++++----
>>>> drivers/pci/pcie/dpc.c | 2 +-
>>>> drivers/pci/pcie/err.c | 9 +++++++++
>>>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>>>> index 0866f79aec54..6f827c313639 100644
>>>> --- a/drivers/pci/pci.h
>>>> +++ b/drivers/pci/pci.h
>>>> @@ -504,7 +504,8 @@ struct aer_err_info {
>>>> struct pcie_tlp_log tlp; /* TLP Header */
>>>> };
>>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>>> + bool link_healthy);
>>>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>>>> #endif /* CONFIG_PCIEAER */
>>>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>>>> index 13b8586924ea..97ec1c17b6f4 100644
>>>> --- a/drivers/pci/pcie/aer.c
>>>> +++ b/drivers/pci/pcie/aer.c
>>>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>>>> * aer_get_device_error_info - read error status from dev and store it to info
>>>> * @dev: pointer to the device expected to have a error record
>>>> * @info: pointer to structure to store the error record
>>>> + * @link_healthy: link is healthy or not
>>>> *
>>>> * Return 1 on success, 0 on error.
>>>> *
>>>> * Note that @info is reused among all error devices. Clear fields properly.
>>>> */
>>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>>> + bool link_healthy)
>>>> {
>>>> int type = pci_pcie_type(dev);
>>>> int aer = dev->aer_cap;
>>>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>>>> type == PCI_EXP_TYPE_RC_EC ||
>>>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>>>> - info->severity == AER_NONFATAL) {
>>>> + info->severity == AER_NONFATAL ||
>>>> + (info->severity == AER_FATAL && link_healthy)) {
>>>> /* Link is still healthy for IO reads */
>>>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>>>> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>>>> /* Report all before handle them, not to lost records by reset etc. */
>>>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>>>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>>>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>>>> aer_print_error(e_info->dev[i], e_info);
>>>> }
>>>
>>> Would it be reasonable to detect if the link is intact and set the aer_get_device_error_info()
>>> function's 'link_healthy' parameter accordingly? I was thinking the port upstream capability
>>> link status register could be used to indicate the link viability.
>>>
>>> Regards,
>>> Terry
>>
>> Good idea. I think pciehp_check_link_active is a good implementation to check
>> link_healthy in aer_get_device_error_info().
>>
>> int pciehp_check_link_active(struct controller *ctrl)
>> {
>> struct pci_dev *pdev = ctrl_dev(ctrl);
>> u16 lnk_status;
>> int ret;
>> ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
>> if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
>> return -ENODEV;
>> ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
>> ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
>> return ret;
>> }
>>
>> Thank you for valuable comments.
>>
>> Best Regards
>> Shuai
>
> Hi, Bowman,
>
> After dive into the code details, I found that both dpc_reset_link() and
> aer_root_reset() use pci_bridge_wait_for_secondary_bus() to wait for secondary
> bus to be accessible. IMHO, pci_bridge_wait_for_secondary_bus() is better
> robustness than function like pciehp_check_link_active(). So I think
> reset_subordinates() is good boundary for delineating whether a link is
> accessible.
>
> Besides, for DPC driver, the link status of upstream port, e.g, rootport, is
> inactive when DPC is triggered, and is recoverd to active until
> dpc_reset_link() success. But for AER driver, the link is active before and
> after aer_root_reset(). As a result, the AER status will be reported twice.
>
Hi, Bowman, and Bjorn,
Do you have any feedback or other concern?
Thank you.
Best Regards,
Shuai
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recovered
2024-11-25 5:43 ` Shuai Xue
@ 2024-11-25 19:47 ` Bowman, Terry
0 siblings, 0 replies; 17+ messages in thread
From: Bowman, Terry @ 2024-11-25 19:47 UTC (permalink / raw)
To: Shuai Xue, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch, Lukas Wunner
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
On 11/24/2024 11:43 PM, Shuai Xue wrote:
>
> On 2024/11/17 21:36, Shuai Xue wrote:
>>
>> On 2024/11/16 20:44, Shuai Xue wrote:
>>>
>>> On 2024/11/16 04:20, Bowman, Terry wrote:
>>>> Hi Shuai,
>>>>
>>>>
>>>> On 11/12/2024 7:54 AM, Shuai Xue wrote:
>>>>> The AER driver has historically avoided reading the configuration space of
>>>>> an endpoint or RCiEP that reported a fatal error, considering the link to
>>>>> that device unreliable. Consequently, when a fatal error occurs, the AER
>>>>> and DPC drivers do not report specific error types, resulting in logs like:
>>>>>
>>>>> pcieport 0000:30:03.0: EDR: EDR event received
>>>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>>>> nvme nvme0: frozen state error detected, reset controller
>>>>> nvme 0000:34:00.0: ready 0ms after DPC
>>>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>>>
>>>>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>>>>> after hot reset, we can still safely access AER status of the error device.
>>>>> In such case, report fatal errors which helps to figure out the error root
>>>>> case.
>>>>>
>>>>> After this patch, the logs like:
>>>>>
>>>>> pcieport 0000:30:03.0: EDR: EDR event received
>>>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>>>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>>>> nvme nvme0: frozen state error detected, reset controller
>>>>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>>>>> nvme 0000:34:00.0: ready 0ms after DPC
>>>>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>>>>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>>>>> nvme 0000:34:00.0: [ 4] DLP (First)
>>>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>>>
>>>>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>>>>> ---
>>>>> drivers/pci/pci.h | 3 ++-
>>>>> drivers/pci/pcie/aer.c | 11 +++++++----
>>>>> drivers/pci/pcie/dpc.c | 2 +-
>>>>> drivers/pci/pcie/err.c | 9 +++++++++
>>>>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>>>>
>>>>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>>>>> index 0866f79aec54..6f827c313639 100644
>>>>> --- a/drivers/pci/pci.h
>>>>> +++ b/drivers/pci/pci.h
>>>>> @@ -504,7 +504,8 @@ struct aer_err_info {
>>>>> struct pcie_tlp_log tlp; /* TLP Header */
>>>>> };
>>>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>>>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>>>> + bool link_healthy);
>>>>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>>>>> #endif /* CONFIG_PCIEAER */
>>>>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>>>>> index 13b8586924ea..97ec1c17b6f4 100644
>>>>> --- a/drivers/pci/pcie/aer.c
>>>>> +++ b/drivers/pci/pcie/aer.c
>>>>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>>>>> * aer_get_device_error_info - read error status from dev and store it to info
>>>>> * @dev: pointer to the device expected to have a error record
>>>>> * @info: pointer to structure to store the error record
>>>>> + * @link_healthy: link is healthy or not
>>>>> *
>>>>> * Return 1 on success, 0 on error.
>>>>> *
>>>>> * Note that @info is reused among all error devices. Clear fields properly.
>>>>> */
>>>>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>>>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>>>>> + bool link_healthy)
>>>>> {
>>>>> int type = pci_pcie_type(dev);
>>>>> int aer = dev->aer_cap;
>>>>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>>>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>>>>> type == PCI_EXP_TYPE_RC_EC ||
>>>>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>>>>> - info->severity == AER_NONFATAL) {
>>>>> + info->severity == AER_NONFATAL ||
>>>>> + (info->severity == AER_FATAL && link_healthy)) {
>>>>> /* Link is still healthy for IO reads */
>>>>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>>>>> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>>>>> /* Report all before handle them, not to lost records by reset etc. */
>>>>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>>>>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>>>>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>>>>> aer_print_error(e_info->dev[i], e_info);
>>>>> }
>>>> Would it be reasonable to detect if the link is intact and set the aer_get_device_error_info()
>>>> function's 'link_healthy' parameter accordingly? I was thinking the port upstream capability
>>>> link status register could be used to indicate the link viability.
>>>>
>>>> Regards,
>>>> Terry
>>> Good idea. I think pciehp_check_link_active is a good implementation to check
>>> link_healthy in aer_get_device_error_info().
>>>
>>> int pciehp_check_link_active(struct controller *ctrl)
>>> {
>>> struct pci_dev *pdev = ctrl_dev(ctrl);
>>> u16 lnk_status;
>>> int ret;
>>> ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
>>> if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
>>> return -ENODEV;
>>> ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
>>> ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
>>> return ret;
>>> }
>>>
>>> Thank you for valuable comments.
>>>
>>> Best Regards
>>> Shuai
>> Hi, Bowman,
>>
>> After diving into the code details, I found that both dpc_reset_link() and
>> aer_root_reset() use pci_bridge_wait_for_secondary_bus() to wait for the
>> secondary bus to become accessible. IMHO, pci_bridge_wait_for_secondary_bus()
>> is more robust than a function like pciehp_check_link_active(), so I think
>> reset_subordinates() is a good boundary for delineating whether a link is
>> accessible.
>>
>> Besides, for the DPC driver, the link of the upstream port, e.g. the root
>> port, is inactive when DPC is triggered and only becomes active again once
>> dpc_reset_link() succeeds. But for the AER driver, the link is active before
>> and after aer_root_reset(). As a result, the AER status will be reported twice.
>>
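For reference, a minimal sketch of the DLLLA-based check discussed above, adapted to take a plain struct pci_dev; the helper name is hypothetical and not part of this series:

static int pcie_link_active(struct pci_dev *bridge)
{
	u16 lnk_status;
	int ret;

	ret = pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &lnk_status);
	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
		return -ENODEV;

	/* Data Link Layer Link Active set means the link has (re)trained */
	return !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
}

As described above, the series instead treats a successful reset_subordinates(), i.e. pci_bridge_wait_for_secondary_bus(), as the point after which the downstream device's config space is considered accessible again.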
>
> Hi, Bowman, and Bjorn,
>
> Do you have any feedback or other concern?
>
> Thank you.
>
> Best Regards,
> Shuai
>
Hi Shuai,
I don't have further feedback or concerns.
Regards,
Terry
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
2024-11-12 13:54 ` [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd Shuai Xue
2024-11-15 9:06 ` Lukas Wunner
2024-11-15 20:20 ` Bowman, Terry
@ 2025-01-23 20:10 ` Sathyanarayanan Kuppuswamy
2025-01-24 1:45 ` Shuai Xue
2 siblings, 1 reply; 17+ messages in thread
From: Sathyanarayanan Kuppuswamy @ 2025-01-23 20:10 UTC (permalink / raw)
To: Shuai Xue, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch
Cc: mahesh, oohall
Hi,
On 11/12/24 5:54 AM, Shuai Xue wrote:
> The AER driver has historically avoided reading the configuration space of
> an endpoint or RCiEP that reported a fatal error, considering the link to
> that device unreliable. Consequently, when a fatal error occurs, the AER
> and DPC drivers do not report specific error types, resulting in logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> nvme 0000:34:00.0: ready 0ms after DPC
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> AER status registers are sticky and Write-1-to-clear. If the link recovered
> after hot reset, we can still safely access AER status of the error device.
> In such case, report fatal errors which helps to figure out the error root
> case.
>
> After this patch, the logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
> nvme 0000:34:00.0: ready 0ms after DPC
> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
> nvme 0000:34:00.0: [ 4] DLP (First)
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
> ---
> drivers/pci/pci.h | 3 ++-
> drivers/pci/pcie/aer.c | 11 +++++++----
> drivers/pci/pcie/dpc.c | 2 +-
> drivers/pci/pcie/err.c | 9 +++++++++
> 4 files changed, 19 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
> index 0866f79aec54..6f827c313639 100644
> --- a/drivers/pci/pci.h
> +++ b/drivers/pci/pci.h
> @@ -504,7 +504,8 @@ struct aer_err_info {
> struct pcie_tlp_log tlp; /* TLP Header */
> };
>
> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
> + bool link_healthy);
> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
> #endif /* CONFIG_PCIEAER */
>
> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
> index 13b8586924ea..97ec1c17b6f4 100644
> --- a/drivers/pci/pcie/aer.c
> +++ b/drivers/pci/pcie/aer.c
> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
> * aer_get_device_error_info - read error status from dev and store it to info
> * @dev: pointer to the device expected to have a error record
> * @info: pointer to structure to store the error record
> + * @link_healthy: link is healthy or not
> *
> * Return 1 on success, 0 on error.
> *
> * Note that @info is reused among all error devices. Clear fields properly.
> */
> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
> + bool link_healthy)
> {
> int type = pci_pcie_type(dev);
> int aer = dev->aer_cap;
> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
> type == PCI_EXP_TYPE_RC_EC ||
> type == PCI_EXP_TYPE_DOWNSTREAM ||
> - info->severity == AER_NONFATAL) {
> + info->severity == AER_NONFATAL ||
> + (info->severity == AER_FATAL && link_healthy)) {
>
> /* Link is still healthy for IO reads */
> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>
> /* Report all before handle them, not to lost records by reset etc. */
> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
> - if (aer_get_device_error_info(e_info->dev[i], e_info))
> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
> aer_print_error(e_info->dev[i], e_info);
> }
> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
> - if (aer_get_device_error_info(e_info->dev[i], e_info))
> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
> handle_error_source(e_info->dev[i], e_info);
> }
> }
> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
> index 62a68cde4364..b3f157a00405 100644
> --- a/drivers/pci/pcie/dpc.c
> +++ b/drivers/pci/pcie/dpc.c
> @@ -304,7 +304,7 @@ struct pci_dev *dpc_process_error(struct pci_dev *pdev)
> dpc_process_rp_pio_error(pdev);
> else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
> dpc_get_aer_uncorrect_severity(pdev, &info) &&
> - aer_get_device_error_info(pdev, &info)) {
> + aer_get_device_error_info(pdev, &info, false)) {
> aer_print_error(pdev, &info);
> pci_aer_clear_nonfatal_status(pdev);
> pci_aer_clear_fatal_status(pdev);
> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
> index 31090770fffc..462577b8d75a 100644
> --- a/drivers/pci/pcie/err.c
> +++ b/drivers/pci/pcie/err.c
> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
> struct pci_dev *bridge;
> pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
> struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
> + struct aer_err_info info;
>
> /*
> * If the error was detected by a Root Port, Downstream Port, RCEC,
> @@ -223,6 +224,13 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
> pci_warn(bridge, "subordinate device reset failed\n");
> goto failed;
> }
> +
> + info.severity = AER_FATAL;
> + /* Link recovered, report fatal errors of RCiEP or EP */
> + if ((type == PCI_EXP_TYPE_ENDPOINT ||
> + type == PCI_EXP_TYPE_RC_END) &&
> + aer_get_device_error_info(dev, &info, true))
> + aer_print_error(dev, &info);
IMO, error device information is more like debug info. Can we change
the print level of this info to debug?
> } else {
> pci_walk_bridge(bridge, report_normal_detected, &status);
> }
> @@ -259,6 +267,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
> if (host->native_aer || pcie_ports_native) {
> pcie_clear_device_status(dev);
> pci_aer_clear_nonfatal_status(dev);
> + pci_aer_clear_fatal_status(dev);
I think we already clear the fatal status in the DPC driver, so why do it again?
> }
>
> pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
2025-01-23 20:10 ` Sathyanarayanan Kuppuswamy
@ 2025-01-24 1:45 ` Shuai Xue
2025-01-24 7:03 ` Sathyanarayanan Kuppuswamy
0 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2025-01-24 1:45 UTC (permalink / raw)
To: Sathyanarayanan Kuppuswamy, linux-pci, linux-kernel, linuxppc-dev,
bhelgaas, kbusch
Cc: mahesh, oohall
On 2025/1/24 04:10, Sathyanarayanan Kuppuswamy wrote:
> Hi,
>
> On 11/12/24 5:54 AM, Shuai Xue wrote:
>> The AER driver has historically avoided reading the configuration space of
>> an endpoint or RCiEP that reported a fatal error, considering the link to
>> that device unreliable. Consequently, when a fatal error occurs, the AER
>> and DPC drivers do not report specific error types, resulting in logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> nvme 0000:34:00.0: ready 0ms after DPC
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>> after hot reset, we can still safely access AER status of the error device.
>> In such case, report fatal errors which helps to figure out the error root
>> case.
>>
>> After this patch, the logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>> nvme 0000:34:00.0: ready 0ms after DPC
>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>> nvme 0000:34:00.0: [ 4] DLP (First)
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>> ---
>> drivers/pci/pci.h | 3 ++-
>> drivers/pci/pcie/aer.c | 11 +++++++----
>> drivers/pci/pcie/dpc.c | 2 +-
>> drivers/pci/pcie/err.c | 9 +++++++++
>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>> index 0866f79aec54..6f827c313639 100644
>> --- a/drivers/pci/pci.h
>> +++ b/drivers/pci/pci.h
>> @@ -504,7 +504,8 @@ struct aer_err_info {
>> struct pcie_tlp_log tlp; /* TLP Header */
>> };
>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>> + bool link_healthy);
>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>> #endif /* CONFIG_PCIEAER */
>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>> index 13b8586924ea..97ec1c17b6f4 100644
>> --- a/drivers/pci/pcie/aer.c
>> +++ b/drivers/pci/pcie/aer.c
>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>> * aer_get_device_error_info - read error status from dev and store it to info
>> * @dev: pointer to the device expected to have a error record
>> * @info: pointer to structure to store the error record
>> + * @link_healthy: link is healthy or not
>> *
>> * Return 1 on success, 0 on error.
>> *
>> * Note that @info is reused among all error devices. Clear fields properly.
>> */
>> -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> +int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info,
>> + bool link_healthy)
>> {
>> int type = pci_pcie_type(dev);
>> int aer = dev->aer_cap;
>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>> type == PCI_EXP_TYPE_RC_EC ||
>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>> - info->severity == AER_NONFATAL) {
>> + info->severity == AER_NONFATAL ||
>> + (info->severity == AER_FATAL && link_healthy)) {
>> /* Link is still healthy for IO reads */
>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>> @@ -1258,11 +1261,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
>> /* Report all before handle them, not to lost records by reset etc. */
>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>> aer_print_error(e_info->dev[i], e_info);
>> }
>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>> handle_error_source(e_info->dev[i], e_info);
>> }
>> }
>> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
>> index 62a68cde4364..b3f157a00405 100644
>> --- a/drivers/pci/pcie/dpc.c
>> +++ b/drivers/pci/pcie/dpc.c
>> @@ -304,7 +304,7 @@ struct pci_dev *dpc_process_error(struct pci_dev *pdev)
>> dpc_process_rp_pio_error(pdev);
>> else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
>> dpc_get_aer_uncorrect_severity(pdev, &info) &&
>> - aer_get_device_error_info(pdev, &info)) {
>> + aer_get_device_error_info(pdev, &info, false)) {
>> aer_print_error(pdev, &info);
>> pci_aer_clear_nonfatal_status(pdev);
>> pci_aer_clear_fatal_status(pdev);
>> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
>> index 31090770fffc..462577b8d75a 100644
>> --- a/drivers/pci/pcie/err.c
>> +++ b/drivers/pci/pcie/err.c
>> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>> struct pci_dev *bridge;
>> pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
>> struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
>> + struct aer_err_info info;
>> /*
>> * If the error was detected by a Root Port, Downstream Port, RCEC,
>> @@ -223,6 +224,13 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>> pci_warn(bridge, "subordinate device reset failed\n");
>> goto failed;
>> }
>> +
>> + info.severity = AER_FATAL;
>> + /* Link recovered, report fatal errors of RCiEP or EP */
>> + if ((type == PCI_EXP_TYPE_ENDPOINT ||
>> + type == PCI_EXP_TYPE_RC_END) &&
>> + aer_get_device_error_info(dev, &info, true))
>> + aer_print_error(dev, &info);
>
> IMO, error device information is more like debug info. Can we change
> the print level of this info to debug?
Yes, but the error device information is quite important for users to figure
out the device status and should not be ignored. We need it in production to
analyze server health.
>
>> } else {
>> pci_walk_bridge(bridge, report_normal_detected, &status);
>> }
>> @@ -259,6 +267,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>> if (host->native_aer || pcie_ports_native) {
>> pcie_clear_device_status(dev);
>> pci_aer_clear_nonfatal_status(dev);
>> + pci_aer_clear_fatal_status(dev);
>
> I think we already clear the fatal status in the DPC driver, so why do it again?
The DPC driver only clears the fatal status of the err_port, not the err_dev.
err_dev and err_port are indeed easy to confuse, so I have differentiated them
again in patch 1.
>
>> }
>> pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
>
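To make the err_port/err_dev distinction above concrete, here is a rough sketch; the helper names are hypothetical, and the series open-codes the equivalent calls in dpc.c and err.c:

/* err_port: the DPC-capable port that contained the error; the DPC
 * driver clears its status after handling the containment event.
 */
static void clear_err_port_status(struct pci_dev *err_port)
{
	pci_aer_clear_nonfatal_status(err_port);
	pci_aer_clear_fatal_status(err_port);
}

/* err_dev: the endpoint/RCiEP that actually reported the error; its
 * sticky AER status can only be cleared once the link is back, which
 * is why pcie_do_recovery() clears the fatal status there as well.
 */
static void clear_err_dev_status(struct pci_dev *err_dev)
{
	pcie_clear_device_status(err_dev);
	pci_aer_clear_nonfatal_status(err_dev);
	pci_aer_clear_fatal_status(err_dev);
}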
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
2025-01-24 1:45 ` Shuai Xue
@ 2025-01-24 7:03 ` Sathyanarayanan Kuppuswamy
0 siblings, 0 replies; 17+ messages in thread
From: Sathyanarayanan Kuppuswamy @ 2025-01-24 7:03 UTC (permalink / raw)
To: Shuai Xue, linux-pci, linux-kernel, linuxppc-dev, bhelgaas,
kbusch
Cc: mahesh, oohall
On 1/23/25 5:45 PM, Shuai Xue wrote:
>
>
> On 2025/1/24 04:10, Sathyanarayanan Kuppuswamy wrote:
>> Hi,
>>
>> On 11/12/24 5:54 AM, Shuai Xue wrote:
>>> The AER driver has historically avoided reading the configuration
>>> space of
>>> an endpoint or RCiEP that reported a fatal error, considering the
>>> link to
>>> that device unreliable. Consequently, when a fatal error occurs, the
>>> AER
>>> and DPC drivers do not report specific error types, resulting in
>>> logs like:
>>>
>>> pcieport 0000:30:03.0: EDR: EDR event received
>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005
>>> source:0x3400
>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>> nvme nvme0: frozen state error detected, reset controller
>>> nvme 0000:34:00.0: ready 0ms after DPC
>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>
>>> AER status registers are sticky and Write-1-to-clear. If the link
>>> recovered
>>> after hot reset, we can still safely access AER status of the error
>>> device.
>>> In such case, report fatal errors which helps to figure out the
>>> error root
>>> case.
>>>
>>> After this patch, the logs like:
>>>
>>> pcieport 0000:30:03.0: EDR: EDR event received
>>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005
>>> source:0x3400
>>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>>> nvme nvme0: frozen state error detected, reset controller
>>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after
>>> activation
>>> nvme 0000:34:00.0: ready 0ms after DPC
>>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable
>>> (Fatal), type=Data Link Layer, (Receiver ID)
>>> nvme 0000:34:00.0: device [144d:a804] error
>>> status/mask=00000010/00504000
>>> nvme 0000:34:00.0: [ 4] DLP (First)
>>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>>
>>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>>> ---
>>> drivers/pci/pci.h | 3 ++-
>>> drivers/pci/pcie/aer.c | 11 +++++++----
>>> drivers/pci/pcie/dpc.c | 2 +-
>>> drivers/pci/pcie/err.c | 9 +++++++++
>>> 4 files changed, 19 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>>> index 0866f79aec54..6f827c313639 100644
>>> --- a/drivers/pci/pci.h
>>> +++ b/drivers/pci/pci.h
>>> @@ -504,7 +504,8 @@ struct aer_err_info {
>>> struct pcie_tlp_log tlp; /* TLP Header */
>>> };
>>> -int aer_get_device_error_info(struct pci_dev *dev, struct
>>> aer_err_info *info);
>>> +int aer_get_device_error_info(struct pci_dev *dev, struct
>>> aer_err_info *info,
>>> + bool link_healthy);
>>> void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>>> #endif /* CONFIG_PCIEAER */
>>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>>> index 13b8586924ea..97ec1c17b6f4 100644
>>> --- a/drivers/pci/pcie/aer.c
>>> +++ b/drivers/pci/pcie/aer.c
>>> @@ -1200,12 +1200,14 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
>>> * aer_get_device_error_info - read error status from dev and
>>> store it to info
>>> * @dev: pointer to the device expected to have a error record
>>> * @info: pointer to structure to store the error record
>>> + * @link_healthy: link is healthy or not
>>> *
>>> * Return 1 on success, 0 on error.
>>> *
>>> * Note that @info is reused among all error devices. Clear fields
>>> properly.
>>> */
>>> -int aer_get_device_error_info(struct pci_dev *dev, struct
>>> aer_err_info *info)
>>> +int aer_get_device_error_info(struct pci_dev *dev, struct
>>> aer_err_info *info,
>>> + bool link_healthy)
>>> {
>>> int type = pci_pcie_type(dev);
>>> int aer = dev->aer_cap;
>>> @@ -1229,7 +1231,8 @@ int aer_get_device_error_info(struct pci_dev
>>> *dev, struct aer_err_info *info)
>>> } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
>>> type == PCI_EXP_TYPE_RC_EC ||
>>> type == PCI_EXP_TYPE_DOWNSTREAM ||
>>> - info->severity == AER_NONFATAL) {
>>> + info->severity == AER_NONFATAL ||
>>> + (info->severity == AER_FATAL && link_healthy)) {
>>> /* Link is still healthy for IO reads */
>>> pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>>> @@ -1258,11 +1261,11 @@ static inline void
>>> aer_process_err_devices(struct aer_err_info *e_info)
>>> /* Report all before handle them, not to lost records by reset
>>> etc. */
>>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>>> aer_print_error(e_info->dev[i], e_info);
>>> }
>>> for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
>>> - if (aer_get_device_error_info(e_info->dev[i], e_info))
>>> + if (aer_get_device_error_info(e_info->dev[i], e_info, false))
>>> handle_error_source(e_info->dev[i], e_info);
>>> }
>>> }
>>> diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
>>> index 62a68cde4364..b3f157a00405 100644
>>> --- a/drivers/pci/pcie/dpc.c
>>> +++ b/drivers/pci/pcie/dpc.c
>>> @@ -304,7 +304,7 @@ struct pci_dev *dpc_process_error(struct pci_dev
>>> *pdev)
>>> dpc_process_rp_pio_error(pdev);
>>> else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
>>> dpc_get_aer_uncorrect_severity(pdev, &info) &&
>>> - aer_get_device_error_info(pdev, &info)) {
>>> + aer_get_device_error_info(pdev, &info, false)) {
>>> aer_print_error(pdev, &info);
>>> pci_aer_clear_nonfatal_status(pdev);
>>> pci_aer_clear_fatal_status(pdev);
>>> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
>>> index 31090770fffc..462577b8d75a 100644
>>> --- a/drivers/pci/pcie/err.c
>>> +++ b/drivers/pci/pcie/err.c
>>> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev
>>> *dev,
>>> struct pci_dev *bridge;
>>> pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
>>> struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
>>> + struct aer_err_info info;
>>> /*
>>> * If the error was detected by a Root Port, Downstream Port,
>>> RCEC,
>>> @@ -223,6 +224,13 @@ pci_ers_result_t pcie_do_recovery(struct
>>> pci_dev *dev,
>>> pci_warn(bridge, "subordinate device reset failed\n");
>>> goto failed;
>>> }
>>> +
>>> + info.severity = AER_FATAL;
>>> + /* Link recovered, report fatal errors of RCiEP or EP */
>>> + if ((type == PCI_EXP_TYPE_ENDPOINT ||
>>> + type == PCI_EXP_TYPE_RC_END) &&
>>> + aer_get_device_error_info(dev, &info, true))
>>> + aer_print_error(dev, &info);
>>
>> IMO, error device information is more like debug info. Can we change
>> the print level of this info to debug?
>
> Yes, but the error device information is quite important for users to
> figure out the device status and should not be ignored. We need it in
> production to analyze server health.
IMO, such information is needed for debugging repeated DPC event
occurrences. So when encountering repeated failures, an interested party can
increase the log level and gather this data. I personally think this is too
much detail for a kernel info message. Let's see what others and Bjorn think.
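A minimal sketch of the debug-level variant being suggested here, as a hypothetical alternative to the posted err.c hunk (same condition as the patch, with aer_print_error() swapped for a pci_dbg()-level message; not part of the series):

	info.severity = AER_FATAL;
	/* Link recovered: only log the endpoint's AER details at debug level */
	if ((type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_RC_END) &&
	    aer_get_device_error_info(dev, &info, true))
		pci_dbg(dev, "recovered fatal error, status/mask=%08x/%08x\n",
			info.status, info.mask);

With that, the details would only show up once dynamic debug or a debug log level is enabled, which is the trade-off Shuai objects to above.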
>
>>
>>> } else {
>>> pci_walk_bridge(bridge, report_normal_detected, &status);
>>> }
>>> @@ -259,6 +267,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev
>>> *dev,
>>> if (host->native_aer || pcie_ports_native) {
>>> pcie_clear_device_status(dev);
>>> pci_aer_clear_nonfatal_status(dev);
>>> + pci_aer_clear_fatal_status(dev);
>>
>> I think we already clear the fatal status in the DPC driver, so why do it again?
>
> The DPC driver only clears the fatal status of the err_port, not the
> err_dev. err_dev and err_port are indeed easy to confuse, so I have
> differentiated them again in patch 1.
>
Got it.
>>
>>> }
>>> pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
>>
--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v2 0/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
2024-11-12 13:54 [PATCH v2 0/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd Shuai Xue
2024-11-12 13:54 ` [PATCH v2 1/2] PCI/DPC: Run recovery on device that detected the error Shuai Xue
2024-11-12 13:54 ` [PATCH v2 2/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd Shuai Xue
@ 2024-12-24 11:03 ` Shuai Xue
2025-01-22 10:59 ` Shuai Xue
2 siblings, 1 reply; 17+ messages in thread
From: Shuai Xue @ 2024-12-24 11:03 UTC (permalink / raw)
To: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
On 2024/11/12 21:54, Shuai Xue wrote:
> changes since v1:
> - rewrite commit log per Bjorn
> - refactor aer_get_device_error_info to reduce duplication per Keith
> - fix to avoid reporting fatal errors twice for root and downstream ports per Keith
>
> The AER driver has historically avoided reading the configuration space of an
> endpoint or RCiEP that reported a fatal error, considering the link to that
> device unreliable. Consequently, when a fatal error occurs, the AER and DPC
> drivers do not report specific error types, resulting in logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> nvme 0000:34:00.0: ready 0ms after DPC
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> AER status registers are sticky and Write-1-to-clear. If the link recovered
> after hot reset, we can still safely access AER status of the error device.
> In such case, report fatal errors which helps to figure out the error root
> case.
>
> - Patch 1/2 identifies the error device by SOURCE ID register
> - Patch 2/3 reports the AER status if link recoverd.
>
> After this patch set, the logs like:
>
> pcieport 0000:30:03.0: EDR: EDR event received
> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> pcieport 0000:30:03.0: AER: broadcast error_detected message
> nvme nvme0: frozen state error detected, reset controller
> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
> nvme 0000:34:00.0: ready 0ms after DPC
> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
> nvme 0000:34:00.0: [ 4] DLP (First)
> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>
> Shuai Xue (2):
> PCI/DPC: Run recovery on device that detected the error
> PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
>
> drivers/pci/pci.h | 5 +++--
> drivers/pci/pcie/aer.c | 11 +++++++----
> drivers/pci/pcie/dpc.c | 32 +++++++++++++++++++++++++-------
> drivers/pci/pcie/edr.c | 35 ++++++++++++++++++-----------------
> drivers/pci/pcie/err.c | 9 +++++++++
> 5 files changed, 62 insertions(+), 30 deletions(-)
>
Hi, all,
Gentle ping.
Best Regards,
Shuai
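On the cover letter's point that AER status registers are sticky and Write-1-to-clear: a small sketch of what that means in practice, assuming dev->aer_cap is valid (illustration only):

	u32 status;
	int aer = dev->aer_cap;

	/* Sticky (RW1CS) status survives the hot reset ... */
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		/* ... and writing the set bits back clears them */
		pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);

This is essentially what the existing pci_aer_clear_fatal_status() and pci_aer_clear_nonfatal_status() helpers do, restricted to the fatal or non-fatal severity bits respectively.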
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [PATCH v2 0/2] PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
2024-12-24 11:03 ` [PATCH v2 0/2] " Shuai Xue
@ 2025-01-22 10:59 ` Shuai Xue
0 siblings, 0 replies; 17+ messages in thread
From: Shuai Xue @ 2025-01-22 10:59 UTC (permalink / raw)
To: linux-pci, linux-kernel, linuxppc-dev, bhelgaas, kbusch
Cc: mahesh, oohall, sathyanarayanan.kuppuswamy
Hi, all,
Gentle ping.
Best Regards,
Shuai
On 2024/12/24 19:03, Shuai Xue wrote:
>
>
> On 2024/11/12 21:54, Shuai Xue wrote:
>> changes since v1:
>> - rewrite commit log per Bjorn
>> - refactor aer_get_device_error_info to reduce duplication per Keith
>> - fix to avoid reporting fatal errors twice for root and downstream ports per Keith
>>
>> The AER driver has historically avoided reading the configuration space of an
>> endpoint or RCiEP that reported a fatal error, considering the link to that
>> device unreliable. Consequently, when a fatal error occurs, the AER and DPC
>> drivers do not report specific error types, resulting in logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> nvme 0000:34:00.0: ready 0ms after DPC
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> AER status registers are sticky and Write-1-to-clear. If the link recovered
>> after hot reset, we can still safely access AER status of the error device.
>> In such case, report fatal errors which helps to figure out the error root
>> case.
>>
>> - Patch 1/2 identifies the error device by SOURCE ID register
>> - Patch 2/3 reports the AER status if link recoverd.
>>
>> After this patch set, the logs like:
>>
>> pcieport 0000:30:03.0: EDR: EDR event received
>> pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> pcieport 0000:30:03.0: AER: broadcast error_detected message
>> nvme nvme0: frozen state error detected, reset controller
>> pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>> nvme 0000:34:00.0: ready 0ms after DPC
>> nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>> nvme 0000:34:00.0: device [144d:a804] error status/mask=00000010/00504000
>> nvme 0000:34:00.0: [ 4] DLP (First)
>> pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> Shuai Xue (2):
>> PCI/DPC: Run recovery on device that detected the error
>> PCI/AER: Report fatal errors of RCiEP and EP if link recoverd
>>
>> drivers/pci/pci.h | 5 +++--
>> drivers/pci/pcie/aer.c | 11 +++++++----
>> drivers/pci/pcie/dpc.c | 32 +++++++++++++++++++++++++-------
>> drivers/pci/pcie/edr.c | 35 ++++++++++++++++++-----------------
>> drivers/pci/pcie/err.c | 9 +++++++++
>> 5 files changed, 62 insertions(+), 30 deletions(-)
>>
>
> Hi, all,
>
> Gentle ping.
>
> Best Regards,
> Shuai
^ permalink raw reply [flat|nested] 17+ messages in thread