* [PATCH] enable RAID for SATA under VMD
@ 2023-02-16 4:41 Kevin Friedberg
2023-03-20 6:26 ` Kevin Friedberg
0 siblings, 1 reply; 7+ messages in thread
From: Kevin Friedberg @ 2023-02-16 4:41 UTC (permalink / raw)
To: linux-raid, mariusz.tkaczyk; +Cc: Kevin Friedberg
Detect when a SATA controller has been mapped under Intel Alderlake RST
VMD, so that it can use the VMD controller's RAID capabilities. Create
new device type SYS_DEV_SATA_VMD and list separate controller to prevent
mixing with the NVMe SYS_DEV_VMD devices on the same VMD domain.
Signed-off-by: Kevin Friedberg <kev.friedberg@gmail.com>
---
platform-intel.c | 21 ++++++++++++++++++---
platform-intel.h | 1 +
super-intel.c | 28 ++++++++++++++++++----------
3 files changed, 37 insertions(+), 13 deletions(-)
diff --git a/platform-intel.c b/platform-intel.c
index 757f0b1b..914164c0 100644
--- a/platform-intel.c
+++ b/platform-intel.c
@@ -64,9 +64,10 @@ struct sys_dev *find_driver_devices(const char *bus, const char *driver)
if (strcmp(driver, "isci") == 0)
type = SYS_DEV_SAS;
- else if (strcmp(driver, "ahci") == 0)
+ else if (strcmp(driver, "ahci") == 0) {
+ vmd = find_driver_devices("pci", "vmd");
type = SYS_DEV_SATA;
- else if (strcmp(driver, "nvme") == 0) {
+ } else if (strcmp(driver, "nvme") == 0) {
/* if looking for nvme devs, first look for vmd */
vmd = find_driver_devices("pci", "vmd");
type = SYS_DEV_NVME;
@@ -115,6 +116,17 @@ struct sys_dev *find_driver_devices(const char *bus, const char *driver)
free(rp);
}
+ /* change sata type if under a vmd controller */
+ if (type == SYS_DEV_SATA) {
+ struct sys_dev *dev;
+ char *rp = realpath(path, NULL);
+ for (dev = vmd; dev; dev = dev->next) {
+ if ((strncmp(dev->path, rp, strlen(dev->path)) == 0))
+ type = SYS_DEV_SATA_VMD;
+ }
+ free(rp);
+ }
+
/* if it's not Intel device or mark as VMD connected - skip it. */
if (devpath_to_vendor(path) != 0x8086 || skip == 1)
continue;
@@ -166,7 +178,8 @@ struct sys_dev *find_driver_devices(const char *bus, const char *driver)
}
closedir(driver_dir);
- if (vmd) {
+ /* nvme vmd needs a list separate from sata vmd */
+ if (vmd && type == SYS_DEV_NVME) {
if (list)
list->next = vmd;
else
@@ -273,6 +286,7 @@ struct sys_dev *find_intel_devices(void)
free_sys_dev(&intel_devices);
isci = find_driver_devices("pci", "isci");
+ /* Searching for AHCI will return list of SATA and SATA VMD controllers */
ahci = find_driver_devices("pci", "ahci");
/* Searching for NVMe will return list of NVMe and VMD controllers */
nvme = find_driver_devices("pci", "nvme");
@@ -638,6 +652,7 @@ const struct imsm_orom *find_imsm_efi(struct sys_dev *hba)
break;
case SYS_DEV_VMD:
+ case SYS_DEV_SATA_VMD:
for (i = 0; i < ARRAY_SIZE(vmd_efivars); i++) {
if (!read_efi_variable(&orom, sizeof(orom),
vmd_efivars[i], VENDOR_GUID))
diff --git a/platform-intel.h b/platform-intel.h
index 6238d23f..2c0f4e39 100644
--- a/platform-intel.h
+++ b/platform-intel.h
@@ -196,6 +196,7 @@ enum sys_dev_type {
SYS_DEV_SATA,
SYS_DEV_NVME,
SYS_DEV_VMD,
+ SYS_DEV_SATA_VMD,
SYS_DEV_MAX
};
diff --git a/super-intel.c b/super-intel.c
index 89fac626..13671be1 100644
--- a/super-intel.c
+++ b/super-intel.c
@@ -626,7 +626,8 @@ static const char *_sys_dev_type[] = {
[SYS_DEV_SAS] = "SAS",
[SYS_DEV_SATA] = "SATA",
[SYS_DEV_NVME] = "NVMe",
- [SYS_DEV_VMD] = "VMD"
+ [SYS_DEV_VMD] = "VMD",
+ [SYS_DEV_SATA_VMD] = "SATA VMD"
};
const char *get_sys_dev_type(enum sys_dev_type type)
@@ -2559,6 +2560,8 @@ static void print_found_intel_controllers(struct sys_dev *elem)
if (elem->type == SYS_DEV_VMD)
fprintf(stderr, "VMD domain");
+ else if (elem->type == SYS_DEV_SATA_VMD)
+ fprintf(stderr, "SATA VMD domain");
else
fprintf(stderr, "RAID controller");
@@ -2729,8 +2732,9 @@ static int detail_platform_imsm(int verbose, int enumerate_only, char *controlle
if (!find_imsm_capability(hba)) {
char buf[PATH_MAX];
pr_err("imsm capabilities not found for controller: %s (type %s)\n",
- hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path,
- get_sys_dev_type(hba->type));
+ hba->type == SYS_DEV_VMD || hba->type == SYS_DEV_SATA_VMD ?
+ vmd_domain_to_controller(hba, buf) :
+ hba->path, get_sys_dev_type(hba->type));
continue;
}
result = 0;
@@ -2783,11 +2787,12 @@ static int detail_platform_imsm(int verbose, int enumerate_only, char *controlle
printf(" I/O Controller : %s (%s)\n",
hba->path, get_sys_dev_type(hba->type));
- if (hba->type == SYS_DEV_SATA) {
+ if (hba->type == SYS_DEV_SATA || hba->type == SYS_DEV_SATA_VMD) {
host_base = ahci_get_port_count(hba->path, &port_count);
if (ahci_enumerate_ports(hba->path, port_count, host_base, verbose)) {
if (verbose > 0)
- pr_err("failed to enumerate ports on SATA controller at %s.\n", hba->pci_id);
+ pr_err("failed to enumerate ports on %s controller at %s.\n",
+ get_sys_dev_type(hba->type), hba->pci_id);
result |= 2;
}
}
@@ -2817,7 +2822,8 @@ static int export_detail_platform_imsm(int verbose, char *controller_path)
if (!find_imsm_capability(hba) && verbose > 0) {
char buf[PATH_MAX];
pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n",
- hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path);
+ hba->type == SYS_DEV_VMD || hba->type == SYS_DEV_SATA_VMD ?
+ vmd_domain_to_controller(hba, buf) : hba->path);
}
else
result = 0;
@@ -2826,7 +2832,7 @@ static int export_detail_platform_imsm(int verbose, char *controller_path)
const struct orom_entry *entry;
for (entry = orom_entries; entry; entry = entry->next) {
- if (entry->type == SYS_DEV_VMD) {
+ if (entry->type == SYS_DEV_VMD || entry->type == SYS_DEV_SATA_VMD) {
for (hba = list; hba; hba = hba->next)
print_imsm_capability_export(&entry->orom);
continue;
@@ -4742,10 +4748,12 @@ static int find_intel_hba_capability(int fd, struct intel_super *super, char *de
" but the container is assigned to Intel(R) %s %s (",
devname,
get_sys_dev_type(hba_name->type),
- hba_name->type == SYS_DEV_VMD ? "domain" : "RAID controller",
+ hba_name->type == SYS_DEV_VMD || hba_name->type == SYS_DEV_SATA_VMD ?
+ "domain" : "RAID controller",
hba_name->pci_id ? : "Err!",
get_sys_dev_type(super->hba->type),
- hba->type == SYS_DEV_VMD ? "domain" : "RAID controller");
+ hba->type == SYS_DEV_VMD || hba_name->type == SYS_DEV_SATA_VMD ?
+ "domain" : "RAID controller");
while (hba) {
fprintf(stderr, "%s", hba->pci_id ? : "Err!");
@@ -11234,7 +11242,7 @@ static const char *imsm_get_disk_controller_domain(const char *path)
hba = find_disk_attached_hba(-1, path);
if (hba && hba->type == SYS_DEV_SAS)
drv = "isci";
- else if (hba && hba->type == SYS_DEV_SATA)
+ else if (hba && (hba->type == SYS_DEV_SATA || hba->type == SYS_DEV_SATA_VMD))
drv = "ahci";
else if (hba && hba->type == SYS_DEV_VMD)
drv = "vmd";
--
2.39.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-02-16 4:41 [PATCH] enable RAID for SATA under VMD Kevin Friedberg
@ 2023-03-20 6:26 ` Kevin Friedberg
2023-03-20 8:35 ` Mariusz Tkaczyk
0 siblings, 1 reply; 7+ messages in thread
From: Kevin Friedberg @ 2023-03-20 6:26 UTC (permalink / raw)
To: linux-raid, mariusz.tkaczyk; +Cc: Kevin Friedberg
Hi Mariusz,
You mentioned on the previous version of this patch that it might be a
while before it could be tested. Have you had a chance to try this
revision?
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-03-20 6:26 ` Kevin Friedberg
@ 2023-03-20 8:35 ` Mariusz Tkaczyk
2023-04-28 7:30 ` Kinga Tanska
0 siblings, 1 reply; 7+ messages in thread
From: Mariusz Tkaczyk @ 2023-03-20 8:35 UTC (permalink / raw)
To: Kevin Friedberg; +Cc: linux-raid
On Mon, 20 Mar 2023 02:26:31 -0400
Kevin Friedberg <kev.friedberg@gmail.com> wrote:
> Hi Mariusz,
>
> You mentioned on the previous version of this patch that it might be a
> while before it could be tested. Have you had a chance to try this
> revision?
Hi Kevin,
Sorry for that... I totally lost it. Thanks for the reminder. We will test the change
soon.
Thanks,
Mariusz
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-03-20 8:35 ` Mariusz Tkaczyk
@ 2023-04-28 7:30 ` Kinga Tanska
2023-05-05 7:31 ` Kevin Friedberg
0 siblings, 1 reply; 7+ messages in thread
From: Kinga Tanska @ 2023-04-28 7:30 UTC (permalink / raw)
To: Kevin Friedberg; +Cc: Mariusz Tkaczyk, linux-raid
On Mon, 20 Mar 2023 09:35:45 +0100
Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com> wrote:
> On Mon, 20 Mar 2023 02:26:31 -0400
> Kevin Friedberg <kev.friedberg@gmail.com> wrote:
>
> > Hi Mariusz,
> >
> > You mentioned on the previous version of this patch that it might
> > be a while before it could be tested. Have you had a chance to try
> > this revision?
>
> Hi Kevin,
> Sorry for that... I totally lost it. Thanks for the reminder. We will test
> the change soon.
>
> Thanks,
> Mariusz
Hi,
We've been able to test this change and we haven't found problems.
Regards,
Kinga
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-04-28 7:30 ` Kinga Tanska
@ 2023-05-05 7:31 ` Kevin Friedberg
2023-05-05 7:44 ` Mariusz Tkaczyk
0 siblings, 1 reply; 7+ messages in thread
From: Kevin Friedberg @ 2023-05-05 7:31 UTC (permalink / raw)
To: Kinga Tanska; +Cc: Mariusz Tkaczyk, linux-raid
On Fri, Apr 28, 2023 at 3:31 AM Kinga Tanska
<kinga.tanska@linux.intel.com> wrote:
> Hi,
>
> We've been able to test this change and we haven't found problems.
>
> Regards,
> Kinga
Great! What are the next steps to get it included in a future release?
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-05-05 7:31 ` Kevin Friedberg
@ 2023-05-05 7:44 ` Mariusz Tkaczyk
2023-05-08 20:29 ` Jes Sorensen
0 siblings, 1 reply; 7+ messages in thread
From: Mariusz Tkaczyk @ 2023-05-05 7:44 UTC (permalink / raw)
To: Kevin Friedberg; +Cc: Kinga Tanska, linux-raid
On Fri, 5 May 2023 03:31:11 -0400
Kevin Friedberg <kev.friedberg@gmail.com> wrote:
> On Fri, Apr 28, 2023 at 3:31 AM Kinga Tanska
> <kinga.tanska@linux.intel.com> wrote:
>
> > Hi,
> >
> > We've been able to test this change and we haven't found problems.
> >
> > Regards,
> > Kinga
>
> Great! What are the next steps to get it included in a future release?
See patchwork:
https://patchwork.kernel.org/project/linux-raid/patch/20230216044134.30581-1-kev.friedberg@gmail.com/
I moved the patch to "awaiting upstream". Now it is up to Jes.
You will get mail, like here:
https://lore.kernel.org/linux-raid/5f493463-6e69-419f-affc-b0de8424fa1a@trained-monkey.org/
Thanks,
Mariusz
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] enable RAID for SATA under VMD
2023-05-05 7:44 ` Mariusz Tkaczyk
@ 2023-05-08 20:29 ` Jes Sorensen
0 siblings, 0 replies; 7+ messages in thread
From: Jes Sorensen @ 2023-05-08 20:29 UTC (permalink / raw)
To: Mariusz Tkaczyk, Kevin Friedberg; +Cc: Kinga Tanska, linux-raid
On 5/5/23 03:44, Mariusz Tkaczyk wrote:
> On Fri, 5 May 2023 03:31:11 -0400
> Kevin Friedberg <kev.friedberg@gmail.com> wrote:
>
>> On Fri, Apr 28, 2023 at 3:31 AM Kinga Tanska
>> <kinga.tanska@linux.intel.com> wrote:
>>
>>> Hi,
>>>
>>> We've been able to test this change and we haven't found problems.
>>>
>>> Regards,
>>> Kinga
>>
>> Great! What are the next steps to get it included in a future release?
> See patchwork:
> https://patchwork.kernel.org/project/linux-raid/patch/20230216044134.30581-1-kev.friedberg@gmail.com/
>
> I moved the patch to "awaiting upstream". Now it is up to Jes.
> You will get mail, like here:
> https://lore.kernel.org/linux-raid/5f493463-6e69-419f-affc-b0de8424fa1a@trained-monkey.org/
Applied!
Thanks Mariusz for reviewing.
Cheers,
Jes
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2023-05-08 20:29 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-02-16 4:41 [PATCH] enable RAID for SATA under VMD Kevin Friedberg
2023-03-20 6:26 ` Kevin Friedberg
2023-03-20 8:35 ` Mariusz Tkaczyk
2023-04-28 7:30 ` Kinga Tanska
2023-05-05 7:31 ` Kevin Friedberg
2023-05-05 7:44 ` Mariusz Tkaczyk
2023-05-08 20:29 ` Jes Sorensen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).