From: Chuck Lever
Date: Thu, 10 Jul 2025 18:53:37 -0400
Subject: Re: [PATCH] guestfs: add scsi extra storage option
To: Luis Chamberlain, Daniel Gomez, Swarna Prabhu, swarnassp6@gmail.com,
    kdevops@lists.linux.dev
Cc: Luis Chamberlain
References: <20250710173517.3003494-1-mcgrof@kernel.org>
In-Reply-To: <20250710173517.3003494-1-mcgrof@kernel.org>

On 7/10/25 1:35 PM, Luis Chamberlain wrote:
> From: Luis Chamberlain
>
> Add scsi device support for virtualization.
>
> Generated-by: ChatGPT Codex
> Signed-off-by: Luis Chamberlain
> ---
>  defconfigs/blktests_scsi                      |  2 +-
>  kconfigs/Kconfig.libvirt                      | 25 ++++++++++++-------
>  playbooks/roles/gen_nodes/defaults/main.yml   |  1 +
>  .../roles/gen_nodes/templates/Vagrantfile.j2  | 11 ++++++--
>  playbooks/roles/gen_nodes/templates/drives.j2 | 24 ++++++++++++++++--
>  .../roles/gen_nodes/templates/gen_drives.j2   | 21 ++++++++++------
>  .../kdevops_nodes_split_start.j2.yaml         | 10 ++++++++
>  workflows/steady_state/Kconfig                |  1 +
>  8 files changed, 74 insertions(+), 21 deletions(-)
>
> diff --git a/defconfigs/blktests_scsi b/defconfigs/blktests_scsi
> index d49e72893690..28462939e11e 100644
> --- a/defconfigs/blktests_scsi
> +++ b/defconfigs/blktests_scsi
> @@ -9,7 +9,7 @@ CONFIG_GUESTFS_BRINGUP_DEBUG_1=y
>  CONFIG_BOOTLINUX=y
>  CONFIG_BOOTLINUX_9P=y
>
> -CONFIG_LIBVIRT_EXTRA_STORAGE_DRIVE_NVME=y
> +CONFIG_LIBVIRT_EXTRA_STORAGE_DRIVE_SCSI=y
>
>  CONFIG_WORKFLOWS_TESTS=y
>  CONFIG_WORKFLOWS_LINUX_TESTS=y
> diff --git a/kconfigs/Kconfig.libvirt b/kconfigs/Kconfig.libvirt
> index 8654eb9b1a06..f737b79faea4 100644
> --- a/kconfigs/Kconfig.libvirt
> +++ b/kconfigs/Kconfig.libvirt
> @@ -487,7 +487,7 @@ config LIBVIRT_MACHINE_TYPE_DEFAULT
>  	  is the old i440x, so you will not get PCIe support.
>
>  	  We only want to support PCI-E capable guests with libguest so the
> -	  deafult is not allowed on libguest.
> +	  default is not allowed on libguest.
>
>  config LIBVIRT_MACHINE_TYPE_Q35
>  	bool "q35"
> @@ -548,11 +548,18 @@ config LIBVIRT_EXTRA_STORAGE_DRIVE_VIRTIO
>  	  you won't be able to test ZNS.
>
>  config LIBVIRT_EXTRA_STORAGE_DRIVE_IDE
> -	bool "ide"
> -	output yaml
> -	help
> -	  Use the QEMU ide driver for extra storage drives. This is useful for
> -	  really old Linux distributions that lack the virtio backend driver.
> +	bool "ide"
> +	output yaml
> +	help
> +	  Use the QEMU ide driver for extra storage drives. This is useful for
> +	  really old Linux distributions that lack the virtio backend driver.
> +
> +config LIBVIRT_EXTRA_STORAGE_DRIVE_SCSI
> +	bool "scsi"
> +	output yaml
> +	help
> +	  Use the QEMU SCSI driver for extra storage drives. This relies on a
> +	  virtio-scsi controller with scsi-hd devices attached.
>
>  endchoice
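
Nice that the blktests_scsi defconfig flips over in the same patch. If
I'm reading the defconfig flow right, the new backend is then reachable
with the usual steps (target name inferred from the defconfig file
name above):

  make defconfig-blktests_scsi
  make bringup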

> @@ -817,7 +824,7 @@ config LIBVIRT_AIO_MODE_NATIVE
>  	help
>  	  Use the aio=native mode. For some older kernels it is known that
>  	  native will cause corruption if used on ext4 or xfs filesystem if
> -	  you also use cace=none. This corruption is documented for RHEL:
> +	  you also use cache=none. This corruption is documented for RHEL:
>
>  	    https://access.redhat.com/articles/41313
>  	    https://bugzilla.redhat.com/show_bug.cgi?id=615309
> @@ -1104,7 +1111,7 @@ config LIBVIRT_ENABLE_GDB
>  	output yaml
>  	help
>  	  Select this option if you want to enable debugging support for GDB.
> -	  By default , it is assumed that gdb is disabled since we dont want
> +	  By default , it is assumed that gdb is disabled since we don't want
>  	  to complicate this for the CI runs. If enabled then libvirt guest
>  	  xml for each guest will be configured to use gdb on a specific
>  	  tcp port.
> @@ -1116,7 +1123,7 @@ config LIBVIRT_GDB_BASEPORT
>  	depends on LIBVIRT_ENABLE_GDB
>  	help
>  	  This option defines the base port to be used for the GDB.
> -	  Esentially we need to make QEMU listen for an incoming connection from
> +	  Essentially we need to make QEMU listen for an incoming connection from
>  	  gdb on a TCP port. The default port is chosen to be 1234. However we
>  	  introduce variability for assigning the port to each guest by defining
>  	  a base port and adding an index to it based on the number of libvrt guest
> diff --git a/playbooks/roles/gen_nodes/defaults/main.yml b/playbooks/roles/gen_nodes/defaults/main.yml
> index 6a899be45128..2cf450b84281 100644
> --- a/playbooks/roles/gen_nodes/defaults/main.yml
> +++ b/playbooks/roles/gen_nodes/defaults/main.yml
> @@ -70,6 +70,7 @@ libvirt_extra_drive_id_prefix: 'drv'
>  libvirt_extra_storage_drive_nvme: False
>  libvirt_extra_storage_drive_virtio: False
>  libvirt_extra_storage_drive_ide: False
> +libvirt_extra_storage_drive_scsi: False
>  libvirt_extra_storage_aio_mode: "native"
>  libvirt_extra_storage_aio_cache_mode: "none"
>  # Note that NVMe on qemu does not allow the physical block size
> diff --git a/playbooks/roles/gen_nodes/templates/Vagrantfile.j2 b/playbooks/roles/gen_nodes/templates/Vagrantfile.j2
> index 2298c3455251..a52f5566280e 100644
> --- a/playbooks/roles/gen_nodes/templates/Vagrantfile.j2
> +++ b/playbooks/roles/gen_nodes/templates/Vagrantfile.j2
> @@ -244,7 +244,7 @@ Vagrant.configure("2") do |config|
>        #
>        # Just create a PCI or PCI-E root bus dedicated for extra drives. We
>        # use 0x08 to place this PCI / PCI-E root bus as we know this is
> -      # avilable on modern x86-64 systems. Eventually we may need to bump
> +      # available on modern x86-64 systems. Eventually we may need to bump
>        # this to 0x9, but it would be better instead to have vagant-libvirt
>        # speak "add a new PCI or PCI-E root bus" and "add extra drives" whether
>        # that is nvme or virtio.
> @@ -252,7 +252,7 @@ Vagrant.configure("2") do |config|
>  {% if not libvirt_override_machine_type %}
>        # For i440x on x86_64 (default on libvirt as of today) we use PCI, not
>        # PCI-E. Below assumes i440x. i440x cannot support CXL as it does not
> -      # suport PCI-E.
> +      # support PCI-E.
>        libvirt.qemuargs :value => "-device"
>        libvirt.qemuargs :value => "pci-bridge,id=custom-pci-for-{{ extra_disk_driver }},chassis_nr=1,bus=pci.0,addr=0x8"
>  {% else %}
>
> @@ -417,6 +417,13 @@
>          libvirt.qemuargs :value => "file=#{extra_disk},format={{ libvirt_extra_drive_format }},if=none,aio={{ libvirt_extra_storage_aio_mode }},cache={{ libvirt_extra_storage_aio_cache_mode }},id=#{disk_id}"
>          libvirt.qemuargs :value => "-device"
>          libvirt.qemuargs :value => "virtio-blk-pci,drive=#{disk_id},id=virtio-#{disk_id},serial=#{serial_id},bus=#{bus_for_extra_drives},addr=#{pci_function},iothread=kdevops-virtio-iothread-#{port}#{extra_drive_largio_args},logical_block_size=#{virtio_lbs},physical_block_size=#{virtio_pbs}"
> +{% elif libvirt_extra_storage_drive_scsi %}
> +        libvirt.qemuargs :value => "-device"
> +        libvirt.qemuargs :value => "virtio-scsi-pci,id=scsi#{port},bus=#{bus_for_extra_drives},addr=#{pci_function}"
> +        libvirt.qemuargs :value => "-drive"
> +        libvirt.qemuargs :value => "file=#{extra_disk},format={{ libvirt_extra_drive_format }},if=none,aio={{ libvirt_extra_storage_aio_mode }},cache={{ libvirt_extra_storage_aio_cache_mode }},id=#{disk_id}"
> +        libvirt.qemuargs :value => "-device"
> +        libvirt.qemuargs :value => "scsi-hd,drive=#{disk_id},bus=scsi#{port}.0"
>  {% elif libvirt_extra_storage_drive_nvme %}
>          if ! largio
>            nvme_lbs = "{{ libvirt_extra_storage_nvme_logical_block_size }}"
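
To check my understanding of the topology: for one extra drive these
qemuargs should expand to something like the following (the ids, path,
and PCI address here are made up for illustration; format, aio, and
cache come from the role defaults above):

  -device virtio-scsi-pci,id=scsi0,bus=custom-pci-for-scsi,addr=0x1
  -drive file=/path/to/pool/kdevops-drv0.qcow2,format=qcow2,if=none,aio=native,cache=none,id=drv0
  -device scsi-hd,drive=drv0,bus=scsi0.0

One thing worth double-checking: if this branch is evaluated once per
extra disk, the virtio-scsi-pci controller and its scsi#{port} id would
be emitted more than once per guest, and QEMU rejects duplicate ids.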

> diff --git a/playbooks/roles/gen_nodes/templates/drives.j2 b/playbooks/roles/gen_nodes/templates/drives.j2
> index 8327cd430981..1241b88a9379 100644
> --- a/playbooks/roles/gen_nodes/templates/drives.j2
> +++ b/playbooks/roles/gen_nodes/templates/drives.j2
> @@ -40,6 +40,26 @@ the drives can vary by type, so we have one macro by type of drive.
>  {% endfor %}
>
>  {%- endmacro -%}
> +{%- macro gen_drive_scsi(num_drives,
> +                         kdevops_storage_pool_path,
> +                         hostname,
> +                         libvirt_extra_drive_format,
> +                         libvirt_extra_storage_aio_mode,
> +                         libvirt_extra_storage_aio_cache_mode) -%}
> +
> +{% for n in range(0,num_drives) %}
> +
> +
> +
> +
> +
> +
> +
> +
> +
> +{% endfor %}
> +
> +{%- endmacro -%}
>  {%- macro gen_drive_large_io_virtio(libvirt_largeio_logical_compat,
>                                      libvirt_largeio_logical_compat_size,
>                                      libvirt_largeio_pow_limit,
> @@ -49,7 +69,7 @@ the drives can vary by type, so we have one macro by type of drive.
>                                      libvirt_extra_storage_aio_mode,
>                                      libvirt_extra_storage_aio_cache_mode,
>                                      kdevops_storage_pool_path) -%}
> -
> +
>  {% set ns = namespace(lbs_idx=0) %}
>  {% set max_pbs = libvirt_largeio_logical_compat_size * (2 ** libvirt_largeio_pow_limit) %}
>  {% for n in range(0,libvirt_largeio_pow_limit+1) %}
> @@ -106,7 +126,7 @@ the drives can vary by type, so we have one macro by type of drive.
>                                      libvirt_extra_storage_aio_mode,
>                                      libvirt_extra_storage_aio_cache_mode,
>                                      kdevops_storage_pool_path) -%}
> -
> +
>  {% set ns = namespace(lbs_idx=0) %}
>  {% set max_pbs = libvirt_largeio_logical_compat_size * (2 ** libvirt_largeio_pow_limit) %}
>  {% for n in range(0,libvirt_largeio_pow_limit+1) %}
> diff --git a/playbooks/roles/gen_nodes/templates/gen_drives.j2 b/playbooks/roles/gen_nodes/templates/gen_drives.j2
> index 874e5b0623b9..2de13da4ab8e 100644
> --- a/playbooks/roles/gen_nodes/templates/gen_drives.j2
> +++ b/playbooks/roles/gen_nodes/templates/gen_drives.j2
> @@ -19,14 +19,21 @@
>                                  kdevops_storage_pool_path) }}
>  {% else %}
>  {{ drives.gen_drive_virtio(4,
> -                           kdevops_storage_pool_path,
> -                           hostname,
> -                           libvirt_extra_drive_format,
> -                           libvirt_extra_storage_aio_mode,
> -                           libvirt_extra_storage_aio_cache_mode,
> -                           libvirt_extra_storage_virtio_logical_block_size,
> -                           libvirt_extra_storage_virtio_physical_block_size) }}
> +                            kdevops_storage_pool_path,
> +                            hostname,
> +                            libvirt_extra_drive_format,
> +                            libvirt_extra_storage_aio_mode,
> +                            libvirt_extra_storage_aio_cache_mode,
> +                            libvirt_extra_storage_virtio_logical_block_size,
> +                            libvirt_extra_storage_virtio_physical_block_size) }}
>  {% endif %}
> +{% elif libvirt_extra_storage_drive_scsi %}
> +{{ drives.gen_drive_scsi(4,
> +                         kdevops_storage_pool_path,
> +                         hostname,
> +                         libvirt_extra_drive_format,
> +                         libvirt_extra_storage_aio_mode,
> +                         libvirt_extra_storage_aio_cache_mode) }}
>  {% elif libvirt_extra_storage_drive_nvme %}
>  {% if libvirt_largeio_enable %}
>  {{ drives.gen_drive_large_io_nvme(libvirt_largeio_logical_compat,
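
Since gen_drive_scsi generates the libvirt XML side of this (the
guestfs path), for comparison, the general shape of a virtio-scsi disk
definition in domain XML looks roughly like the below. Illustrative
only: the pool path and serial are made up, and the macro's exact
output may differ:

  <controller type='scsi' model='virtio-scsi'/>
  <disk type='file' device='disk'>
    <driver name='qemu' type='qcow2' cache='none' io='native'/>
    <source file='/path/to/pool/hostname-data.qcow2'/>
    <target dev='sdb' bus='scsi'/>
    <serial>kdevops0</serial>
  </disk>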

> diff --git a/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml b/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml
> index e5b88efefc63..91e105cd3bff 100644
> --- a/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml
> +++ b/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml
> @@ -76,6 +76,16 @@ vagrant_global:
>            physical_block_size: {{ libvirt_extra_storage_virtio_physical_block_size }}
>            logical_block_size: {{ libvirt_extra_storage_virtio_logical_block_size }}
>  {% endif %}
> +{% elif libvirt_extra_storage_drive_scsi %}
> +      extra_disks:
> +        data:
> +          size: 102400
> +        scratch:
> +          size: 102400
> +        extra1:
> +          size: 102400
> +        extra2:
> +          size: 102400
>  {% elif libvirt_extra_storage_drive_nvme %}
>        extra_disks:
>          data:
> diff --git a/workflows/steady_state/Kconfig b/workflows/steady_state/Kconfig
> index 335b833cfbcc..f7064d078c0a 100644
> --- a/workflows/steady_state/Kconfig
> +++ b/workflows/steady_state/Kconfig
> @@ -6,6 +6,7 @@ config SSD_STEADY_STATE_DEVICE
>  	default "/dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_kdevops1" if LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_NVME
>  	default "/dev/disk/by-id/virtio-kdevops1" if LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_VIRTIO
>  	default "/dev/disk/by-id/ata-QEMU_HARDDISK_kdevops1" if LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_IDE
> +	default "/dev/sdc" if LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_SCSI
>  	default "/dev/nvme2n1" if TERRAFORM_AWS_INSTANCE_M5AD_4XLARGE
>  	default "/dev/nvme1n1" if TERRAFORM_GCE
>  	default "/dev/sdd" if TERRAFORM_AZURE

I would split the spelling corrections into a separate patch. That
might even be helpful for LLM training.

This patch looks like it is missing a change to
playbooks/roles/volume_group/tasks/guestfs.yml: add a task specific to
guestfs SCSI drives after the existing IDE task (untested sketch in the
P.S. below).

You can test by running the pynfs workflow, which will try to set up a
single guest with NFSD and an LVM volume group built from these disk
devices.

-- 
Chuck Lever
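
P.S. Something along these lines is what I had in mind. Untested, and
the task shape and variable names here are my guesses rather than what
the role actually uses, so adjust to match the neighboring IDE task:

  - name: Add guestfs SCSI extra drives to the volume group device list
    ansible.builtin.set_fact:
      volume_device_list: "{{ volume_device_list | default([]) + ['/dev/' ~ item] }}"
    loop:
      - sdb
      - sdc
      - sdd
      - sde
    when:
      - libvirt_extra_storage_drive_scsi|bool

If the guest's root disk sits at sda, the four extra drives should
enumerate as sdb through sde, which lines up with the /dev/sdc
steady-state default above.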