public inbox for kdevops@lists.linux.dev
 help / color / mirror / Atom feed
From: Luis Chamberlain <mcgrof@kernel.org>
To: kdevops@lists.linux.dev
Cc: Luis Chamberlain <mcgrof@kernel.org>
Subject: [PATCH 3/8] libvirt: move zns, largeio and cxl to their own files
Date: Thu,  7 Mar 2024 16:03:54 -0800	[thread overview]
Message-ID: <20240308000400.1646823-4-mcgrof@kernel.org> (raw)
In-Reply-To: <20240308000400.1646823-1-mcgrof@kernel.org>

The features for ZNS, large IO support and CXL are pretty large now,
move them to their own Kconfig files to clean up clutter and make things
easier to scale and read.

No functional changes.

Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
---
 kconfigs/Kconfig.libvirt         | 362 +------------------------------
 kconfigs/Kconfig.libvirt.cxl     |  73 +++++++
 kconfigs/Kconfig.libvirt.largeio | 134 ++++++++++++
 kconfigs/Kconfig.libvirt.zns     | 150 +++++++++++++
 4 files changed, 360 insertions(+), 359 deletions(-)
 create mode 100644 kconfigs/Kconfig.libvirt.cxl
 create mode 100644 kconfigs/Kconfig.libvirt.largeio
 create mode 100644 kconfigs/Kconfig.libvirt.zns

diff --git a/kconfigs/Kconfig.libvirt b/kconfigs/Kconfig.libvirt
index 7486be49..f6f7d134 100644
--- a/kconfigs/Kconfig.libvirt
+++ b/kconfigs/Kconfig.libvirt
@@ -1091,362 +1091,6 @@ config LIBVIRT_STORAGE_POOL_NAME
 	  For instance you may want to use a volume name of "data2" for a path
 	  on a partition on /data2/ or something like that.
 
-if EXTRA_STORAGE_SUPPORTS_ZNS
-
-config QEMU_ENABLE_NVME_ZNS
-	bool "Enable QEMU NVMe ZNS drives"
-	depends on LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_NVME
-	default n
-	help
-	  If this option is enabled then you can enable NVMe ZNS drives on the
-	  guests.
-
-config QEMU_CUSTOM_NVME_ZNS
-	bool "Customize QEMU NVMe ZNS settings"
-	depends on QEMU_ENABLE_NVME_ZNS
-	default n
-	help
-	  If this option is enabled then you will be able to modify the defaults
-	  used for the 2 NVMe ZNS drives we create for you. By default we create
-	  two NVMe ZNS drives with 100 GiB of total size, each zone being
-	  128 MiB, and so you end up with 800 total zones. The zone capacity
-	  equals the zone size. The default zone size append limit is also
-	  set to 0, which means the zone append size limit will equal to the
-	  maximum data transfer size (MDTS). The default logical and physical
-	  block size of 4096 bytes is also used. If you want to customize any
-	  of these ZNS settings for the drives we bring up enable this option.
-
-	  If unsure say N.
-
-if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_CUSTOM_NVME_ZONE_DRIVE_SIZE
-	int "QEMU ZNS storage NVMe drive size"
-	default 102400
-	help
-	  The size of the QEMU NVMe ZNS drive to expose. We expose 2 NVMe
-	  ZNS drives of 100 GiB by default. This value chagnes its size.
-	  100 GiB is a sensible default given most full fstests require about
-	  50 GiB of data writes.
-
-config QEMU_CUSTOM_NVME_ZONE_ZASL
-	int "QEMU ZNS zasl - zone append size limit power of 2"
-	default 0
-	help
-	  This is the zone append size limit. If left at 0 QEMU will use
-	  the maximum data transfer size (MDTS) for the zone size append limit.
-	  Otherwise if this value is set to something other than 0, then the
-	  zone size append limit will equal to 2 to the power of the value set
-	  here multiplied by the minimum memory page size (4096 bytes) but the
-	  QEMU promises this value cannot exceed the maximum data transfer size.
-
-config QEMU_CUSTOM_NVME_ZONE_SIZE
-	string "QEMU ZNS storage NVMe zone size"
-	default "128M"
-	help
-	  The size the the QEMU NVMe ZNS zone size. The number of zones are
-	  implied by the driver size / zone size. If there is a remainder
-	  technically that should go into another zone with a smaller zone
-	  capacity.
-
-config QEMU_CUSTOM_NVME_ZONE_CAPACITY
-	string "QEMU ZNS storage NVMe zone capacity"
-	default "0M"
-	help
-	  The size to use for the zone capacity. This may be smaller or equal
-	  to the zone size. If set to 0 then this will ensure the zone
-	  capacity is equal to the zone size.
-
-config QEMU_CUSTOM_NVME_ZONE_MAX_ACTIVE
-	int "QEMU ZNS storage NVMe zone max active"
-	default 0
-	help
-	  The max numbe of active zones. The default of 0 means all zones
-	  can be active at all times.
-
-config QEMU_CUSTOM_NVME_ZONE_MAX_OPEN
-	int "QEMU ZNS storage NVMe zone max open"
-	default 0
-	help
-	  The max numbe of open zones. The default of 0 means all zones
-	  can be opened at all times. If the number of active zones is
-	  specified this value must be less than or equal to that value.
-
-config QEMU_CUSTOM_NVME_ZONE_PHYSICAL_BLOCK_SIZE
-	int "QEMU ZNS storage NVMe physical block size"
-	default 4096
-	help
-	  The physical block size to use for ZNS drives. This ends up
-	  what is put into the /sys/block/<disk>/queue/physical_block_size
-	  and is the smallest unit a physical storage device can write
-	  atomically. It is usually the same as the logical block size but may
-	  be bigger. One example is SATA drives with 4KB sectors that expose a
-	  512-byte logical block size to the operating system. For stacked
-	  block devices the physical_block_size variable contains the maximum
-	  physical_block_size of the component devices.
-
-config QEMU_CUSTOM_NVME_ZONE_LOGICAL_BLOCK_SIZE
-	int "QEMU ZNS storage NVMe logical block size"
-	default 4096
-	help
-	  The logical block size to use for ZNS drives. This ends up what is
-	  put into the /sys/block/<disk>/queue/logical_block_size and the
-	  smallest unit the storage device can address. It is typically 512
-	  bytes.
-
-endif # QEMU_CUSTOM_NVME_ZNS
-
-config LIBVIRT_ENABLE_ZNS
-	bool
-	default y if QEMU_ENABLE_NVME_ZNS
-
-config QEMU_NVME_ZONE_DRIVE_SIZE
-	int
-	default 102400 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_DRIVE_SIZE if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_ZASL
-	int
-	default 0 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_ZASL if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_SIZE
-	string
-	default "128M" if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_SIZE if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_CAPACITY
-	string
-	default "0M" if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_CAPACITY if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_MAX_ACTIVE
-	int
-	default 0 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_MAX_ACTIVE if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_MAX_OPEN
-	int
-	default 0 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_MAX_OPEN if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_PHYSICAL_BLOCK_SIZE
-	int
-	default 4096 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_PHYSICAL_BLOCK_SIZE if QEMU_CUSTOM_NVME_ZNS
-
-config QEMU_NVME_ZONE_LOGICAL_BLOCK_SIZE
-	int
-	default 4096 if !QEMU_CUSTOM_NVME_ZNS
-	default QEMU_CUSTOM_NVME_ZONE_LOGICAL_BLOCK_SIZE if QEMU_CUSTOM_NVME_ZNS
-
-endif # EXTRA_STORAGE_SUPPORTS_ZNS
-
-if EXTRA_STORAGE_SUPPORTS_LARGEIO
-
-config QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-	bool "Enable QEMU drives for large IO experimentation"
-	depends on LIBVIRT
-	default n
-	help
-	  If you want to experiment with large IO either with NVMe or virtio
-	  you can enable this option. This will create a few additional drives
-	  which are dedicated for largio experimentation testing.
-
-	  For now you will need a distribution with a root filesystem on XFS
-	  or btrfs, and so you will want to enable the kdevops distribution and
-	  VAGRANT_KDEVOPS_DEBIAN_TESTING64_XFS_20230427. This is a requirement
-	  given all block devices must use iomap and that is the only current
-	  way to disable buffer-heads. Eventually this limitation is expected
-	  You can also use large-block-20230525 with Amazon Linux 2023 on AWS.
-
-	  If unsure say N.
-
-if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-config QEMU_EXTRA_DRIVE_LARGEIO_NUM_DRIVES_PER_SPACE
-	int "How many qemu drives to create per each target size"
-	default 4
-	help
-	  If you are going to try to mess with LBS on 4k LBA you can experiment
-	  with:
-
-	  - 4k block size
-	  - 8k block size
-	  - 16k block size
-	  - 32k block size
-	  - 64k block size
-
-	  So in total 4 drives. For a drive with an LBA format of 16k, you can
-	  only experiment with block sizes:
-
-	  - 16k block size
-	  - 32k block size
-	  - 64k block size
-
-	  In theory you can experiment up to MAX_PAGECACHE_ORDER and to make
-	  things worse some filesystems can use block sizes which are not power
-	  of two. For now filesystems only support up to max block size 64k, so
-	  we can just keep the max drive sizes down a bit. Likewise twice the
-	  PAGE_SIZE is not supported as we require at least order 2 so 16k as
-	  folios use the 3rd page for the deferred list. So you really only need
-	  for 4k today:
-
-	  - 4k block size
-	  - 16k block size
-	  - 32k block size
-	  - 64k block size
-
-	  If we create 4 drives per space you can have 4 for basic baseline
-	  coverage testing. It seems the max limit is about 20 drives per
-	  qemu pcie port today, if you enable more than the default 4, good
-	  luck!
-
-config QEMU_EXTRA_DRIVE_LARGEIO_BASE_SIZE
-	int "QEMU extra drive drive base size"
-	default 10240
-	help
-	  The base size of the QEMU extra storage drive to expose. The
-	  size is increased by 1 MiB as we go down the list of extra large IO
-	  drives.
-
-config QEMU_EXTRA_DRIVE_LARGEIO_COMPAT
-	bool "Use a compatibility logical block size"
-	default n
-	help
-	  Since older spindle drives used to work with 512 bytes some drives
-	  exist with support to handle 512 writes even if they physically store
-	  more data on their drives for that one 512 byte write. Enable this if
-	  you want to ensure your large IO drives always have a logical block
-	  size restrained by the compatibility size you want to support.
-
-	  By default this is not enabled, and therefore the logical block size
-	  for the large IO drives will be equal to the physical block size.
-
-config QEMU_EXTRA_DRIVE_LARGEIO_COMPAT_SIZE
-	int "Large IO compat size"
-	default 512
-	help
-	  This is the compatibility base block size to use for older drives.
-	  Even if you disable QEMU_EXTRA_DRIVE_LARGEIO_COMPAT, this value will
-	  be used as the base for the computation for the physical block size
-	  for the large IO drives we create for you using the formula:
-
-	    libvirt_largeio_logical_compat_size  * (2 ** n)
-
-	  where n is the index of the large IO drive.
-
-config QEMU_EXTRA_DRIVE_LARGEIO_MAX_POW_LIMIT
-	int "Large IO - number of drives - power"
-	default 7
-	help
-	  We use an iterator to create the number of large drives on the
-	  guest system using:
-
-	    for n in range(0,libvirt_largeio_pow_limit)
-	       pbs = compat_size * (2 ** n)
-
-	  Using a compat_size of 512 means we go up to 64k physical block
-	  size by using the default of 7.
-
-	  This provides the value for the libvirt_largeio_pow_limit. By
-	  default we set this to 12 so we get drives of different physical
-	  sizes in powers of 2 ranging from 512 up to 1 GiB. You can reduce
-	  this if you want less drives to experiment with.
-
-endif # QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-config LIBVIRT_ENABLE_LARGEIO
-	bool
-	default y if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-config QEMU_LARGEIO_DRIVE_BASE_SIZE
-	int
-	default 10240 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-	default QEMU_EXTRA_DRIVE_LARGEIO_BASE_SIZE if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-config QEMU_LARGEIO_COMPAT_SIZE
-	int
-	default 512 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-	default QEMU_EXTRA_DRIVE_LARGEIO_COMPAT_SIZE if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-config QEMU_LARGEIO_MAX_POW_LIMIT
-	int
-	default 12 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-	default QEMU_EXTRA_DRIVE_LARGEIO_MAX_POW_LIMIT if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
-
-endif # EXTRA_STORAGE_SUPPORTS_LARGEIO
-
-config QEMU_ENABLE_CXL
-	bool "Enable QEMU CXL devices"
-	depends on LIBVIRT
-	depends on LIBVIRT_MACHINE_TYPE_Q35
-	depends on BRINGUP_SUPPORTS_CXL
-	depends on QEMU_USE_DEVELOPMENT_VERSION
-	default n
-	help
-	  If this option is enabled then you can enable different types of
-	  CXL devices which we will emulate for you.
-
-if QEMU_ENABLE_CXL
-
-config QEMU_START_QMP_ON_TCP_SOCKET
-	bool "Start QMP on a TCP socket"
-	default n
-
-if QEMU_START_QMP_ON_TCP_SOCKET
-
-config QEMU_QMP_COMMAND_LINE_STRING
-	string "Qemu command line string for qmp"
-	default "tcp:localhost:4444,server"
-	help
-	  Option for qmp interface (from https://wiki.qemu.org/Documentation/QMP).
-
-config QEMU_QMP_WAIT_ON
-	bool "Let Qemu instance wait for qmp connection"
-	default n
-
-endif # QEMU_START_QMP_ON_TCP_SOCKET
-
-choice
-	prompt "CXL topology to enable"
-	default QEMU_ENABLE_CXL_DEMO_TOPOLOGY_1
-
-config QEMU_ENABLE_CXL_DEMO_TOPOLOGY_1
-	bool "Basic CXL demo topology with a CXL Type 3 device"
-	help
-	  This is a basic CXL demo topology. It consists of single host bridge that
-	  has one root port. A Type 3 persistent memory device is attached to the
-	  root port. This topology is referred to as a passthrough decoder in
-	  kernel terminology. The kernel CXL core will consume the resource exposed
-	  in the ACPI CXL memory layout description, such as Host Managed
-	  Device memory (HDM), CXL Early Discovery Table (CEDT), and the
-	  CXL Fixed Memory Window Structures to publish the root of a
-	  cxl_port decode hierarchy to map regions that represent System RAM,
-	  or Persistent Memory regions to be managed by LIBNVDIMM.
-
-config QEMU_ENABLE_CXL_DEMO_TOPOLOGY_2
-	bool "Host bridge with two root ports"
-	help
-	  This topology extends the first demo topology by placing two root ports
-	  in the host bridge. This ensures that the decoder associated with the
-	  host bridge is not a passthrough decoder.
-
-config QEMU_ENABLE_CXL_SWITCH_TOPOLOGY_1
-	bool "CXL switch connected to root port with two down stream ports"
-	help
-	  This topology adds a CXL switch in the topology. A memory device
-	  is connected to one of the down stream ports. The upstream port
-	  is connected to a root port on the host bridge.
-
-config QEMU_ENABLE_CXL_DEMO_DCD_TOPOLOGY_1
-	bool "CXL DCD demo directly attached to a single-port HB"
-	help
-	  This topology adds a DCD device in the topology, directly attached to
-	  a host bridge with only one root port.
-	  The device has zero (volatile or non-volatile) static capacity
-	  and 2 dynamic capacity regions where dynamic extents can be added.
-
-endchoice
-
-endif # QEMU_ENABLE_CXL
+source "kconfigs/Kconfig.libvirt.zns"
+source "kconfigs/Kconfig.libvirt.largeio"
+source "kconfigs/Kconfig.libvirt.cxl"
diff --git a/kconfigs/Kconfig.libvirt.cxl b/kconfigs/Kconfig.libvirt.cxl
new file mode 100644
index 00000000..bac83a57
--- /dev/null
+++ b/kconfigs/Kconfig.libvirt.cxl
@@ -0,0 +1,73 @@
+config QEMU_ENABLE_CXL
+	bool "Enable QEMU CXL devices"
+	depends on LIBVIRT
+	depends on LIBVIRT_MACHINE_TYPE_Q35
+	depends on BRINGUP_SUPPORTS_CXL
+	depends on QEMU_USE_DEVELOPMENT_VERSION
+	default n
+	help
+	  If this option is enabled then you can enable different types of
+	  CXL devices which we will emulate for you.
+
+if QEMU_ENABLE_CXL
+
+config QEMU_START_QMP_ON_TCP_SOCKET
+	bool "Start QMP on a TCP socket"
+	default n
+
+if QEMU_START_QMP_ON_TCP_SOCKET
+
+config QEMU_QMP_COMMAND_LINE_STRING
+	string "Qemu command line string for qmp"
+	default "tcp:localhost:4444,server"
+	help
+	  Option for qmp interface (from https://wiki.qemu.org/Documentation/QMP).
+
+config QEMU_QMP_WAIT_ON
+	bool "Let Qemu instance wait for qmp connection"
+	default n
+
+endif # QEMU_START_QMP_ON_TCP_SOCKET
+
+choice
+	prompt "CXL topology to enable"
+	default QEMU_ENABLE_CXL_DEMO_TOPOLOGY_1
+
+config QEMU_ENABLE_CXL_DEMO_TOPOLOGY_1
+	bool "Basic CXL demo topology with a CXL Type 3 device"
+	help
+	  This is a basic CXL demo topology. It consists of single host bridge that
+	  has one root port. A Type 3 persistent memory device is attached to the
+	  root port. This topology is referred to as a passthrough decoder in
+	  kernel terminology. The kernel CXL core will consume the resource exposed
+	  in the ACPI CXL memory layout description, such as Host Managed
+	  Device memory (HDM), CXL Early Discovery Table (CEDT), and the
+	  CXL Fixed Memory Window Structures to publish the root of a
+	  cxl_port decode hierarchy to map regions that represent System RAM,
+	  or Persistent Memory regions to be managed by LIBNVDIMM.
+
+config QEMU_ENABLE_CXL_DEMO_TOPOLOGY_2
+	bool "Host bridge with two root ports"
+	help
+	  This topology extends the first demo topology by placing two root ports
+	  in the host bridge. This ensures that the decoder associated with the
+	  host bridge is not a passthrough decoder.
+
+config QEMU_ENABLE_CXL_SWITCH_TOPOLOGY_1
+	bool "CXL switch connected to root port with two down stream ports"
+	help
+	  This topology adds a CXL switch in the topology. A memory device
+	  is connected to one of the down stream ports. The upstream port
+	  is connected to a root port on the host bridge.
+
+config QEMU_ENABLE_CXL_DEMO_DCD_TOPOLOGY_1
+	bool "CXL DCD demo directly attached to a single-port HB"
+	help
+	  This topology adds a DCD device in the topology, directly attached to
+	  a host bridge with only one root port.
+	  The device has zero (volatile or non-volatile) static capacity
+	  and 2 dynamic capacity regions where dynamic extents can be added.
+
+endchoice
+
+endif # QEMU_ENABLE_CXL
diff --git a/kconfigs/Kconfig.libvirt.largeio b/kconfigs/Kconfig.libvirt.largeio
new file mode 100644
index 00000000..0d9e5973
--- /dev/null
+++ b/kconfigs/Kconfig.libvirt.largeio
@@ -0,0 +1,134 @@
+if EXTRA_STORAGE_SUPPORTS_LARGEIO
+
+config QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+	bool "Enable QEMU drives for large IO experimentation"
+	depends on LIBVIRT
+	default n
+	help
+	  If you want to experiment with large IO either with NVMe or virtio
+	  you can enable this option. This will create a few additional drives
+	  which are dedicated for largio experimentation testing.
+
+	  For now you will need a distribution with a root filesystem on XFS
+	  or btrfs, and so you will want to enable the kdevops distribution and
+	  VAGRANT_KDEVOPS_DEBIAN_TESTING64_XFS_20230427. This is a requirement
+	  given all block devices must use iomap and that is the only current
+	  way to disable buffer-heads. Eventually this limitation is expected
+	  to be removed.
+	  You can also use large-block-20230525 with Amazon Linux 2023 on AWS.
+
+	  If unsure say N.
+
+if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+config QEMU_EXTRA_DRIVE_LARGEIO_NUM_DRIVES_PER_SPACE
+	int "How many qemu drives to create per each target size"
+	default 4
+	help
+	  If you are going to try to mess with LBS on 4k LBA you can experiment
+	  with:
+
+	  - 4k block size
+	  - 8k block size
+	  - 16k block size
+	  - 32k block size
+	  - 64k block size
+
+	  So in total 4 drives. For a drive with an LBA format of 16k, you can
+	  only experiment with block sizes:
+
+	  - 16k block size
+	  - 32k block size
+	  - 64k block size
+
+	  In theory you can experiment up to MAX_PAGECACHE_ORDER and to make
+	  things worse some filesystems can use block sizes which are not power
+	  of two. For now filesystems only support up to max block size 64k, so
+	  we can just keep the max drive sizes down a bit. Likewise twice the
+	  PAGE_SIZE is not supported as we require at least order 2 so 16k as
+	  folios use the 3rd page for the deferred list. So you really only need
+	  for 4k today:
+
+	  - 4k block size
+	  - 16k block size
+	  - 32k block size
+	  - 64k block size
+
+	  If we create 4 drives per space you can have 4 for basic baseline
+	  coverage testing. It seems the max limit is about 20 drives per
+	  qemu pcie port today, if you enable more than the default 4, good
+	  luck!
+
+config QEMU_EXTRA_DRIVE_LARGEIO_BASE_SIZE
+	int "QEMU extra drive base size"
+	default 10240
+	help
+	  The base size of the QEMU extra storage drive to expose. The
+	  size is increased by 1 MiB as we go down the list of extra large IO
+	  drives.
+
+config QEMU_EXTRA_DRIVE_LARGEIO_COMPAT
+	bool "Use a compatibility logical block size"
+	default n
+	help
+	  Since older spindle drives used to work with 512 bytes some drives
+	  exist with support to handle 512 writes even if they physically store
+	  more data on their drives for that one 512 byte write. Enable this if
+	  you want to ensure your large IO drives always have a logical block
+	  size restrained by the compatibility size you want to support.
+
+	  By default this is not enabled, and therefore the logical block size
+	  for the large IO drives will be equal to the physical block size.
+
+config QEMU_EXTRA_DRIVE_LARGEIO_COMPAT_SIZE
+	int "Large IO compat size"
+	default 512
+	help
+	  This is the compatibility base block size to use for older drives.
+	  Even if you disable QEMU_EXTRA_DRIVE_LARGEIO_COMPAT, this value will
+	  be used as the base for the computation for the physical block size
+	  for the large IO drives we create for you using the formula:
+
+	    libvirt_largeio_logical_compat_size  * (2 ** n)
+
+	  where n is the index of the large IO drive.
+
+config QEMU_EXTRA_DRIVE_LARGEIO_MAX_POW_LIMIT
+	int "Large IO - number of drives - power"
+	default 7
+	help
+	  We use an iterator to create the number of large drives on the
+	  guest system using:
+
+	    for n in range(0,libvirt_largeio_pow_limit)
+	       pbs = compat_size * (2 ** n)
+
+	  Using a compat_size of 512 means we go up to 64k physical block
+	  size by using the default of 7.
+
+	  This provides the value for the libvirt_largeio_pow_limit. By
+	  default we set this to 7 so we get drives of different physical
+	  block sizes in powers of 2 ranging from 512 bytes up to 64k. You
+	  can reduce this if you want fewer drives to experiment with.
+
+endif # QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+config LIBVIRT_ENABLE_LARGEIO
+	bool
+	default y if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+config QEMU_LARGEIO_DRIVE_BASE_SIZE
+	int
+	default 10240 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+	default QEMU_EXTRA_DRIVE_LARGEIO_BASE_SIZE if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+config QEMU_LARGEIO_COMPAT_SIZE
+	int
+	default 512 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+	default QEMU_EXTRA_DRIVE_LARGEIO_COMPAT_SIZE if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+config QEMU_LARGEIO_MAX_POW_LIMIT
+	int
+	default 12 if !QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+	default QEMU_EXTRA_DRIVE_LARGEIO_MAX_POW_LIMIT if QEMU_ENABLE_EXTRA_DRIVE_LARGEIO
+
+endif # EXTRA_STORAGE_SUPPORTS_LARGEIO
diff --git a/kconfigs/Kconfig.libvirt.zns b/kconfigs/Kconfig.libvirt.zns
new file mode 100644
index 00000000..1b1b7090
--- /dev/null
+++ b/kconfigs/Kconfig.libvirt.zns
@@ -0,0 +1,150 @@
+if EXTRA_STORAGE_SUPPORTS_ZNS
+
+config QEMU_ENABLE_NVME_ZNS
+	bool "Enable QEMU NVMe ZNS drives"
+	depends on LIBVIRT && LIBVIRT_EXTRA_STORAGE_DRIVE_NVME
+	default n
+	help
+	  If this option is enabled then you can enable NVMe ZNS drives on the
+	  guests.
+
+config QEMU_CUSTOM_NVME_ZNS
+	bool "Customize QEMU NVMe ZNS settings"
+	depends on QEMU_ENABLE_NVME_ZNS
+	default n
+	help
+	  If this option is enabled then you will be able to modify the defaults
+	  used for the 2 NVMe ZNS drives we create for you. By default we create
+	  two NVMe ZNS drives with 100 GiB of total size, each zone being
+	  128 MiB, and so you end up with 800 total zones. The zone capacity
+	  equals the zone size. The default zone size append limit is also
+	  set to 0, which means the zone append size limit will equal to the
+	  maximum data transfer size (MDTS). The default logical and physical
+	  block size of 4096 bytes is also used. If you want to customize any
+	  of these ZNS settings for the drives we bring up enable this option.
+
+	  If unsure say N.
+
+if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_CUSTOM_NVME_ZONE_DRIVE_SIZE
+	int "QEMU ZNS storage NVMe drive size"
+	default 102400
+	help
+	  The size of the QEMU NVMe ZNS drive to expose. We expose 2 NVMe
+	  ZNS drives of 100 GiB by default. This value changes its size.
+	  100 GiB is a sensible default given most full fstests require about
+	  50 GiB of data writes.
+
+config QEMU_CUSTOM_NVME_ZONE_ZASL
+	int "QEMU ZNS zasl - zone append size limit power of 2"
+	default 0
+	help
+	  This is the zone append size limit. If left at 0 QEMU will use
+	  the maximum data transfer size (MDTS) for the zone size append limit.
+	  Otherwise if this value is set to something other than 0, then the
+	  zone size append limit will equal to 2 to the power of the value set
+	  here multiplied by the minimum memory page size (4096 bytes) but the
+	  QEMU promises this value cannot exceed the maximum data transfer size.
+
+config QEMU_CUSTOM_NVME_ZONE_SIZE
+	string "QEMU ZNS storage NVMe zone size"
+	default "128M"
+	help
+	  The size of the QEMU NVMe ZNS zone. The number of zones are
+	  implied by the drive size / zone size. If there is a remainder
+	  technically that should go into another zone with a smaller zone
+	  capacity.
+
+config QEMU_CUSTOM_NVME_ZONE_CAPACITY
+	string "QEMU ZNS storage NVMe zone capacity"
+	default "0M"
+	help
+	  The size to use for the zone capacity. This may be smaller or equal
+	  to the zone size. If set to 0 then this will ensure the zone
+	  capacity is equal to the zone size.
+
+config QEMU_CUSTOM_NVME_ZONE_MAX_ACTIVE
+	int "QEMU ZNS storage NVMe zone max active"
+	default 0
+	help
+	  The max number of active zones. The default of 0 means all zones
+	  can be active at all times.
+
+config QEMU_CUSTOM_NVME_ZONE_MAX_OPEN
+	int "QEMU ZNS storage NVMe zone max open"
+	default 0
+	help
+	  The max number of open zones. The default of 0 means all zones
+	  can be opened at all times. If the number of active zones is
+	  specified this value must be less than or equal to that value.
+
+config QEMU_CUSTOM_NVME_ZONE_PHYSICAL_BLOCK_SIZE
+	int "QEMU ZNS storage NVMe physical block size"
+	default 4096
+	help
+	  The physical block size to use for ZNS drives. This ends up
+	  what is put into the /sys/block/<disk>/queue/physical_block_size
+	  and is the smallest unit a physical storage device can write
+	  atomically. It is usually the same as the logical block size but may
+	  be bigger. One example is SATA drives with 4KB sectors that expose a
+	  512-byte logical block size to the operating system. For stacked
+	  block devices the physical_block_size variable contains the maximum
+	  physical_block_size of the component devices.
+
+config QEMU_CUSTOM_NVME_ZONE_LOGICAL_BLOCK_SIZE
+	int "QEMU ZNS storage NVMe logical block size"
+	default 4096
+	help
+	  The logical block size to use for ZNS drives. This ends up what is
+	  put into the /sys/block/<disk>/queue/logical_block_size and the
+	  smallest unit the storage device can address. It is typically 512
+	  bytes.
+
+endif # QEMU_CUSTOM_NVME_ZNS
+
+config LIBVIRT_ENABLE_ZNS
+	bool
+	default y if QEMU_ENABLE_NVME_ZNS
+
+config QEMU_NVME_ZONE_DRIVE_SIZE
+	int
+	default 102400 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_DRIVE_SIZE if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_ZASL
+	int
+	default 0 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_ZASL if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_SIZE
+	string
+	default "128M" if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_SIZE if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_CAPACITY
+	string
+	default "0M" if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_CAPACITY if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_MAX_ACTIVE
+	int
+	default 0 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_MAX_ACTIVE if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_MAX_OPEN
+	int
+	default 0 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_MAX_OPEN if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_PHYSICAL_BLOCK_SIZE
+	int
+	default 4096 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_PHYSICAL_BLOCK_SIZE if QEMU_CUSTOM_NVME_ZNS
+
+config QEMU_NVME_ZONE_LOGICAL_BLOCK_SIZE
+	int
+	default 4096 if !QEMU_CUSTOM_NVME_ZNS
+	default QEMU_CUSTOM_NVME_ZONE_LOGICAL_BLOCK_SIZE if QEMU_CUSTOM_NVME_ZNS
+
+endif # EXTRA_STORAGE_SUPPORTS_ZNS
-- 
2.43.0


  parent reply	other threads:[~2024-03-08  0:04 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-03-08  0:03 [PATCH 0/8] guestfs: fixes and enhancements Luis Chamberlain
2024-03-08  0:03 ` [PATCH 1/8] guestfs: use macros for drives for aarch64 Luis Chamberlain
2024-03-08  0:03 ` [PATCH 2/8] bringup: disable ZNS and CXL for guestfs Luis Chamberlain
2024-03-08  0:03 ` Luis Chamberlain [this message]
2024-03-08  0:03 ` [PATCH 4/8] guestfs: move options to its own file Luis Chamberlain
2024-03-08  0:03 ` [PATCH 5/8] bringup: match default distro to user's distro Luis Chamberlain
2024-03-08  0:03 ` [PATCH 6/8] guestfs: remove explicit tap0 device name Luis Chamberlain
2024-03-08  0:03 ` [PATCH 7/8] destroy_guestfs.sh: remove known ssh key Luis Chamberlain
2024-03-08  0:03 ` [PATCH 8/8] guestfs: verify new line on ssh include directive Luis Chamberlain
2024-03-08  9:55 ` [PATCH 0/8] guestfs: fixes and enhancements Luis Chamberlain
2024-03-08 14:14   ` Chuck Lever III
2024-03-08 14:26     ` Chuck Lever III
2024-03-08 15:44       ` Luis Chamberlain
2024-03-08 15:46         ` Chuck Lever III
2024-03-08 15:56           ` Luis Chamberlain

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240308000400.1646823-4-mcgrof@kernel.org \
    --to=mcgrof@kernel.org \
    --cc=kdevops@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox