From: David Matlack <dmatlack@google.com>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: Aaron Lewis <aaronlewis@google.com>,
Adhemerval Zanella <adhemerval.zanella@linaro.org>,
Adithya Jayachandran <ajayachandra@nvidia.com>,
Andrew Jones <ajones@ventanamicro.com>,
Ard Biesheuvel <ardb@kernel.org>,
Arnaldo Carvalho de Melo <acme@redhat.com>,
Bibo Mao <maobibo@loongson.cn>,
Claudio Imbrenda <imbrenda@linux.ibm.com>,
Dan Williams <dan.j.williams@intel.com>,
Dave Jiang <dave.jiang@intel.com>,
David Matlack <dmatlack@google.com>,
dmaengine@vger.kernel.org, Huacai Chen <chenhuacai@kernel.org>,
James Houghton <jthoughton@google.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Joel Granados <joel.granados@kernel.org>,
Josh Hilke <jrhilke@google.com>,
Kevin Tian <kevin.tian@intel.com>,
kvm@vger.kernel.org, linux-kselftest@vger.kernel.org,
"Mike Rapoport (Microsoft)" <rppt@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
"Pratik R. Sampat" <prsampat@amd.com>,
Saeed Mahameed <saeedm@nvidia.com>,
Sean Christopherson <seanjc@google.com>,
Shuah Khan <shuah@kernel.org>,
Vinicius Costa Gomes <vinicius.gomes@intel.com>,
Vipin Sharma <vipinsh@google.com>,
Wei Yang <richard.weiyang@gmail.com>,
"Yury Norov [NVIDIA]" <yury.norov@gmail.com>
Subject: [PATCH 10/33] tools headers: Import asm-generic MMIO helpers
Date: Fri, 20 Jun 2025 23:20:08 +0000
Message-ID: <20250620232031.2705638-11-dmatlack@google.com>
In-Reply-To: <20250620232031.2705638-1-dmatlack@google.com>

Import the asm-generic MMIO helper functions from the kernel headers
into tools/include/. The top-level include is <linux/io.h>, which
includes the arch-specific <asm/io.h>, which in turn includes
<asm-generic/io.h>. This layout matches the kernel's header layout and
appeases checkpatch.pl, which warns against including <asm/io.h> or
<asm-generic/io.h> directly.
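
For example, a tools/ program pulls in the whole chain with a single,
checkpatch-friendly include:

	#include <linux/io.h>	/* -> <asm/io.h> -> <asm-generic/io.h> */
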
Changes made when importing:
- Add missing includes at the top.
- Stub out mmiowb_set_pending().
- Stub out _THIS_IP_.
- Stub out log_*_mmio() calls.
- Drop the CONFIG_64BIT checks, since tools/include/linux/types.h
always defines u64.
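
As an illustration (hypothetical device and register offset, not part
of this series), a selftest that has already mmap()ed a device BAR can
use the imported helpers directly on the mapping:

	#include <linux/io.h>

	/* Toggle a made-up enable bit in a 32-bit register at offset 0x10. */
	static void toggle_enable(void *bar0)
	{
		u32 ctrl = readl(bar0 + 0x10);	/* little-endian read + barriers */

		writel(ctrl ^ 0x1, bar0 + 0x10);
	}
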
Signed-off-by: David Matlack <dmatlack@google.com>
---
tools/include/asm-generic/io.h | 482 +++++++++++++++++++++++++++++++++
tools/include/asm/io.h | 7 +
tools/include/linux/io.h | 4 +-
3 files changed, 492 insertions(+), 1 deletion(-)
create mode 100644 tools/include/asm-generic/io.h
create mode 100644 tools/include/asm/io.h
diff --git a/tools/include/asm-generic/io.h b/tools/include/asm-generic/io.h
new file mode 100644
index 000000000000..e5a0b07ad452
--- /dev/null
+++ b/tools/include/asm-generic/io.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_GENERIC_IO_H
+#define _TOOLS_ASM_GENERIC_IO_H
+
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#ifndef mmiowb_set_pending
+#define mmiowb_set_pending() do { } while (0)
+#endif
+
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar(v) rmb()
+#else
+#define __io_ar(v) barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() mmiowb_set_pending()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par(v) __io_ar(v)
+#endif
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ 0
+#endif
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+
+/*
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
+ */
+
+#ifndef __raw_readb
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readw
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readl
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *)addr;
+}
+#endif
+
+#ifndef __raw_writeb
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *)addr = value;
+}
+#endif
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+
+#ifndef readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeb(value, addr);
+ __io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writew
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writel
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+#ifndef readb_relaxed
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw_relaxed
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl_relaxed
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#if defined(readq) && !defined(readq_relaxed)
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+#if defined(writeq) && !defined(writeq_relaxed)
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+}
+#endif
+
+/*
+ * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
+ * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
+ */
+#ifndef readsb
+#define readsb readsb
+static inline void readsb(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+
+ do {
+ u8 x = __raw_readb(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsw
+#define readsw readsw
+static inline void readsw(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+
+ do {
+ u16 x = __raw_readw(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsl
+#define readsl readsl
+static inline void readsl(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = __raw_readl(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsq
+#define readsq readsq
+static inline void readsq(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u64 *buf = buffer;
+
+ do {
+ u64 x = __raw_readq(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesb
+#define writesb writesb
+static inline void writesb(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+
+ do {
+ __raw_writeb(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesw
+#define writesw writesw
+static inline void writesw(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+
+ do {
+ __raw_writew(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesl
+#define writesl writesl
+static inline void writesl(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesq
+#define writesq writesq
+static inline void writesq(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u64 *buf = buffer;
+
+ do {
+ __raw_writeq(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#endif /* _TOOLS_ASM_GENERIC_IO_H */
diff --git a/tools/include/asm/io.h b/tools/include/asm/io.h
new file mode 100644
index 000000000000..9ae219b12604
--- /dev/null
+++ b/tools/include/asm/io.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_ASM_IO_H
+#define _TOOLS_ASM_IO_H
+
+#include <asm-generic/io.h>
+
+#endif /* _TOOLS_ASM_IO_H */
diff --git a/tools/include/linux/io.h b/tools/include/linux/io.h
index e129871fe661..4b94b84160b8 100644
--- a/tools/include/linux/io.h
+++ b/tools/include/linux/io.h
@@ -2,4 +2,6 @@
 #ifndef _TOOLS_IO_H
 #define _TOOLS_IO_H
 
-#endif
+#include <asm/io.h>
+
+#endif /* _TOOLS_IO_H */
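
A further usage sketch (same hypothetical BAR mapping as above;
register offsets are made up): the {read,write}s{b,w,l,q}() helpers
re-access a single register rather than incrementing the address,
which is how a test might drain a device FIFO, and the _relaxed
variants skip the __io_br()/__io_ar() ordering barriers:

	/* Drain @count 32-bit words from a FIFO data register into @buf. */
	static void drain_fifo(void *bar0, u32 *buf, unsigned int count)
	{
		readsl(bar0 + 0x40, buf, count);	/* re-reads the same address */

		(void)readl_relaxed(bar0 + 0x44);	/* no __io_br()/__io_ar() */
	}
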
--
2.50.0.rc2.701.gf1e915cc24-goog