From: Steffen Eiden <seiden@linux.ibm.com>
To: kvm@vger.kernel.org, kvmarm@lists.linux.dev,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-s390@vger.kernel.org
Cc: Andreas Grapentin <gra@linux.ibm.com>,
Arnd Bergmann <arnd@arndb.de>,
Catalin Marinas <catalin.marinas@arm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Claudio Imbrenda <imbrenda@linux.ibm.com>,
David Hildenbrand <david@kernel.org>,
Gautam Gala <ggala@linux.ibm.com>,
Hendrik Brueckner <brueckner@linux.ibm.com>,
Janosch Frank <frankja@linux.ibm.com>,
Joey Gouly <joey.gouly@arm.com>, Marc Zyngier <maz@kernel.org>,
Nina Schoetterl-Glausch <oss@nina.schoetterlglausch.eu>,
Oliver Upton <oupton@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>,
Ulrich Weigand <Ulrich.Weigand@de.ibm.com>,
Will Deacon <will@kernel.org>, Zenghui Yu <yuzenghui@huawei.com>
Subject: [PATCH v2 26/28] KVM: s390: arm64: Implement vCPU IOCTLs
Date: Tue, 28 Apr 2026 17:56:18 +0200 [thread overview]
Message-ID: <20260428155622.1361364-27-seiden@linux.ibm.com> (raw)
In-Reply-To: <20260428155622.1361364-1-seiden@linux.ibm.com>
Implement all vCPU IOCTLs.
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/arm64/arm.c | 354 ++++++++++++++++++++++++++++++++++++
arch/s390/kvm/arm64/guest.c | 71 +++++++-
arch/s390/kvm/arm64/guest.h | 5 +
arch/s390/kvm/arm64/reset.c | 45 +++++
arch/s390/kvm/arm64/reset.h | 11 ++
5 files changed, 484 insertions(+), 2 deletions(-)
create mode 100644 arch/s390/kvm/arm64/reset.c
create mode 100644 arch/s390/kvm/arm64/reset.h
diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
index 77bc4a8841df..b629bef84eda 100644
--- a/arch/s390/kvm/arm64/arm.c
+++ b/arch/s390/kvm/arm64/arm.c
@@ -8,9 +8,17 @@
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
+#include <asm/access-regs.h>
+#include <asm/kvm_emulate.h>
+#include <asm/sae.h>
+
+#include <kvm/arm64/handle_exit.h>
+#include <kvm/arm64/kvm_emulate.h>
+
#include <gmap.h>
#include "arm.h"
+#include "guest.h"
#include "reset.h"
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -168,6 +176,22 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ save_access_regs(&vcpu->arch.host_acrs[0]);
+ vcpu->cpu = cpu;
+
+ lasrm(&vcpu->arch.save_area);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ stiasrm(&vcpu->arch.save_area);
+
+ vcpu->cpu = -1;
+ restore_access_regs(&vcpu->arch.host_acrs[0]);
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
@@ -191,12 +215,342 @@ unsigned long system_supported_vcpu_features(void)
return KVM_VCPU_VALID_FEATURES;
}
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return vcpu_mode_priv(vcpu);
+}
+
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
+
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ return -EPERM;
+
+ if (likely(READ_ONCE(vcpu->pid)))
+ return 0;
+
+ return 0;
+}
+
+/**
+ * check_vcpu_requests - check and handle pending vCPU requests
+ * @vcpu: the VCPU pointer
+ *
+ * Return: 1 if we should enter the guest
+ * 0 if we should exit to userspace
+ * < 0 if we should exit to userspace, where the return value indicates
+ * an error
+ */
+static int check_vcpu_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_request_pending(vcpu)) {
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+ /*
+ * Clear IRQ_PENDING requests that were made to guarantee
+ * that a VCPU sees new virtual interrupts.
+ */
+ kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+ }
+
+ return 1;
+}
+
+static int kvm_vcpu_initialize(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
+ struct kvm *kvm = vcpu->kvm;
+ int ret = -EINVAL;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
+ kvm_vcpu_init_changed(vcpu, init))
+ goto out_unlock;
+
+ bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
+
+ kvm_reset_vcpu(vcpu);
+
+ set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
+ vcpu_set_flag(vcpu, VCPU_INITIALIZED);
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
+ return ret;
+}
+
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ int ret;
+
+ if (init->target != KVM_ARM_TARGET_GENERIC_V8)
+ return -EINVAL;
+
+ ret = kvm_vcpu_init_check_features(vcpu, init);
+ if (ret)
+ return ret;
+
+ if (!kvm_vcpu_initialized(vcpu))
+ return kvm_vcpu_initialize(vcpu, init);
+
+ if (kvm_vcpu_init_changed(vcpu, init))
+ return -EINVAL;
+
+ kvm_reset_vcpu(vcpu);
+
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_init *init)
+{
+ struct kvm_sae_save_area *save_area = &vcpu->arch.save_area;
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+ int ret;
+
+ sae_block->save_area = virt_to_phys(save_area);
+ save_area->sdo = virt_to_phys(sae_block);
+
+ vcpu_load(vcpu);
+
+ ret = kvm_vcpu_set_target(vcpu, init);
+ if (ret)
+ goto out_put;
+
+ vcpu_reset_hcr(vcpu);
+
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ ret = 0;
+out_put:
+ vcpu_put(vcpu);
+ return ret;
+}
+
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
return 0;
}
+static void adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
+ kvm_skip_instr(vcpu);
+ vcpu_clear_flag(vcpu, INCREMENT_PC);
+ }
+}
+
+static void arm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+
+ adjust_pc(vcpu);
+
+ local_irq_disable();
+ guest_enter_irqoff();
+ local_irq_enable();
+
+ sae_block->icptr = 0;
+
+ sae64a(sae_block);
+
+ local_irq_disable();
+ guest_exit_irqoff();
+ local_irq_enable();
+}
+
+/**
+ * kvm_arch_vcpu_ioctl_run() - run arm64 vCPU
+ * @vcpu: the vCPU pointer
+ *
+ * Execute arm64 guest instructions using SAE.
+ *
+ * Returns:
+ * 1 enter the guest (should not be observed by userspace)
+ * 0 exit to userspace
+ * < 0 exit to userspace, where the return value indicates an error
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *kvm_run = vcpu->run;
+ u8 icptr;
+ int ret;
+
+ if (kvm_run->exit_reason == KVM_EXIT_MMIO) {
+ ret = kvm_handle_mmio_return(vcpu);
+ if (ret <= 0)
+ return ret;
+ }
+
+ vcpu_load(vcpu);
+
+ if (!vcpu->wants_to_run) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ kvm_sigset_activate(vcpu);
+
+ might_fault();
+
+ ret = 1;
+ do {
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ret = -EINTR;
+ continue;
+ }
+
+ if (need_resched())
+ schedule();
+
+ if (ret > 0)
+ ret = check_vcpu_requests(vcpu);
+
+ vcpu->arch.sae_block.icptr = 0;
+
+ arm_vcpu_run(vcpu);
+
+ icptr = vcpu->arch.sae_block.icptr;
+ switch (icptr) {
+ case SAE_ICPTR_SPURIOUS:
+ break;
+ case SAE_ICPTR_VALIDITY:
+ WARN_ONCE(true, "SAE: validity intercept. vir: 0x%04x",
+ vcpu->arch.sae_block.vir);
+ ret = -EINVAL;
+ break;
+ case SAE_ICPTR_SYNCHRONOUS_EXCEPTION:
+ ret = handle_trap_exceptions(vcpu);
+ break;
+ default:
+ WARN_ONCE(true, "SAE: unknown interception reason 0x%02x", icptr);
+ ret = -EINVAL;
+ }
+ } while (ret > 0);
+
+ kvm_sigset_deactivate(vcpu);
+out:
+ if (unlikely(vcpu_get_flag(vcpu, INCREMENT_PC)))
+ adjust_pc(vcpu);
+
+ vcpu_put(vcpu);
+
+ return ret;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct kvm_device_attr attr;
+ int ret;
+
+ switch (ioctl) {
+ case KVM_ARM_VCPU_INIT: {
+ struct kvm_vcpu_init init;
+
+ ret = -EFAULT;
+ if (copy_from_user(&init, argp, sizeof(init)))
+ break;
+
+ ret = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ break;
+ }
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+
+ ret = -ENOEXEC;
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ break;
+
+ ret = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+
+ if (ioctl == KVM_SET_ONE_REG)
+ ret = kvm_arm_set_reg(vcpu, &reg);
+ else
+ ret = kvm_arm_get_reg(vcpu, &reg);
+ break;
+ }
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned int n;
+
+ ret = -ENOEXEC;
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ break;
+ ret = -EPERM;
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ break;
+ ret = -EFAULT;
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_arm_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ break;
+ ret = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ ret = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
+ case KVM_ARM_VCPU_FINALIZE: {
+ int what;
+
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
+
+ if (get_user(what, (const int __user *)argp))
+ return -EFAULT;
+
+ ret = kvm_arm_vcpu_finalize(vcpu, what);
+ break;
+ }
+ case KVM_SET_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_set_attr(vcpu, &attr);
+ break;
+ }
+ case KVM_GET_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_get_attr(vcpu, &attr);
+ break;
+ }
+ case KVM_HAS_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_has_attr(vcpu, &attr);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
diff --git a/arch/s390/kvm/arm64/guest.c b/arch/s390/kvm/arm64/guest.c
index 00886755accf..893d48037292 100644
--- a/arch/s390/kvm/arm64/guest.c
+++ b/arch/s390/kvm/arm64/guest.c
@@ -4,7 +4,7 @@
#include "guest.h"
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
@@ -17,7 +17,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
/* ARM64 stats */
STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
@@ -50,6 +50,73 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
return num_core_regs(vcpu);
}
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return get_core_reg(vcpu, reg);
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return set_core_reg(vcpu, reg);
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
diff --git a/arch/s390/kvm/arm64/guest.h b/arch/s390/kvm/arm64/guest.h
index db635d513c2c..847489fb81be 100644
--- a/arch/s390/kvm/arm64/guest.h
+++ b/arch/s390/kvm/arm64/guest.h
@@ -6,5 +6,10 @@
#include <kvm/arm64/guest.h>
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
#endif /* KVM_ARM_GUEST_H */
diff --git a/arch/s390/kvm/arm64/reset.c b/arch/s390/kvm/arm64/reset.c
new file mode 100644
index 000000000000..9a12d5f19f6a
--- /dev/null
+++ b/arch/s390/kvm/arm64/reset.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm64/reset.h>
+
+#include "reset.h"
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_reset_state reset_state;
+
+ spin_lock(&vcpu->arch.mp_state_lock);
+ reset_state = vcpu->arch.reset_state;
+ vcpu->arch.reset_state.reset = false;
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ /*
+ * disable preemption around the vcpu reset as we might otherwise race with
+ * preempt notifiers which call stiasrm/lasrm from put/load
+ */
+ preempt_disable();
+
+ kvm_reset_vcpu_core_regs(vcpu);
+
+ if (reset_state.reset) {
+ *vcpu_pc(vcpu) = reset_state.pc;
+ vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
+ vcpu_clear_flag(vcpu, EXCEPT_MASK);
+ vcpu_clear_flag(vcpu, INCREMENT_PC);
+ vcpu_set_reg(vcpu, 0, reset_state.r0);
+ }
+
+ preempt_enable();
+}
+
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ return 0;
+}
diff --git a/arch/s390/kvm/arm64/reset.h b/arch/s390/kvm/arm64/reset.h
new file mode 100644
index 000000000000..a5c5304e47bc
--- /dev/null
+++ b/arch/s390/kvm/arm64/reset.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef KVM_ARM_RESET_H
+#define KVM_ARM_RESET_H
+
+#include <linux/kvm_host.h>
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
+
+#endif /* KVM_ARM_RESET_H */
--
2.51.0
next prev parent reply other threads:[~2026-04-28 15:57 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-28 15:55 [PATCH v2 00/28] KVM: s390: Introduce arm64 KVM Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 01/28] VFIO: take reference to the KVM module Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 02/28] KVM, vfio: remove symbol_get(kvm_get_kvm_safe) from vfio Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 03/28] KVM, vfio: remove symbol_get(kvm_put_kvm) " Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 04/28] arm64: Provide arm64 UAPI for other host architectures Steffen Eiden
2026-04-29 8:04 ` Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 05/28] arm64: Extract sysreg definitions Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 06/28] arm64: Provide arm64 API for non-native architectures Steffen Eiden
2026-04-28 15:55 ` [PATCH v2 07/28] KVM: arm64: Provide arm64 KVM " Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 08/28] arm64: Extract pstate definitions from ptrace Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 09/28] KVM: arm64: Share kvm_emulate definitions Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 10/28] KVM: arm64: Make some arm64 KVM code shareable Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 11/28] KVM: arm64: Access elements of vcpu_gp_regs individually Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 12/28] KVM: arm64: Share reset general register code Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 13/28] KVM: arm64: Extract & share ipa size shift calculation Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 14/28] MAINTAINERS: Add Steffen as reviewer for KVM/arm64 Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 15/28] KVM: s390: Move s390 kvm code into a subdirectory Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 16/28] KVM: S390: Refactor gmap Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 17/28] KVM: Make device name configurable Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 18/28] KVM: Remove KVM_MMIO as config option Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 19/28] KVM: s390: Prepare kvm-s390 for a second kvm module Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 20/28] s390: Introduce Start Arm Execution instruction Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 21/28] KVM: s390: arm64: Introduce host definitions Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 22/28] s390/hwcaps: Report SAE support as hwcap Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 23/28] KVM: s390: Add basic arm64 kvm module Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 24/28] KVM: s390: arm64: Implement required functions Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 25/28] KVM: s390: arm64: Implement vm/vcpu create destroy Steffen Eiden
2026-04-28 15:56 ` Steffen Eiden [this message]
2026-04-28 15:56 ` [PATCH v2 27/28] KVM: s390: arm64: Implement basic page fault handler Steffen Eiden
2026-04-28 15:56 ` [PATCH v2 28/28] KVM: s390: arm64: Enable KVM_ARM64 config and Kbuild Steffen Eiden
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260428155622.1361364-27-seiden@linux.ibm.com \
--to=seiden@linux.ibm.com \
--cc=Ulrich.Weigand@de.ibm.com \
--cc=arnd@arndb.de \
--cc=borntraeger@linux.ibm.com \
--cc=brueckner@linux.ibm.com \
--cc=catalin.marinas@arm.com \
--cc=david@kernel.org \
--cc=frankja@linux.ibm.com \
--cc=ggala@linux.ibm.com \
--cc=gra@linux.ibm.com \
--cc=imbrenda@linux.ibm.com \
--cc=joey.gouly@arm.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=maz@kernel.org \
--cc=oss@nina.schoetterlglausch.eu \
--cc=oupton@kernel.org \
--cc=pbonzini@redhat.com \
--cc=suzuki.poulose@arm.com \
--cc=will@kernel.org \
--cc=yuzenghui@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.