* [PATCH v1 01/27] VFIO: take reference to the KVM module
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
@ 2026-04-02 4:20 ` Steffen Eiden
2026-04-02 9:18 ` Paolo Bonzini
2026-04-02 4:20 ` [PATCH v1 02/27] KVM, vfio: remove symbol_get(kvm_get_kvm_safe) from vfio Steffen Eiden
` (26 subsequent siblings)
27 siblings, 1 reply; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:20 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Paolo Bonzini <pbonzini@redhat.com>
VFIO is implicitly taking a reference to the KVM module between
vfio_device_get_kvm_safe and vfio_device_put_kvm, thanks to
symbol_get and symbol_put.
In preparation for removing symbol_get and symbol_put themselves
from VFIO, actually store a pointer to the KVM module and use
module_get()/module_put() to keep KVM alive.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
drivers/vfio/device_cdev.c | 2 +-
drivers/vfio/group.c | 5 +++--
drivers/vfio/vfio.h | 15 ++++++++++-----
drivers/vfio/vfio_main.c | 18 +++++++++++++-----
include/linux/vfio.h | 3 ++-
virt/kvm/vfio.c | 14 ++++++++------
6 files changed, 37 insertions(+), 20 deletions(-)
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index 8ceca24ac136..a67d7215c239 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -56,7 +56,7 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
{
spin_lock(&df->kvm_ref_lock);
- vfio_device_get_kvm_safe(df->device, df->kvm);
+ vfio_device_get_kvm_safe(df->device, df->kvm, df->kvm_module);
spin_unlock(&df->kvm_ref_lock);
}
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 4f15016d2a5f..7d28f45fefaa 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -158,7 +158,7 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
static void vfio_device_group_get_kvm_safe(struct vfio_device *device)
{
spin_lock(&device->group->kvm_ref_lock);
- vfio_device_get_kvm_safe(device, device->group->kvm);
+ vfio_device_get_kvm_safe(device, device->group->kvm, device->group->kvm_module);
spin_unlock(&device->group->kvm_ref_lock);
}
@@ -858,10 +858,11 @@ bool vfio_group_enforced_coherent(struct vfio_group *group)
return ret;
}
-void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm, struct module *kvm_module)
{
spin_lock(&group->kvm_ref_lock);
group->kvm = kvm;
+ group->kvm_module = kvm_module;
spin_unlock(&group->kvm_ref_lock);
}
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 50128da18bca..a0c38f89b30a 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -22,8 +22,9 @@ struct vfio_device_file {
u8 access_granted;
u32 devid; /* only valid when iommufd is valid */
- spinlock_t kvm_ref_lock; /* protect kvm field */
+ spinlock_t kvm_ref_lock; /* protect kvm and kvm_module fields */
struct kvm *kvm;
+ struct module *kvm_module;
struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
};
@@ -89,6 +90,7 @@ struct vfio_group {
enum vfio_group_type type;
struct mutex group_lock;
struct kvm *kvm;
+ struct module *kvm_module;
struct file *opened_file;
struct blocking_notifier_head notifier;
struct iommufd_ctx *iommufd;
@@ -108,7 +110,7 @@ void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_df_group_close(struct vfio_device_file *df);
struct vfio_group *vfio_group_from_file(struct file *file);
bool vfio_group_enforced_coherent(struct vfio_group *group);
-void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
+void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm, struct module *kvm_module);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
@@ -171,7 +173,8 @@ static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
return true;
}
-static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
+static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm,
+ struct module *kvm_module)
{
}
@@ -435,11 +438,13 @@ static inline void vfio_virqfd_exit(void)
#endif
#if IS_ENABLED(CONFIG_KVM)
-void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
+ struct module *kvm_module);
void vfio_device_put_kvm(struct vfio_device *device);
#else
static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
- struct kvm *kvm)
+ struct kvm *kvm,
+ struct module *kvm_module)
{
}
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 742477546b15..b1b753889a77 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -433,7 +433,8 @@ void vfio_unregister_group_dev(struct vfio_device *device)
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
#if IS_ENABLED(CONFIG_KVM)
-void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
+void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
+ struct module *kvm_module)
{
void (*pfn)(struct kvm *kvm);
bool (*fn)(struct kvm *kvm);
@@ -444,6 +445,9 @@ void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
if (!kvm)
return;
+ if (!try_module_get(kvm_module))
+ return;
+
pfn = symbol_get(kvm_put_kvm);
if (WARN_ON(!pfn))
return;
@@ -463,6 +467,7 @@ void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
device->put_kvm = pfn;
device->kvm = kvm;
+ device->kvm_module = kvm_module;
}
void vfio_device_put_kvm(struct vfio_device *device)
@@ -480,6 +485,8 @@ void vfio_device_put_kvm(struct vfio_device *device)
symbol_put(kvm_put_kvm);
clear:
+ module_put(device->kvm_module);
+ device->kvm_module = NULL;
device->kvm = NULL;
}
#endif
@@ -1483,7 +1490,7 @@ bool vfio_file_enforced_coherent(struct file *file)
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
-static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
+static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm, struct module *kvm_module)
{
struct vfio_device_file *df = file->private_data;
@@ -1494,6 +1501,7 @@ static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
*/
spin_lock(&df->kvm_ref_lock);
df->kvm = kvm;
+ df->kvm_module = kvm_module;
spin_unlock(&df->kvm_ref_lock);
}
@@ -1505,16 +1513,16 @@ static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
* When a VFIO device is first opened the KVM will be available in
* device->kvm if one was associated with the file.
*/
-void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm, struct module *kvm_module)
{
struct vfio_group *group;
group = vfio_group_from_file(file);
if (group)
- vfio_group_set_kvm(group, kvm);
+ vfio_group_set_kvm(group, kvm, kvm_module);
if (vfio_device_from_file(file))
- vfio_device_file_set_kvm(file, kvm);
+ vfio_device_file_set_kvm(file, kvm, kvm_module);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e90859956514..69a8d527b0e8 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -53,6 +53,7 @@ struct vfio_device {
struct list_head dev_set_list;
unsigned int migration_flags;
struct kvm *kvm;
+ struct module *kvm_module;
/* Members below here are private, not for driver use */
unsigned int index;
@@ -339,7 +340,7 @@ static inline bool vfio_file_has_dev(struct file *file, struct vfio_device *devi
#endif
bool vfio_file_is_valid(struct file *file);
bool vfio_file_enforced_coherent(struct file *file);
-void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
+void vfio_file_set_kvm(struct file *file, struct kvm *kvm, struct module *kvm_module);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 9f9acb66cc1e..8161229f4b86 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -35,15 +35,15 @@ struct kvm_vfio {
bool noncoherent;
};
-static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm)
+static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm, struct module *module)
{
- void (*fn)(struct file *file, struct kvm *kvm);
+ void (*fn)(struct file *file, struct kvm *kvm, struct module *kvm_module);
fn = symbol_get(vfio_file_set_kvm);
if (!fn)
return;
- fn(file, kvm);
+ fn(file, kvm, module);
symbol_put(vfio_file_set_kvm);
}
@@ -142,6 +142,7 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
+ struct module *module;
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
struct file *filp;
@@ -157,6 +158,7 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
goto out_fput;
}
+ module = filp->f_op->owner;
mutex_lock(&kv->lock);
list_for_each_entry(kvf, &kv->file_list, node) {
@@ -175,7 +177,7 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
kvf->file = get_file(filp);
list_add_tail(&kvf->node, &kv->file_list);
- kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
+ kvm_vfio_file_set_kvm(kvf->file, dev->kvm, module);
kvm_vfio_update_coherency(dev);
out_unlock:
@@ -207,7 +209,7 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvf->file, NULL);
+ kvm_vfio_file_set_kvm(kvf->file, NULL, NULL);
fput(kvf->file);
kfree(kvf);
ret = 0;
@@ -330,7 +332,7 @@ static void kvm_vfio_release(struct kvm_device *dev)
#ifdef CONFIG_SPAPR_TCE_IOMMU
kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvf->file, NULL);
+ kvm_vfio_file_set_kvm(kvf->file, NULL, NULL);
fput(kvf->file);
list_del(&kvf->node);
kfree(kvf);
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* Re: [PATCH v1 01/27] VFIO: take reference to the KVM module
2026-04-02 4:20 ` [PATCH v1 01/27] VFIO: take reference to the KVM module Steffen Eiden
@ 2026-04-02 9:18 ` Paolo Bonzini
0 siblings, 0 replies; 33+ messages in thread
From: Paolo Bonzini @ 2026-04-02 9:18 UTC (permalink / raw)
To: Steffen Eiden, kvm, kvmarm, linux-arm-kernel, linux-kernel,
linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Suzuki K Poulose, Ulrich Weigand, Will Deacon, Zenghui Yu
On 4/2/26 06:20, Steffen Eiden wrote:
> @@ -157,6 +158,7 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
> goto out_fput;
> }
>
> + module = filp->f_op->owner;
This patch is incorrect because filp->f_op->owner is actually the VFIO
module.
I'll send a replacement series for the first three patches here.
Paolo
^ permalink raw reply [flat|nested] 33+ messages in thread
* [PATCH v1 02/27] KVM, vfio: remove symbol_get(kvm_get_kvm_safe) from vfio
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
2026-04-02 4:20 ` [PATCH v1 01/27] VFIO: take reference to the KVM module Steffen Eiden
@ 2026-04-02 4:20 ` Steffen Eiden
2026-04-02 4:20 ` [PATCH v1 03/27] KVM, vfio: remove symbol_get(kvm_put_kvm) " Steffen Eiden
` (25 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:20 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Paolo Bonzini <pbonzini@redhat.com>
Right now, KVM and VFIO are using symbol_get to access each other's
symbols because of a circular reference between the modules, as well
as to avoid loading them unnecessarily.
However, usage of symbol_get is mostly deprecated and there are just a
handful of users left. In the case of VFIO, in particular, the
functions it calls can be made inline. Start with kvm_get_kvm_safe, for
which it is trivial to do so.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/x86/kvm/mmu/tdp_mmu.c | 2 +-
arch/x86/kvm/vmx/nested.h | 4 ++--
drivers/vfio/vfio_main.c | 10 +---------
include/linux/kvm_host.h | 9 +++++----
include/linux/kvm_types.h | 24 ++++++++++++++++++++++++
virt/kvm/kvm_main.c | 26 +++++---------------------
6 files changed, 38 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 9c26038f6b77..a88686b5db24 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1136,7 +1136,7 @@ void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
* being destroyed in an error path of KVM_CREATE_VM.
*/
if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
- refcount_read(&kvm->users_count) && kvm->created_vcpus)
+ refcount_read(&kvm->rc.users_count) && kvm->created_vcpus)
lockdep_assert_held_write(&kvm->mmu_lock);
/*
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 213a448104af..2c83fc905698 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -58,7 +58,7 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
- !refcount_read(&vcpu->kvm->users_count));
+ !refcount_read(&vcpu->kvm->rc.users_count));
return to_vmx(vcpu)->nested.cached_vmcs12;
}
@@ -66,7 +66,7 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
- !refcount_read(&vcpu->kvm->users_count));
+ !refcount_read(&vcpu->kvm->rc.users_count));
return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index b1b753889a77..42f515519d87 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -437,7 +437,6 @@ void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
struct module *kvm_module)
{
void (*pfn)(struct kvm *kvm);
- bool (*fn)(struct kvm *kvm);
bool ret;
lockdep_assert_held(&device->dev_set->lock);
@@ -452,14 +451,7 @@ void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
if (WARN_ON(!pfn))
return;
- fn = symbol_get(kvm_get_kvm_safe);
- if (WARN_ON(!fn)) {
- symbol_put(kvm_put_kvm);
- return;
- }
-
- ret = fn(kvm);
- symbol_put(kvm_get_kvm_safe);
+ ret = kvm_get_kvm_safe(kvm);
if (!ret) {
symbol_put(kvm_put_kvm);
return;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6b76e7a6f4c2..dc18ee99bba4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -767,6 +767,9 @@ struct kvm_memslots {
};
struct kvm {
+ /* Must be the first field, see function definitions in kvm_types.h. */
+ struct kvm_refcount rc;
+
#ifdef KVM_HAVE_MMU_RWLOCK
rwlock_t mmu_lock;
#else
@@ -830,7 +833,6 @@ struct kvm {
struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
- refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
@@ -876,6 +878,7 @@ struct kvm {
#endif
char stats_id[KVM_STATS_NAME_SIZE];
};
+static_assert(offsetof(struct kvm, rc) == 0);
#define kvm_err(fmt, ...) \
pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
@@ -1062,8 +1065,6 @@ static inline void kvm_irqfd_exit(void)
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
-void kvm_get_kvm(struct kvm *kvm);
-bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
@@ -1073,7 +1074,7 @@ static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
- !refcount_read(&kvm->users_count));
+ !refcount_read(&kvm->rc.users_count));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a568d8e6f4e8..add7cc2016e8 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -33,6 +33,7 @@
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
+#include <linux/refcount.h>
struct kvm;
struct kvm_async_pf;
@@ -140,6 +141,29 @@ struct kvm_vcpu_stat_generic {
};
#define KVM_STATS_NAME_SIZE 48
+
+struct kvm_refcount {
+ refcount_t users_count;
+};
+
+static inline void kvm_get_kvm(struct kvm *kvm)
+{
+ struct kvm_refcount *rc = (struct kvm_refcount *)kvm;
+
+ refcount_inc(&rc->users_count);
+}
+
+/*
+ * A safe version of kvm_get_kvm(), making sure the vm is not being destroyed.
+ * Return true if kvm referenced successfully, false otherwise.
+ */
+static inline bool kvm_get_kvm_safe(struct kvm *kvm)
+{
+ struct kvm_refcount *rc = (struct kvm_refcount *)kvm;
+
+ return refcount_inc_not_zero(&rc->users_count);
+}
+
#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9093251beb39..cb5e01f92503 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1099,7 +1099,7 @@ static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
enum kvm_bus idx)
{
return rcu_dereference_protected(kvm->buses[idx],
- !refcount_read(&kvm->users_count));
+ !refcount_read(&kvm->rc.users_count));
}
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
@@ -1153,7 +1153,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (r)
goto out_err_no_irq_routing;
- refcount_set(&kvm->users_count, 1);
+ refcount_set(&kvm->rc.users_count, 1);
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
for (j = 0; j < 2; j++) {
@@ -1223,7 +1223,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
out_err_no_disable:
kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
- WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
+ WARN_ON_ONCE(!refcount_dec_and_test(&kvm->rc.users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm_get_bus_for_destruction(kvm, i));
kvm_free_irq_routing(kvm);
@@ -1316,25 +1316,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmdrop(mm);
}
-void kvm_get_kvm(struct kvm *kvm)
-{
- refcount_inc(&kvm->users_count);
-}
-EXPORT_SYMBOL_GPL(kvm_get_kvm);
-
-/*
- * Make sure the vm is not during destruction, which is a safe version of
- * kvm_get_kvm(). Return true if kvm referenced successfully, false otherwise.
- */
-bool kvm_get_kvm_safe(struct kvm *kvm)
-{
- return refcount_inc_not_zero(&kvm->users_count);
-}
-EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
-
void kvm_put_kvm(struct kvm *kvm)
{
- if (refcount_dec_and_test(&kvm->users_count))
+ if (refcount_dec_and_test(&kvm->rc.users_count))
kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
@@ -1348,7 +1332,7 @@ EXPORT_SYMBOL_GPL(kvm_put_kvm);
*/
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
- WARN_ON(refcount_dec_and_test(&kvm->users_count));
+ WARN_ON(refcount_dec_and_test(&kvm->rc.users_count));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 03/27] KVM, vfio: remove symbol_get(kvm_put_kvm) from vfio
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
2026-04-02 4:20 ` [PATCH v1 01/27] VFIO: take reference to the KVM module Steffen Eiden
2026-04-02 4:20 ` [PATCH v1 02/27] KVM, vfio: remove symbol_get(kvm_get_kvm_safe) from vfio Steffen Eiden
@ 2026-04-02 4:20 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 04/27] arm64: Provide arm64 UAPI for other host architectures Steffen Eiden
` (24 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:20 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Paolo Bonzini <pbonzini@redhat.com>
Right now, KVM and VFIO are using symbol_get to access each other's
symbols because of a circular reference between the modules, as well
as to avoid loading them unnecessarily.
The remaining use in VFIO is for kvm_put_kvm, which is not inline
because it needs to call kvm_destroy_vm. However, storing the
address of kvm_destroy_vm in the "struct kvm" is enough to remove
the dependency from VFIO.
This also makes it possible to direct kvm_put_kvm to different
implementations of kvm_destroy_vm.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
drivers/vfio/vfio_main.c | 29 +++++------------------------
include/linux/kvm_host.h | 1 -
include/linux/kvm_types.h | 9 +++++++++
include/linux/vfio.h | 1 -
virt/kvm/kvm_main.c | 9 ++-------
5 files changed, 16 insertions(+), 33 deletions(-)
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 42f515519d87..e9c6353c74d8 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -17,7 +17,7 @@
#include <linux/idr.h>
#include <linux/iommu.h>
#if IS_ENABLED(CONFIG_KVM)
-#include <linux/kvm_host.h>
+#include <linux/kvm_types.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -436,9 +436,6 @@ EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
struct module *kvm_module)
{
- void (*pfn)(struct kvm *kvm);
- bool ret;
-
lockdep_assert_held(&device->dev_set->lock);
if (!kvm)
@@ -447,19 +444,10 @@ void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm,
if (!try_module_get(kvm_module))
return;
- pfn = symbol_get(kvm_put_kvm);
- if (WARN_ON(!pfn))
- return;
-
- ret = kvm_get_kvm_safe(kvm);
- if (!ret) {
- symbol_put(kvm_put_kvm);
- return;
+ if (kvm_get_kvm_safe(kvm)) {
+ device->kvm = kvm;
+ device->kvm_module = kvm_module;
}
-
- device->put_kvm = pfn;
- device->kvm = kvm;
- device->kvm_module = kvm_module;
}
void vfio_device_put_kvm(struct vfio_device *device)
@@ -469,15 +457,8 @@ void vfio_device_put_kvm(struct vfio_device *device)
if (!device->kvm)
return;
- if (WARN_ON(!device->put_kvm))
- goto clear;
-
- device->put_kvm(device->kvm);
- device->put_kvm = NULL;
- symbol_put(kvm_put_kvm);
-
-clear:
module_put(device->kvm_module);
+ kvm_put_kvm(device->kvm);
device->kvm_module = NULL;
device->kvm = NULL;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index dc18ee99bba4..13f903993ed0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1065,7 +1065,6 @@ static inline void kvm_irqfd_exit(void)
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
-void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index add7cc2016e8..aadee536771a 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -144,6 +144,7 @@ struct kvm_vcpu_stat_generic {
struct kvm_refcount {
refcount_t users_count;
+ void (*destroy)(struct kvm *kvm);
};
static inline void kvm_get_kvm(struct kvm *kvm)
@@ -164,6 +165,14 @@ static inline bool kvm_get_kvm_safe(struct kvm *kvm)
return refcount_inc_not_zero(&rc->users_count);
}
+static inline void kvm_put_kvm(struct kvm *kvm)
+{
+ struct kvm_refcount *rc = (struct kvm_refcount *)kvm;
+
+ if (refcount_dec_and_test(&rc->users_count))
+ rc->destroy(kvm);
+}
+
#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 69a8d527b0e8..5c69532d6127 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -65,7 +65,6 @@ struct vfio_device {
unsigned int open_count;
struct completion comp;
struct iommufd_access *iommufd_access;
- void (*put_kvm)(struct kvm *kvm);
struct inode *inode;
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cb5e01f92503..642f9e9638cc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -120,6 +120,7 @@ static struct dentry *kvm_debugfs_dir;
static const struct file_operations stat_fops_per_vm;
+static void kvm_destroy_vm(struct kvm *kvm);
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
@@ -1154,6 +1155,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
goto out_err_no_irq_routing;
refcount_set(&kvm->rc.users_count, 1);
+ kvm->rc.destroy = kvm_destroy_vm;
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
for (j = 0; j < 2; j++) {
@@ -1316,13 +1318,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmdrop(mm);
}
-void kvm_put_kvm(struct kvm *kvm)
-{
- if (refcount_dec_and_test(&kvm->rc.users_count))
- kvm_destroy_vm(kvm);
-}
-EXPORT_SYMBOL_GPL(kvm_put_kvm);
-
/*
* Used to put a reference that was taken on behalf of an object associated
* with a user-visible file descriptor, e.g. a vcpu or device, if installation
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 04/27] arm64: Provide arm64 UAPI for other host architectures
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (2 preceding siblings ...)
2026-04-02 4:20 ` [PATCH v1 03/27] KVM, vfio: remove symbol_get(kvm_put_kvm) " Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 05/27] arm64: Extract sysreg definitions Steffen Eiden
` (23 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Enable the ARM64 userspace API to be used on non-arm64 host
architectures, with initial support for s390.
The arm64 KVM UAPI headers are relocated to include/uapi/arch/arm64/,
allowing non‑arm64 hosts (such as s390) to use the arm64 KVM userspace API.
Likewise, the arm64 KVM kernel‑internal headers are relocated to
include/kvm/arm64/, and several arm64 asm headers are relocated to
include/arch/arm64/asm for architecture‑independent consumption.
To achieve architecture independence, some type aliases are introduced,
which conditionally resolve to native arm64 types when building on arm64
or fall back to ABI-compatible inline struct definitions on other
architectures.
The build system is updated to install the moved UAPI headers to their
original location, and to conditionally export arm64 architecture
headers for s390. This infrastructure enables s390 systems to host arm64
virtual machines while maintaining full compatibility with the existing
arm64 KVM-UAPI, requiring only minimal, compatible changes to the arm64
UAPI-headers itself.
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
MAINTAINERS | 2 +
arch/arm64/include/uapi/asm/Kbuild | 3 +
include/uapi/Kbuild | 6 +
.../uapi/arch/arm64}/asm/kvm.h | 24 +-
.../uapi/arch/arm64}/asm/sve_context.h | 0
include/uapi/arch/arm64/linux/kvm.h | 8 +
include/uapi/linux/{kvm.h => kvm-generic.h} | 11 +-
include/uapi/linux/kvm.h | 1649 +----------------
| 14 +-
usr/include/Makefile | 1 +
10 files changed, 61 insertions(+), 1657 deletions(-)
rename {arch/arm64/include/uapi => include/uapi/arch/arm64}/asm/kvm.h (97%)
rename {arch/arm64/include/uapi => include/uapi/arch/arm64}/asm/sve_context.h (100%)
create mode 100644 include/uapi/arch/arm64/linux/kvm.h
copy include/uapi/linux/{kvm.h => kvm-generic.h} (99%)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0481aca2286c..3f03ef9ee2bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3817,6 +3817,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
F: Documentation/arch/arm64/
F: arch/arm64/
+F: include/uapi/arch/arm64/
F: drivers/virt/coco/arm-cca-guest/
F: drivers/virt/coco/pkvm-guest/
F: tools/testing/selftests/arm64/
@@ -13993,6 +13994,7 @@ F: Documentation/virt/kvm/arm/
F: Documentation/virt/kvm/devices/arm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
+F: include/uapi/arch/arm64/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
F: tools/testing/selftests/kvm/*/arm64/
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index c6d141d7b7d7..b45584e83448 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,3 +2,6 @@
syscall-y += unistd_64.h
generic-y += kvm_para.h
+
+shared-uapi-y += kvm.h
+shared-uapi-y += sve_context.h
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 61ee6e59c930..a212098b81fe 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -4,11 +4,17 @@ no-export-headers += linux/a.out.h
endif
ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm.h),)
+ifneq ($(ARCH),arm64)
no-export-headers += linux/kvm.h
endif
+endif
ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
no-export-headers += linux/kvm_para.h
endif
endif
+
+ifneq ($(ARCH),s390)
+no-export-headers += arch/%
+endif
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/include/uapi/arch/arm64/asm/kvm.h
similarity index 97%
rename from arch/arm64/include/uapi/asm/kvm.h
rename to include/uapi/arch/arm64/asm/kvm.h
index a792a599b9d6..c8c621d26f09 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/include/uapi/arch/arm64/asm/kvm.h
@@ -34,8 +34,26 @@
#ifndef __ASSEMBLER__
#include <linux/psci.h>
#include <linux/types.h>
+#include "sve_context.h"
+
+#ifdef __arm64__
#include <asm/ptrace.h>
-#include <asm/sve_context.h>
+typedef struct user_pt_regs user_pt_regs_arm64;
+typedef struct user_fpsimd_state user_fpsimd_state_arm64;
+#else
+typedef struct {
+ __u64 regs[31];
+ __u64 sp;
+ __u64 pc;
+ __u64 pstate;
+} user_pt_regs_arm64;
+typedef struct {
+ __uint128_t vregs[32];
+ __u32 fpsr;
+ __u32 fpcr;
+ __u32 __reserved[2];
+} __attribute__((aligned(16))) user_fpsimd_state_arm64;
+#endif
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_VCPU_EVENTS
@@ -44,14 +62,14 @@
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
struct kvm_regs {
- struct user_pt_regs regs; /* sp = sp_el0 */
+ user_pt_regs_arm64 regs; /* sp = sp_el0 */
__u64 sp_el1;
__u64 elr_el1;
__u64 spsr[KVM_NR_SPSR];
- struct user_fpsimd_state fp_regs;
+ user_fpsimd_state_arm64 fp_regs;
};
/*
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/include/uapi/arch/arm64/asm/sve_context.h
similarity index 100%
rename from arch/arm64/include/uapi/asm/sve_context.h
rename to include/uapi/arch/arm64/asm/sve_context.h
diff --git a/include/uapi/arch/arm64/linux/kvm.h b/include/uapi/arch/arm64/linux/kvm.h
new file mode 100644
index 000000000000..21528a146b4d
--- /dev/null
+++ b/include/uapi/arch/arm64/linux/kvm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_KVM_H
+#define __LINUX_KVM_H
+
+#include <arch/arm64/asm/kvm.h>
+#include <linux/kvm-generic.h>
+
+#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm-generic.h
similarity index 99%
copy from include/uapi/linux/kvm.h
copy to include/uapi/linux/kvm-generic.h
index 80364d4dbebb..b34dceaf2f2d 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm-generic.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_KVM_H
-#define __LINUX_KVM_H
+#ifndef __LINUX_GENERIC_KVM_H
+#define __LINUX_GENERIC_KVM_H
/*
* Userspace interface for /dev/kvm - kernel based virtual machine
@@ -8,11 +8,14 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#ifndef __LINUX_KVM_H
+#error "Do not include <linux/kvm-generic.h> directly; include <linux/kvm.h> instead"
+#endif /* __LINUX_KVM_H */
+
#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
-#include <asm/kvm.h>
#ifdef __KERNEL__
#include <linux/kvm_types.h>
@@ -1658,4 +1661,4 @@ struct kvm_pre_fault_memory {
__u64 padding[5];
};
-#endif /* __LINUX_KVM_H */
+#endif /* __LINUX_GENERIC_KVM_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 80364d4dbebb..d7362e7519b7 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -8,1654 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
-#include <linux/const.h>
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/ioctl.h>
#include <asm/kvm.h>
-
-#ifdef __KERNEL__
-#include <linux/kvm_types.h>
-#endif
-
-#define KVM_API_VERSION 12
-
-/*
- * Backwards-compatible definitions.
- */
-#define __KVM_HAVE_GUEST_DEBUG
-
-/* for KVM_SET_USER_MEMORY_REGION */
-struct kvm_userspace_memory_region {
- __u32 slot;
- __u32 flags;
- __u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr; /* start of the userspace allocated memory */
-};
-
-/* for KVM_SET_USER_MEMORY_REGION2 */
-struct kvm_userspace_memory_region2 {
- __u32 slot;
- __u32 flags;
- __u64 guest_phys_addr;
- __u64 memory_size;
- __u64 userspace_addr;
- __u64 guest_memfd_offset;
- __u32 guest_memfd;
- __u32 pad1;
- __u64 pad2[14];
-};
-
-/*
- * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
- * userspace, other bits are reserved for kvm internal use which are defined
- * in include/linux/kvm_host.h.
- */
-#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
-#define KVM_MEM_READONLY (1UL << 1)
-#define KVM_MEM_GUEST_MEMFD (1UL << 2)
-
-/* for KVM_IRQ_LINE */
-struct kvm_irq_level {
- /*
- * ACPI gsi notion of irq.
- * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
- * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
- * For ARM: See Documentation/virt/kvm/api.rst
- */
- union {
- __u32 irq;
- __s32 status;
- };
- __u32 level;
-};
-
-
-struct kvm_irqchip {
- __u32 chip_id;
- __u32 pad;
- union {
- char dummy[512]; /* reserving space */
-#ifdef __KVM_HAVE_PIT
- struct kvm_pic_state pic;
-#endif
-#ifdef __KVM_HAVE_IOAPIC
- struct kvm_ioapic_state ioapic;
-#endif
- } chip;
-};
-
-/* for KVM_CREATE_PIT2 */
-struct kvm_pit_config {
- __u32 flags;
- __u32 pad[15];
-};
-
-#define KVM_PIT_SPEAKER_DUMMY 1
-
-struct kvm_hyperv_exit {
-#define KVM_EXIT_HYPERV_SYNIC 1
-#define KVM_EXIT_HYPERV_HCALL 2
-#define KVM_EXIT_HYPERV_SYNDBG 3
- __u32 type;
- __u32 pad1;
- union {
- struct {
- __u32 msr;
- __u32 pad2;
- __u64 control;
- __u64 evt_page;
- __u64 msg_page;
- } synic;
- struct {
- __u64 input;
- __u64 result;
- __u64 params[2];
- } hcall;
- struct {
- __u32 msr;
- __u32 pad2;
- __u64 control;
- __u64 status;
- __u64 send_page;
- __u64 recv_page;
- __u64 pending_page;
- } syndbg;
- } u;
-};
-
-struct kvm_xen_exit {
-#define KVM_EXIT_XEN_HCALL 1
- __u32 type;
- union {
- struct {
- __u32 longmode;
- __u32 cpl;
- __u64 input;
- __u64 result;
- __u64 params[6];
- } hcall;
- } u;
-};
-
-struct kvm_exit_snp_req_certs {
- __u64 gpa;
- __u64 npages;
- __u64 ret;
-};
-
-#define KVM_S390_GET_SKEYS_NONE 1
-#define KVM_S390_SKEYS_MAX 1048576
-
-#define KVM_EXIT_UNKNOWN 0
-#define KVM_EXIT_EXCEPTION 1
-#define KVM_EXIT_IO 2
-#define KVM_EXIT_HYPERCALL 3
-#define KVM_EXIT_DEBUG 4
-#define KVM_EXIT_HLT 5
-#define KVM_EXIT_MMIO 6
-#define KVM_EXIT_IRQ_WINDOW_OPEN 7
-#define KVM_EXIT_SHUTDOWN 8
-#define KVM_EXIT_FAIL_ENTRY 9
-#define KVM_EXIT_INTR 10
-#define KVM_EXIT_SET_TPR 11
-#define KVM_EXIT_TPR_ACCESS 12
-#define KVM_EXIT_S390_SIEIC 13
-#define KVM_EXIT_S390_RESET 14
-#define KVM_EXIT_DCR 15 /* deprecated */
-#define KVM_EXIT_NMI 16
-#define KVM_EXIT_INTERNAL_ERROR 17
-#define KVM_EXIT_OSI 18
-#define KVM_EXIT_PAPR_HCALL 19
-#define KVM_EXIT_S390_UCONTROL 20
-#define KVM_EXIT_WATCHDOG 21
-#define KVM_EXIT_S390_TSCH 22
-#define KVM_EXIT_EPR 23
-#define KVM_EXIT_SYSTEM_EVENT 24
-#define KVM_EXIT_S390_STSI 25
-#define KVM_EXIT_IOAPIC_EOI 26
-#define KVM_EXIT_HYPERV 27
-#define KVM_EXIT_ARM_NISV 28
-#define KVM_EXIT_X86_RDMSR 29
-#define KVM_EXIT_X86_WRMSR 30
-#define KVM_EXIT_DIRTY_RING_FULL 31
-#define KVM_EXIT_AP_RESET_HOLD 32
-#define KVM_EXIT_X86_BUS_LOCK 33
-#define KVM_EXIT_XEN 34
-#define KVM_EXIT_RISCV_SBI 35
-#define KVM_EXIT_RISCV_CSR 36
-#define KVM_EXIT_NOTIFY 37
-#define KVM_EXIT_LOONGARCH_IOCSR 38
-#define KVM_EXIT_MEMORY_FAULT 39
-#define KVM_EXIT_TDX 40
-#define KVM_EXIT_ARM_SEA 41
-#define KVM_EXIT_ARM_LDST64B 42
-#define KVM_EXIT_SNP_REQ_CERTS 43
-
-/* For KVM_EXIT_INTERNAL_ERROR */
-/* Emulate instruction failed. */
-#define KVM_INTERNAL_ERROR_EMULATION 1
-/* Encounter unexpected simultaneous exceptions. */
-#define KVM_INTERNAL_ERROR_SIMUL_EX 2
-/* Encounter unexpected vm-exit due to delivery event. */
-#define KVM_INTERNAL_ERROR_DELIVERY_EV 3
-/* Encounter unexpected vm-exit reason */
-#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
-
-/* Flags that describe what fields in emulation_failure hold valid data. */
-#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
-
-/*
- * struct kvm_run can be modified by userspace at any time, so KVM must be
- * careful to avoid TOCTOU bugs. In order to protect KVM, HINT_UNSAFE_IN_KVM()
- * renames fields in struct kvm_run from <symbol> to <symbol>__unsafe when
- * compiled into the kernel, ensuring that any use within KVM is obvious and
- * gets extra scrutiny.
- */
-#ifdef __KERNEL__
-#define HINT_UNSAFE_IN_KVM(_symbol) _symbol##__unsafe
-#else
-#define HINT_UNSAFE_IN_KVM(_symbol) _symbol
-#endif
-
-/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
-struct kvm_run {
- /* in */
- __u8 request_interrupt_window;
- __u8 HINT_UNSAFE_IN_KVM(immediate_exit);
- __u8 padding1[6];
-
- /* out */
- __u32 exit_reason;
- __u8 ready_for_interrupt_injection;
- __u8 if_flag;
- __u16 flags;
-
- /* in (pre_kvm_run), out (post_kvm_run) */
- __u64 cr8;
- __u64 apic_base;
-
-#ifdef __KVM_S390
- /* the processor status word for s390 */
- __u64 psw_mask; /* psw upper half */
- __u64 psw_addr; /* psw lower half */
-#endif
- union {
- /* KVM_EXIT_UNKNOWN */
- struct {
- __u64 hardware_exit_reason;
- } hw;
- /* KVM_EXIT_FAIL_ENTRY */
- struct {
- __u64 hardware_entry_failure_reason;
- __u32 cpu;
- } fail_entry;
- /* KVM_EXIT_EXCEPTION */
- struct {
- __u32 exception;
- __u32 error_code;
- } ex;
- /* KVM_EXIT_IO */
- struct {
-#define KVM_EXIT_IO_IN 0
-#define KVM_EXIT_IO_OUT 1
- __u8 direction;
- __u8 size; /* bytes */
- __u16 port;
- __u32 count;
- __u64 data_offset; /* relative to kvm_run start */
- } io;
- /* KVM_EXIT_DEBUG */
- struct {
- struct kvm_debug_exit_arch arch;
- } debug;
- /* KVM_EXIT_MMIO */
- struct {
- __u64 phys_addr;
- __u8 data[8];
- __u32 len;
- __u8 is_write;
- } mmio;
- /* KVM_EXIT_LOONGARCH_IOCSR */
- struct {
- __u64 phys_addr;
- __u8 data[8];
- __u32 len;
- __u8 is_write;
- } iocsr_io;
- /* KVM_EXIT_HYPERCALL */
- struct {
- __u64 nr;
- __u64 args[6];
- __u64 ret;
-
- union {
-#ifndef __KERNEL__
- __u32 longmode;
-#endif
- __u64 flags;
- };
- } hypercall;
- /* KVM_EXIT_TPR_ACCESS */
- struct {
- __u64 rip;
- __u32 is_write;
- __u32 pad;
- } tpr_access;
- /* KVM_EXIT_S390_SIEIC */
- struct {
- __u8 icptcode;
- __u16 ipa;
- __u32 ipb;
- } s390_sieic;
- /* KVM_EXIT_S390_RESET */
- __u64 s390_reset_flags;
- /* KVM_EXIT_S390_UCONTROL */
- struct {
- __u64 trans_exc_code;
- __u32 pgm_code;
- } s390_ucontrol;
- /* KVM_EXIT_DCR (deprecated) */
- struct {
- __u32 dcrn;
- __u32 data;
- __u8 is_write;
- } dcr;
- /* KVM_EXIT_INTERNAL_ERROR */
- struct {
- __u32 suberror;
- /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
- __u32 ndata;
- __u64 data[16];
- } internal;
- /*
- * KVM_INTERNAL_ERROR_EMULATION
- *
- * "struct emulation_failure" is an overlay of "struct internal"
- * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
- * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
- * sub-types, this struct is ABI! It also needs to be backwards
- * compatible with "struct internal". Take special care that
- * "ndata" is correct, that new fields are enumerated in "flags",
- * and that each flag enumerates fields that are 64-bit aligned
- * and sized (so that ndata+internal.data[] is valid/accurate).
- *
- * Space beyond the defined fields may be used to store arbitrary
- * debug information relating to the emulation failure. It is
- * accounted for in "ndata" but the format is unspecified and is
- * not represented in "flags". Any such information is *not* ABI!
- */
- struct {
- __u32 suberror;
- __u32 ndata;
- __u64 flags;
- union {
- struct {
- __u8 insn_size;
- __u8 insn_bytes[15];
- };
- };
- /* Arbitrary debug data may follow. */
- } emulation_failure;
- /* KVM_EXIT_OSI */
- struct {
- __u64 gprs[32];
- } osi;
- /* KVM_EXIT_PAPR_HCALL */
- struct {
- __u64 nr;
- __u64 ret;
- __u64 args[9];
- } papr_hcall;
- /* KVM_EXIT_S390_TSCH */
- struct {
- __u16 subchannel_id;
- __u16 subchannel_nr;
- __u32 io_int_parm;
- __u32 io_int_word;
- __u32 ipb;
- __u8 dequeued;
- } s390_tsch;
- /* KVM_EXIT_EPR */
- struct {
- __u32 epr;
- } epr;
- /* KVM_EXIT_SYSTEM_EVENT */
- struct {
-#define KVM_SYSTEM_EVENT_SHUTDOWN 1
-#define KVM_SYSTEM_EVENT_RESET 2
-#define KVM_SYSTEM_EVENT_CRASH 3
-#define KVM_SYSTEM_EVENT_WAKEUP 4
-#define KVM_SYSTEM_EVENT_SUSPEND 5
-#define KVM_SYSTEM_EVENT_SEV_TERM 6
-#define KVM_SYSTEM_EVENT_TDX_FATAL 7
- __u32 type;
- __u32 ndata;
- union {
-#ifndef __KERNEL__
- __u64 flags;
-#endif
- __u64 data[16];
- };
- } system_event;
- /* KVM_EXIT_S390_STSI */
- struct {
- __u64 addr;
- __u8 ar;
- __u8 reserved;
- __u8 fc;
- __u8 sel1;
- __u16 sel2;
- } s390_stsi;
- /* KVM_EXIT_IOAPIC_EOI */
- struct {
- __u8 vector;
- } eoi;
- /* KVM_EXIT_HYPERV */
- struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
- struct {
- __u64 esr_iss;
- __u64 fault_ipa;
- } arm_nisv;
- /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
- struct {
- __u8 error; /* user -> kernel */
- __u8 pad[7];
-#define KVM_MSR_EXIT_REASON_INVAL (1 << 0)
-#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1)
-#define KVM_MSR_EXIT_REASON_FILTER (1 << 2)
-#define KVM_MSR_EXIT_REASON_VALID_MASK (KVM_MSR_EXIT_REASON_INVAL | \
- KVM_MSR_EXIT_REASON_UNKNOWN | \
- KVM_MSR_EXIT_REASON_FILTER)
- __u32 reason; /* kernel -> user */
- __u32 index; /* kernel -> user */
- __u64 data; /* kernel <-> user */
- } msr;
- /* KVM_EXIT_XEN */
- struct kvm_xen_exit xen;
- /* KVM_EXIT_RISCV_SBI */
- struct {
- unsigned long extension_id;
- unsigned long function_id;
- unsigned long args[6];
- unsigned long ret[2];
- } riscv_sbi;
- /* KVM_EXIT_RISCV_CSR */
- struct {
- unsigned long csr_num;
- unsigned long new_value;
- unsigned long write_mask;
- unsigned long ret_value;
- } riscv_csr;
- /* KVM_EXIT_NOTIFY */
- struct {
-#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
- __u32 flags;
- } notify;
- /* KVM_EXIT_MEMORY_FAULT */
- struct {
-#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
- __u64 flags;
- __u64 gpa;
- __u64 size;
- } memory_fault;
- /* KVM_EXIT_TDX */
- struct {
- __u64 flags;
- __u64 nr;
- union {
- struct {
- __u64 ret;
- __u64 data[5];
- } unknown;
- struct {
- __u64 ret;
- __u64 gpa;
- __u64 size;
- } get_quote;
- struct {
- __u64 ret;
- __u64 leaf;
- __u64 r11, r12, r13, r14;
- } get_tdvmcall_info;
- struct {
- __u64 ret;
- __u64 vector;
- } setup_event_notify;
- };
- } tdx;
- /* KVM_EXIT_ARM_SEA */
- struct {
-#define KVM_EXIT_ARM_SEA_FLAG_GPA_VALID (1ULL << 0)
- __u64 flags;
- __u64 esr;
- __u64 gva;
- __u64 gpa;
- } arm_sea;
- /* KVM_EXIT_SNP_REQ_CERTS */
- struct kvm_exit_snp_req_certs snp_req_certs;
- /* Fix the size of the union. */
- char padding[256];
- };
-
- /* 2048 is the size of the char array used to bound/pad the size
- * of the union that holds sync regs.
- */
- #define SYNC_REGS_SIZE_BYTES 2048
- /*
- * shared registers between kvm and userspace.
- * kvm_valid_regs specifies the register classes set by the host
- * kvm_dirty_regs specified the register classes dirtied by userspace
- * struct kvm_sync_regs is architecture specific, as well as the
- * bits for kvm_valid_regs and kvm_dirty_regs
- */
- __u64 kvm_valid_regs;
- __u64 kvm_dirty_regs;
- union {
- struct kvm_sync_regs regs;
- char padding[SYNC_REGS_SIZE_BYTES];
- } s;
-};
-
-/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
-
-struct kvm_coalesced_mmio_zone {
- __u64 addr;
- __u32 size;
- union {
- __u32 pad;
- __u32 pio;
- };
-};
-
-struct kvm_coalesced_mmio {
- __u64 phys_addr;
- __u32 len;
- union {
- __u32 pad;
- __u32 pio;
- };
- __u8 data[8];
-};
-
-struct kvm_coalesced_mmio_ring {
- __u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[];
-};
-
-#define KVM_COALESCED_MMIO_MAX \
- ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
- sizeof(struct kvm_coalesced_mmio))
-
-/* for KVM_TRANSLATE */
-struct kvm_translation {
- /* in */
- __u64 linear_address;
-
- /* out */
- __u64 physical_address;
- __u8 valid;
- __u8 writeable;
- __u8 usermode;
- __u8 pad[5];
-};
-
-/* for KVM_INTERRUPT */
-struct kvm_interrupt {
- /* in */
- __u32 irq;
-};
-
-/* for KVM_GET_DIRTY_LOG */
-struct kvm_dirty_log {
- __u32 slot;
- __u32 padding1;
- union {
- void __user *dirty_bitmap; /* one bit per page */
- __u64 padding2;
- };
-};
-
-/* for KVM_CLEAR_DIRTY_LOG */
-struct kvm_clear_dirty_log {
- __u32 slot;
- __u32 num_pages;
- __u64 first_page;
- union {
- void __user *dirty_bitmap; /* one bit per page */
- __u64 padding2;
- };
-};
-
-/* for KVM_SET_SIGNAL_MASK */
-struct kvm_signal_mask {
- __u32 len;
- __u8 sigset[];
-};
-
-/* for KVM_TPR_ACCESS_REPORTING */
-struct kvm_tpr_access_ctl {
- __u32 enabled;
- __u32 flags;
- __u32 reserved[8];
-};
-
-/* for KVM_SET_VAPIC_ADDR */
-struct kvm_vapic_addr {
- __u64 vapic_addr;
-};
-
-/* for KVM_SET_MP_STATE */
-
-/* not all states are valid on all architectures */
-#define KVM_MP_STATE_RUNNABLE 0
-#define KVM_MP_STATE_UNINITIALIZED 1
-#define KVM_MP_STATE_INIT_RECEIVED 2
-#define KVM_MP_STATE_HALTED 3
-#define KVM_MP_STATE_SIPI_RECEIVED 4
-#define KVM_MP_STATE_STOPPED 5
-#define KVM_MP_STATE_CHECK_STOP 6
-#define KVM_MP_STATE_OPERATING 7
-#define KVM_MP_STATE_LOAD 8
-#define KVM_MP_STATE_AP_RESET_HOLD 9
-#define KVM_MP_STATE_SUSPENDED 10
-
-struct kvm_mp_state {
- __u32 mp_state;
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-
-#define KVM_GUESTDBG_ENABLE 0x00000001
-#define KVM_GUESTDBG_SINGLESTEP 0x00000002
-
-struct kvm_guest_debug {
- __u32 control;
- __u32 pad;
- struct kvm_guest_debug_arch arch;
-};
-
-enum {
- kvm_ioeventfd_flag_nr_datamatch,
- kvm_ioeventfd_flag_nr_pio,
- kvm_ioeventfd_flag_nr_deassign,
- kvm_ioeventfd_flag_nr_virtio_ccw_notify,
- kvm_ioeventfd_flag_nr_fast_mmio,
- kvm_ioeventfd_flag_nr_max,
-};
-
-#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
-#define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
-#define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
-#define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
- (1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify)
-
-#define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1)
-
-struct kvm_ioeventfd {
- __u64 datamatch;
- __u64 addr; /* legal pio/mmio address */
- __u32 len; /* 1, 2, 4, or 8 bytes; or 0 to ignore length */
- __s32 fd;
- __u32 flags;
- __u8 pad[36];
-};
-
-#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
-#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
-#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
-#define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4)
-
-/* for KVM_ENABLE_CAP */
-struct kvm_enable_cap {
- /* in */
- __u32 cap;
- __u32 flags;
- __u64 args[4];
- __u8 pad[64];
-};
-
-#define KVMIO 0xAE
-
-/* machine type bits, to be used as argument to KVM_CREATE_VM */
-#define KVM_VM_S390_UCONTROL 1
-
-/* on ppc, 0 indicate default, 1 should force HV and 2 PR */
-#define KVM_VM_PPC_HV 1
-#define KVM_VM_PPC_PR 2
-
-/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
-#define KVM_VM_MIPS_AUTO 0
-#define KVM_VM_MIPS_VZ 1
-#define KVM_VM_MIPS_TE 2
-
-#define KVM_S390_SIE_PAGE_OFFSET 1
-
-/*
- * On arm64, machine type can be used to request the physical
- * address size for the VM. Bits[7-0] are reserved for the guest
- * PA size shift (i.e, log2(PA_Size)). For backward compatibility,
- * value 0 implies the default IPA size, 40bits.
- */
-#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
-#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
- ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
-/*
- * ioctls for /dev/kvm fds:
- */
-#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
-#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
-#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
-
-#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
-/*
- * Check if a kvm extension is available. Argument is extension number,
- * return is 1 (yes) or 0 (no, sorry).
- */
-#define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03)
-/*
- * Get size for mmap(vcpu_fd)
- */
-#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
-#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
-#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
-
-/*
- * Extension capability list.
- */
-#define KVM_CAP_IRQCHIP 0
-#define KVM_CAP_HLT 1
-#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
-#define KVM_CAP_USER_MEMORY 3
-#define KVM_CAP_SET_TSS_ADDR 4
-#define KVM_CAP_VAPIC 6
-#define KVM_CAP_EXT_CPUID 7
-#define KVM_CAP_CLOCKSOURCE 8
-#define KVM_CAP_NR_VCPUS 9 /* returns recommended max vcpus per vm */
-#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
-#define KVM_CAP_PIT 11
-#define KVM_CAP_NOP_IO_DELAY 12
-#define KVM_CAP_PV_MMU 13
-#define KVM_CAP_MP_STATE 14
-#define KVM_CAP_COALESCED_MMIO 15
-#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
-#define KVM_CAP_IOMMU 18
-/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
-#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
-#define KVM_CAP_USER_NMI 22
-#define KVM_CAP_SET_GUEST_DEBUG 23
-#ifdef __KVM_HAVE_PIT
-#define KVM_CAP_REINJECT_CONTROL 24
-#endif
-#define KVM_CAP_IRQ_ROUTING 25
-#define KVM_CAP_IRQ_INJECT_STATUS 26
-#define KVM_CAP_ASSIGN_DEV_IRQ 29
-/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
-#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
-#ifdef __KVM_HAVE_MCE
-#define KVM_CAP_MCE 31
-#endif
-#define KVM_CAP_IRQFD 32
-#ifdef __KVM_HAVE_PIT
-#define KVM_CAP_PIT2 33
-#endif
-#define KVM_CAP_SET_BOOT_CPU_ID 34
-#ifdef __KVM_HAVE_PIT_STATE2
-#define KVM_CAP_PIT_STATE2 35
-#endif
-#define KVM_CAP_IOEVENTFD 36
-#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
-#ifdef __KVM_HAVE_XEN_HVM
-#define KVM_CAP_XEN_HVM 38
-#endif
-#define KVM_CAP_ADJUST_CLOCK 39
-#define KVM_CAP_INTERNAL_ERROR_DATA 40
-#ifdef __KVM_HAVE_VCPU_EVENTS
-#define KVM_CAP_VCPU_EVENTS 41
-#endif
-#define KVM_CAP_S390_PSW 42
-#define KVM_CAP_PPC_SEGSTATE 43
-#define KVM_CAP_HYPERV 44
-#define KVM_CAP_HYPERV_VAPIC 45
-#define KVM_CAP_HYPERV_SPIN 46
-#define KVM_CAP_PCI_SEGMENT 47
-#define KVM_CAP_PPC_PAIRED_SINGLES 48
-#define KVM_CAP_INTR_SHADOW 49
-#ifdef __KVM_HAVE_DEBUGREGS
-#define KVM_CAP_DEBUGREGS 50
-#endif
-#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
-#define KVM_CAP_PPC_OSI 52
-#define KVM_CAP_PPC_UNSET_IRQ 53
-#define KVM_CAP_ENABLE_CAP 54
-#ifdef __KVM_HAVE_XSAVE
-#define KVM_CAP_XSAVE 55
-#endif
-#ifdef __KVM_HAVE_XCRS
-#define KVM_CAP_XCRS 56
-#endif
-#define KVM_CAP_PPC_GET_PVINFO 57
-#define KVM_CAP_PPC_IRQ_LEVEL 58
-#define KVM_CAP_ASYNC_PF 59
-#define KVM_CAP_TSC_CONTROL 60
-#define KVM_CAP_GET_TSC_KHZ 61
-#define KVM_CAP_PPC_BOOKE_SREGS 62
-#define KVM_CAP_SPAPR_TCE 63
-#define KVM_CAP_PPC_SMT 64
-#define KVM_CAP_PPC_RMA 65
-#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
-#define KVM_CAP_PPC_HIOR 67
-#define KVM_CAP_PPC_PAPR 68
-#define KVM_CAP_SW_TLB 69
-#define KVM_CAP_ONE_REG 70
-#define KVM_CAP_S390_GMAP 71
-#define KVM_CAP_TSC_DEADLINE_TIMER 72
-#define KVM_CAP_S390_UCONTROL 73
-#define KVM_CAP_SYNC_REGS 74
-#define KVM_CAP_PCI_2_3 75
-#define KVM_CAP_KVMCLOCK_CTRL 76
-#define KVM_CAP_SIGNAL_MSI 77
-#define KVM_CAP_PPC_GET_SMMU_INFO 78
-#define KVM_CAP_S390_COW 79
-#define KVM_CAP_PPC_ALLOC_HTAB 80
-#define KVM_CAP_READONLY_MEM 81
-#define KVM_CAP_IRQFD_RESAMPLE 82
-#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
-#define KVM_CAP_PPC_HTAB_FD 84
-#define KVM_CAP_S390_CSS_SUPPORT 85
-#define KVM_CAP_PPC_EPR 86
-#define KVM_CAP_ARM_PSCI 87
-#define KVM_CAP_ARM_SET_DEVICE_ADDR 88
-#define KVM_CAP_DEVICE_CTRL 89
-#define KVM_CAP_IRQ_MPIC 90
-#define KVM_CAP_PPC_RTAS 91
-#define KVM_CAP_IRQ_XICS 92
-#define KVM_CAP_ARM_EL1_32BIT 93
-#define KVM_CAP_SPAPR_MULTITCE 94
-#define KVM_CAP_EXT_EMUL_CPUID 95
-#define KVM_CAP_HYPERV_TIME 96
-#define KVM_CAP_IOAPIC_POLARITY_IGNORED 97
-#define KVM_CAP_ENABLE_CAP_VM 98
-#define KVM_CAP_S390_IRQCHIP 99
-#define KVM_CAP_IOEVENTFD_NO_LENGTH 100
-#define KVM_CAP_VM_ATTRIBUTES 101
-#define KVM_CAP_ARM_PSCI_0_2 102
-#define KVM_CAP_PPC_FIXUP_HCALL 103
-#define KVM_CAP_PPC_ENABLE_HCALL 104
-#define KVM_CAP_CHECK_EXTENSION_VM 105
-#define KVM_CAP_S390_USER_SIGP 106
-#define KVM_CAP_S390_VECTOR_REGISTERS 107
-#define KVM_CAP_S390_MEM_OP 108
-#define KVM_CAP_S390_USER_STSI 109
-#define KVM_CAP_S390_SKEYS 110
-#define KVM_CAP_MIPS_FPU 111
-#define KVM_CAP_MIPS_MSA 112
-#define KVM_CAP_S390_INJECT_IRQ 113
-#define KVM_CAP_S390_IRQ_STATE 114
-#define KVM_CAP_PPC_HWRNG 115
-#define KVM_CAP_DISABLE_QUIRKS 116
-#define KVM_CAP_X86_SMM 117
-#define KVM_CAP_MULTI_ADDRESS_SPACE 118
-#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
-#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
-#define KVM_CAP_SPLIT_IRQCHIP 121
-#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
-#define KVM_CAP_HYPERV_SYNIC 123
-#define KVM_CAP_S390_RI 124
-#define KVM_CAP_SPAPR_TCE_64 125
-#define KVM_CAP_ARM_PMU_V3 126
-#define KVM_CAP_VCPU_ATTRIBUTES 127
-#define KVM_CAP_MAX_VCPU_ID 128
-#define KVM_CAP_X2APIC_API 129
-#define KVM_CAP_S390_USER_INSTR0 130
-#define KVM_CAP_MSI_DEVID 131
-#define KVM_CAP_PPC_HTM 132
-#define KVM_CAP_SPAPR_RESIZE_HPT 133
-#define KVM_CAP_PPC_MMU_RADIX 134
-#define KVM_CAP_PPC_MMU_HASH_V3 135
-#define KVM_CAP_IMMEDIATE_EXIT 136
-#define KVM_CAP_MIPS_VZ 137
-#define KVM_CAP_MIPS_TE 138
-#define KVM_CAP_MIPS_64BIT 139
-#define KVM_CAP_S390_GS 140
-#define KVM_CAP_S390_AIS 141
-#define KVM_CAP_SPAPR_TCE_VFIO 142
-#define KVM_CAP_X86_DISABLE_EXITS 143
-#define KVM_CAP_ARM_USER_IRQ 144
-#define KVM_CAP_S390_CMMA_MIGRATION 145
-#define KVM_CAP_PPC_FWNMI 146
-#define KVM_CAP_PPC_SMT_POSSIBLE 147
-#define KVM_CAP_HYPERV_SYNIC2 148
-#define KVM_CAP_HYPERV_VP_INDEX 149
-#define KVM_CAP_S390_AIS_MIGRATION 150
-#define KVM_CAP_PPC_GET_CPU_CHAR 151
-#define KVM_CAP_S390_BPB 152
-#define KVM_CAP_GET_MSR_FEATURES 153
-#define KVM_CAP_HYPERV_EVENTFD 154
-#define KVM_CAP_HYPERV_TLBFLUSH 155
-#define KVM_CAP_S390_HPAGE_1M 156
-#define KVM_CAP_NESTED_STATE 157
-#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
-#define KVM_CAP_MSR_PLATFORM_INFO 159
-#define KVM_CAP_PPC_NESTED_HV 160
-#define KVM_CAP_HYPERV_SEND_IPI 161
-#define KVM_CAP_COALESCED_PIO 162
-#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
-#define KVM_CAP_EXCEPTION_PAYLOAD 164
-#define KVM_CAP_ARM_VM_IPA_SIZE 165
-#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 /* Obsolete */
-#define KVM_CAP_HYPERV_CPUID 167
-#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168
-#define KVM_CAP_PPC_IRQ_XIVE 169
-#define KVM_CAP_ARM_SVE 170
-#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
-#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
-#define KVM_CAP_PMU_EVENT_FILTER 173
-#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
-#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
-#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
-#define KVM_CAP_ARM_NISV_TO_USER 177
-#define KVM_CAP_ARM_INJECT_EXT_DABT 178
-#define KVM_CAP_S390_VCPU_RESETS 179
-#define KVM_CAP_S390_PROTECTED 180
-#define KVM_CAP_PPC_SECURE_GUEST 181
-#define KVM_CAP_HALT_POLL 182
-#define KVM_CAP_ASYNC_PF_INT 183
-#define KVM_CAP_LAST_CPU 184
-#define KVM_CAP_SMALLER_MAXPHYADDR 185
-#define KVM_CAP_S390_DIAG318 186
-#define KVM_CAP_STEAL_TIME 187
-#define KVM_CAP_X86_USER_SPACE_MSR 188
-#define KVM_CAP_X86_MSR_FILTER 189
-#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
-#define KVM_CAP_SYS_HYPERV_CPUID 191
-#define KVM_CAP_DIRTY_LOG_RING 192
-#define KVM_CAP_X86_BUS_LOCK_EXIT 193
-#define KVM_CAP_PPC_DAWR1 194
-#define KVM_CAP_SET_GUEST_DEBUG2 195
-#define KVM_CAP_SGX_ATTRIBUTE 196
-#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
-#define KVM_CAP_PTP_KVM 198
-#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
-#define KVM_CAP_SREGS2 200
-#define KVM_CAP_EXIT_HYPERCALL 201
-#define KVM_CAP_PPC_RPT_INVALIDATE 202
-#define KVM_CAP_BINARY_STATS_FD 203
-#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
-#define KVM_CAP_ARM_MTE 205
-#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
-#define KVM_CAP_VM_GPA_BITS 207
-#define KVM_CAP_XSAVE2 208
-#define KVM_CAP_SYS_ATTRIBUTES 209
-#define KVM_CAP_PPC_AIL_MODE_3 210
-#define KVM_CAP_S390_MEM_OP_EXTENSION 211
-#define KVM_CAP_PMU_CAPABILITY 212
-#define KVM_CAP_DISABLE_QUIRKS2 213
-#define KVM_CAP_VM_TSC_CONTROL 214
-#define KVM_CAP_SYSTEM_EVENT_DATA 215
-#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
-#define KVM_CAP_S390_PROTECTED_DUMP 217
-#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
-#define KVM_CAP_X86_NOTIFY_VMEXIT 219
-#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
-#define KVM_CAP_S390_ZPCI_OP 221
-#define KVM_CAP_S390_CPU_TOPOLOGY 222
-#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
-#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
-#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
-#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
-#define KVM_CAP_COUNTER_OFFSET 227
-#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
-#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
-#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
-#define KVM_CAP_USER_MEMORY2 231
-#define KVM_CAP_MEMORY_FAULT_INFO 232
-#define KVM_CAP_MEMORY_ATTRIBUTES 233
-#define KVM_CAP_GUEST_MEMFD 234
-#define KVM_CAP_VM_TYPES 235
-#define KVM_CAP_PRE_FAULT_MEMORY 236
-#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
-#define KVM_CAP_X86_GUEST_MODE 238
-#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239
-#define KVM_CAP_ARM_EL2 240
-#define KVM_CAP_ARM_EL2_E2H0 241
-#define KVM_CAP_RISCV_MP_STATE_RESET 242
-#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
-#define KVM_CAP_GUEST_MEMFD_FLAGS 244
-#define KVM_CAP_ARM_SEA_TO_USER 245
-#define KVM_CAP_S390_USER_OPEREXEC 246
-#define KVM_CAP_S390_KEYOP 247
-
-struct kvm_irq_routing_irqchip {
- __u32 irqchip;
- __u32 pin;
-};
-
-struct kvm_irq_routing_msi {
- __u32 address_lo;
- __u32 address_hi;
- __u32 data;
- union {
- __u32 pad;
- __u32 devid;
- };
-};
-
-struct kvm_irq_routing_s390_adapter {
- __u64 ind_addr;
- __u64 summary_addr;
- __u64 ind_offset;
- __u32 summary_offset;
- __u32 adapter_id;
-};
-
-struct kvm_irq_routing_hv_sint {
- __u32 vcpu;
- __u32 sint;
-};
-
-struct kvm_irq_routing_xen_evtchn {
- __u32 port;
- __u32 vcpu;
- __u32 priority;
-};
-
-#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))
-
-/* gsi routing entry types */
-#define KVM_IRQ_ROUTING_IRQCHIP 1
-#define KVM_IRQ_ROUTING_MSI 2
-#define KVM_IRQ_ROUTING_S390_ADAPTER 3
-#define KVM_IRQ_ROUTING_HV_SINT 4
-#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
-
-struct kvm_irq_routing_entry {
- __u32 gsi;
- __u32 type;
- __u32 flags;
- __u32 pad;
- union {
- struct kvm_irq_routing_irqchip irqchip;
- struct kvm_irq_routing_msi msi;
- struct kvm_irq_routing_s390_adapter adapter;
- struct kvm_irq_routing_hv_sint hv_sint;
- struct kvm_irq_routing_xen_evtchn xen_evtchn;
- __u32 pad[8];
- } u;
-};
-
-struct kvm_irq_routing {
- __u32 nr;
- __u32 flags;
- struct kvm_irq_routing_entry entries[];
-};
-
-#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
-/*
- * Available with KVM_CAP_IRQFD_RESAMPLE
- *
- * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
- * the irqfd to operate in resampling mode for level triggered interrupt
- * emulation. See Documentation/virt/kvm/api.rst.
- */
-#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
-
-struct kvm_irqfd {
- __u32 fd;
- __u32 gsi;
- __u32 flags;
- __u32 resamplefd;
- __u8 pad[16];
-};
-
-/* For KVM_CAP_ADJUST_CLOCK */
-
-/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
-#define KVM_CLOCK_TSC_STABLE 2
-#define KVM_CLOCK_REALTIME (1 << 2)
-#define KVM_CLOCK_HOST_TSC (1 << 3)
-
-struct kvm_clock_data {
- __u64 clock;
- __u32 flags;
- __u32 pad0;
- __u64 realtime;
- __u64 host_tsc;
- __u32 pad[4];
-};
-
-/* For KVM_CAP_SW_TLB */
-
-#define KVM_MMU_FSL_BOOKE_NOHV 0
-#define KVM_MMU_FSL_BOOKE_HV 1
-
-struct kvm_config_tlb {
- __u64 params;
- __u64 array;
- __u32 mmu_type;
- __u32 array_len;
-};
-
-struct kvm_dirty_tlb {
- __u64 bitmap;
- __u32 num_dirty;
-};
-
-/* Available with KVM_CAP_ONE_REG */
-
-#define KVM_REG_ARCH_MASK 0xff00000000000000ULL
-#define KVM_REG_GENERIC 0x0000000000000000ULL
-
-/*
- * Architecture specific registers are to be defined in arch headers and
- * ORed with the arch identifier.
- */
-#define KVM_REG_PPC 0x1000000000000000ULL
-#define KVM_REG_X86 0x2000000000000000ULL
-#define KVM_REG_IA64 0x3000000000000000ULL
-#define KVM_REG_ARM 0x4000000000000000ULL
-#define KVM_REG_S390 0x5000000000000000ULL
-#define KVM_REG_ARM64 0x6000000000000000ULL
-#define KVM_REG_MIPS 0x7000000000000000ULL
-#define KVM_REG_RISCV 0x8000000000000000ULL
-#define KVM_REG_LOONGARCH 0x9000000000000000ULL
-
-#define KVM_REG_SIZE_SHIFT 52
-#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
-
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
-#define KVM_REG_SIZE_U8 0x0000000000000000ULL
-#define KVM_REG_SIZE_U16 0x0010000000000000ULL
-#define KVM_REG_SIZE_U32 0x0020000000000000ULL
-#define KVM_REG_SIZE_U64 0x0030000000000000ULL
-#define KVM_REG_SIZE_U128 0x0040000000000000ULL
-#define KVM_REG_SIZE_U256 0x0050000000000000ULL
-#define KVM_REG_SIZE_U512 0x0060000000000000ULL
-#define KVM_REG_SIZE_U1024 0x0070000000000000ULL
-#define KVM_REG_SIZE_U2048 0x0080000000000000ULL
-
-struct kvm_reg_list {
- __u64 n; /* number of regs */
- __u64 reg[];
-};
-
-struct kvm_one_reg {
- __u64 id;
- __u64 addr;
-};
-
-#define KVM_MSI_VALID_DEVID (1U << 0)
-struct kvm_msi {
- __u32 address_lo;
- __u32 address_hi;
- __u32 data;
- __u32 flags;
- __u32 devid;
- __u8 pad[12];
-};
-
-struct kvm_arm_device_addr {
- __u64 id;
- __u64 addr;
-};
-
-/*
- * Device control API, available with KVM_CAP_DEVICE_CTRL
- */
-#define KVM_CREATE_DEVICE_TEST 1
-
-struct kvm_create_device {
- __u32 type; /* in: KVM_DEV_TYPE_xxx */
- __u32 fd; /* out: device handle */
- __u32 flags; /* in: KVM_CREATE_DEVICE_xxx */
-};
-
-struct kvm_device_attr {
- __u32 flags; /* no flags currently defined */
- __u32 group; /* device-defined */
- __u64 attr; /* group-defined */
- __u64 addr; /* userspace address of attr data */
-};
-
-#define KVM_DEV_VFIO_FILE 1
-
-#define KVM_DEV_VFIO_FILE_ADD 1
-#define KVM_DEV_VFIO_FILE_DEL 2
-
-/* KVM_DEV_VFIO_GROUP aliases are for compile time uapi compatibility */
-#define KVM_DEV_VFIO_GROUP KVM_DEV_VFIO_FILE
-
-#define KVM_DEV_VFIO_GROUP_ADD KVM_DEV_VFIO_FILE_ADD
-#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
-#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
-
-enum kvm_device_type {
- KVM_DEV_TYPE_FSL_MPIC_20 = 1,
-#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20
- KVM_DEV_TYPE_FSL_MPIC_42,
-#define KVM_DEV_TYPE_FSL_MPIC_42 KVM_DEV_TYPE_FSL_MPIC_42
- KVM_DEV_TYPE_XICS,
-#define KVM_DEV_TYPE_XICS KVM_DEV_TYPE_XICS
- KVM_DEV_TYPE_VFIO,
-#define KVM_DEV_TYPE_VFIO KVM_DEV_TYPE_VFIO
- KVM_DEV_TYPE_ARM_VGIC_V2,
-#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
- KVM_DEV_TYPE_FLIC,
-#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
- KVM_DEV_TYPE_ARM_VGIC_V3,
-#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
- KVM_DEV_TYPE_ARM_VGIC_ITS,
-#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
- KVM_DEV_TYPE_XIVE,
-#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
- KVM_DEV_TYPE_ARM_PV_TIME,
-#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
- KVM_DEV_TYPE_RISCV_AIA,
-#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
- KVM_DEV_TYPE_LOONGARCH_IPI,
-#define KVM_DEV_TYPE_LOONGARCH_IPI KVM_DEV_TYPE_LOONGARCH_IPI
- KVM_DEV_TYPE_LOONGARCH_EIOINTC,
-#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC
- KVM_DEV_TYPE_LOONGARCH_PCHPIC,
-#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC
-
- KVM_DEV_TYPE_MAX,
-
-};
-
-struct kvm_vfio_spapr_tce {
- __s32 groupfd;
- __s32 tablefd;
-};
-
-#define KVM_S390_KEYOP_ISKE 0x01
-#define KVM_S390_KEYOP_RRBE 0x02
-#define KVM_S390_KEYOP_SSKE 0x03
-struct kvm_s390_keyop {
- __u64 guest_addr;
- __u8 key;
- __u8 operation;
- __u8 pad[6];
-};
-
-/*
- * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
- * a vcpu fd.
- */
-#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
-#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
-#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
-#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */
-#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
- struct kvm_userspace_memory_region)
-#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
-#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
-#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
- struct kvm_userspace_memory_region2)
-
-/* enable ucontrol for s390 */
-#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
-#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
-#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
-#define KVM_S390_KEYOP _IOWR(KVMIO, 0x53, struct kvm_s390_keyop)
-
-/* Device model IOC */
-#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
-#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
-#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
-#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
-#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
-#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
-#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
-#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
-#define KVM_REGISTER_COALESCED_MMIO \
- _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
-#define KVM_UNREGISTER_COALESCED_MMIO \
- _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
-#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
-#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
-#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
-#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
-#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
-#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
-/* Available with KVM_CAP_PIT_STATE2 */
-#define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
-#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
-/* Available with KVM_CAP_PPC_GET_PVINFO */
-#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
-* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
-#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
-#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_SIGNAL_MSI */
-#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
-/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
-#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
-/* Available with KVM_CAP_PPC_ALLOC_HTAB */
-#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
-#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
-#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \
- struct kvm_create_spapr_tce_64)
-/* Available with KVM_CAP_RMA */
-#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
-/* Available with KVM_CAP_PPC_HTAB_FD */
-#define KVM_PPC_GET_HTAB_FD _IOW(KVMIO, 0xaa, struct kvm_get_htab_fd)
-/* Available with KVM_CAP_ARM_SET_DEVICE_ADDR */
-#define KVM_ARM_SET_DEVICE_ADDR _IOW(KVMIO, 0xab, struct kvm_arm_device_addr)
-/* Available with KVM_CAP_PPC_RTAS */
-#define KVM_PPC_RTAS_DEFINE_TOKEN _IOW(KVMIO, 0xac, struct kvm_rtas_token_args)
-/* Available with KVM_CAP_SPAPR_RESIZE_HPT */
-#define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt)
-#define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt)
-/* Available with KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3 */
-#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
-/* Available with KVM_CAP_PPC_MMU_RADIX */
-#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
-/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
-#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
-/* Available with KVM_CAP_PMU_EVENT_FILTER */
-#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
-#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
-#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
-/* Available with KVM_CAP_COUNTER_OFFSET */
-#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
-#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
-
-/* ioctl for vm fd */
-#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
-
-/* ioctls for fds returned by KVM_CREATE_DEVICE */
-#define KVM_SET_DEVICE_ATTR _IOW(KVMIO, 0xe1, struct kvm_device_attr)
-#define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr)
-#define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr)
-
-/*
- * ioctls for vcpu fds
- */
-#define KVM_RUN _IO(KVMIO, 0x80)
-#define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs)
-#define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs)
-#define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs)
-#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
-#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
-#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
-#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
-#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
-#define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask)
-#define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu)
-#define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu)
-#define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state)
-#define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state)
-#define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
-#define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
-/* Available with KVM_CAP_VAPIC */
-#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
-/* Available with KVM_CAP_VAPIC */
-#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
-/* valid for virtual machine (for floating interrupt)_and_ vcpu */
-#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
-/* store status for s390 */
-#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
-#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
-#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
-/* initial ipl psw for s390 */
-#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
-/* initial reset for s390 */
-#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
-#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
-#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
-/* Available with KVM_CAP_USER_NMI */
-#define KVM_NMI _IO(KVMIO, 0x9a)
-/* Available with KVM_CAP_SET_GUEST_DEBUG */
-#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
-/* MCE for x86 */
-#define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
-#define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
-#define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
-/* Available with KVM_CAP_VCPU_EVENTS */
-#define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
-#define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
-/* Available with KVM_CAP_DEBUGREGS */
-#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
-#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
-/*
- * vcpu version available with KVM_CAP_ENABLE_CAP
- * vm version available with KVM_CAP_ENABLE_CAP_VM
- */
-#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
-/* Available with KVM_CAP_XSAVE */
-#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
-#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
-/* Available with KVM_CAP_XCRS */
-#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
-#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
-/* Available with KVM_CAP_SW_TLB */
-#define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb)
-/* Available with KVM_CAP_ONE_REG */
-#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg)
-#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
-/* VM is being stopped by host */
-#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
-#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
-#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
-#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
-/* Available with KVM_CAP_S390_MEM_OP */
-#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op)
-/* Available with KVM_CAP_S390_SKEYS */
-#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys)
-#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys)
-/* Available with KVM_CAP_S390_INJECT_IRQ */
-#define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq)
-/* Available with KVM_CAP_S390_IRQ_STATE */
-#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
-#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
-/* Available with KVM_CAP_X86_SMM */
-#define KVM_SMI _IO(KVMIO, 0xb7)
-/* Available with KVM_CAP_S390_CMMA_MIGRATION */
-#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
-#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
-/* Memory Encryption Commands */
-#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
-
-struct kvm_enc_region {
- __u64 addr;
- __u64 size;
-};
-
-#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
-#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
-
-/* Available with KVM_CAP_HYPERV_EVENTFD */
-#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
-
-/* Available with KVM_CAP_NESTED_STATE */
-#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
-#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
-
-/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
-#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
-
-/* Available with KVM_CAP_HYPERV_CPUID (vcpu) / KVM_CAP_SYS_HYPERV_CPUID (system) */
-#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
-
-/* Available with KVM_CAP_ARM_SVE */
-#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int)
-
-/* Available with KVM_CAP_S390_VCPU_RESETS */
-#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
-#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-
-/* Available with KVM_CAP_S390_PROTECTED */
-#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
-
-/* Available with KVM_CAP_X86_MSR_FILTER */
-#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter)
-
-/* Available with KVM_CAP_DIRTY_LOG_RING */
-#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
-
-/* Per-VM Xen attributes */
-#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
-#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
-
-/* Per-vCPU Xen attributes */
-#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
-#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
-
-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
-#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)
-
-#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
-#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
-
-#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
-#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
-
-/*
- * Arch needs to define the macro after implementing the dirty ring
- * feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the
- * starting page offset of the dirty ring structures.
- */
-#ifndef KVM_DIRTY_LOG_PAGE_OFFSET
-#define KVM_DIRTY_LOG_PAGE_OFFSET 0
-#endif
-
-/*
- * KVM dirty GFN flags, defined as:
- *
- * |---------------+---------------+--------------|
- * | bit 1 (reset) | bit 0 (dirty) | Status |
- * |---------------+---------------+--------------|
- * | 0 | 0 | Invalid GFN |
- * | 0 | 1 | Dirty GFN |
- * | 1 | X | GFN to reset |
- * |---------------+---------------+--------------|
- *
- * Lifecycle of a dirty GFN goes like:
- *
- * dirtied harvested reset
- * 00 -----------> 01 -------------> 1X -------+
- * ^ |
- * | |
- * +------------------------------------------+
- *
- * The userspace program is only responsible for the 01->1X state
- * conversion after harvesting an entry. Also, it must not skip any
- * dirty bits, so that dirty bits are always harvested in sequence.
- */
-#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
-#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
-#define KVM_DIRTY_GFN_F_MASK 0x3
-
-/*
- * KVM dirty rings should be mapped at KVM_DIRTY_LOG_PAGE_OFFSET of
- * per-vcpu mmaped regions as an array of struct kvm_dirty_gfn. The
- * size of the gfn buffer is decided by the first argument when
- * enabling KVM_CAP_DIRTY_LOG_RING.
- */
-struct kvm_dirty_gfn {
- __u32 flags;
- __u32 slot;
- __u64 offset;
-};
-
-#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
-#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
-
-#define KVM_PMU_CAP_DISABLE (1 << 0)
-
-/**
- * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
- * @flags: Some extra information for header, always 0 for now.
- * @name_size: The size in bytes of the memory which contains statistics
- * name string including trailing '\0'. The memory is allocated
- * at the send of statistics descriptor.
- * @num_desc: The number of statistics the vm or vcpu has.
- * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
- * by vm/vcpu stats fd.
- * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
- * pointd by vm/vcpu stats fd.
- * @data_offset: The offset of the vm/vcpu stats' data block in the file
- * pointed by vm/vcpu stats fd.
- *
- * This is the header userspace needs to read from stats fd before any other
- * readings. It is used by userspace to discover all the information about the
- * vm/vcpu's binary statistics.
- * Userspace reads this header from the start of the vm/vcpu's stats fd.
- */
-struct kvm_stats_header {
- __u32 flags;
- __u32 name_size;
- __u32 num_desc;
- __u32 id_offset;
- __u32 desc_offset;
- __u32 data_offset;
-};
-
-#define KVM_STATS_TYPE_SHIFT 0
-#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
-#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
-
-#define KVM_STATS_UNIT_SHIFT 4
-#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
-
-#define KVM_STATS_BASE_SHIFT 8
-#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
-#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
-#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
-#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
-
-/**
- * struct kvm_stats_desc - Descriptor of a KVM statistics.
- * @flags: Annotations of the stats, like type, unit, etc.
- * @exponent: Used together with @flags to determine the unit.
- * @size: The number of data items for this stats.
- * Every data item is of type __u64.
- * @offset: The offset of the stats to the start of stat structure in
- * structure kvm or kvm_vcpu.
- * @bucket_size: A parameter value used for histogram stats. It is only used
- * for linear histogram stats, specifying the size of the bucket;
- * @name: The name string for the stats. Its size is indicated by the
- * &kvm_stats_header->name_size.
- */
-struct kvm_stats_desc {
- __u32 flags;
- __s16 exponent;
- __u16 size;
- __u32 offset;
- __u32 bucket_size;
-#ifdef __KERNEL__
- char name[KVM_STATS_NAME_SIZE];
-#else
- char name[];
-#endif
-};
-
-#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
-
-/* Available with KVM_CAP_XSAVE2 */
-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-
-/* Available with KVM_CAP_S390_PROTECTED_DUMP */
-#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
-
-/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
-#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
-#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1)
-
-/* Available with KVM_CAP_S390_ZPCI_OP */
-#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
-
-/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
-#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
-
-struct kvm_memory_attributes {
- __u64 address;
- __u64 size;
- __u64 attributes;
- __u64 flags;
-};
-
-#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
-
-#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
-#define GUEST_MEMFD_FLAG_MMAP (1ULL << 0)
-#define GUEST_MEMFD_FLAG_INIT_SHARED (1ULL << 1)
-
-struct kvm_create_guest_memfd {
- __u64 size;
- __u64 flags;
- __u64 reserved[6];
-};
-
-#define KVM_PRE_FAULT_MEMORY _IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory)
-
-struct kvm_pre_fault_memory {
- __u64 gpa;
- __u64 size;
- __u64 flags;
- __u64 padding[5];
-};
+#include <linux/kvm-generic.h>
#endif /* __LINUX_KVM_H */
--git a/scripts/Makefile.asm-headers b/scripts/Makefile.asm-headers
index 8a4856e74180..6482028624e4 100644
--- a/scripts/Makefile.asm-headers
+++ b/scripts/Makefile.asm-headers
@@ -46,14 +46,18 @@ generic-y += $(foreach f, $(mandatory-y), $(if $(wildcard $(src)/$(f)),,$(f)))
generic-y := $(addprefix $(obj)/, $(generic-y))
syscall-y := $(addprefix $(obj)/, $(syscall-y))
generated-y := $(addprefix $(obj)/, $(generated-y))
+shared-uapi-y := $(addprefix $(obj)/, $(shared-uapi-y))
# Remove stale wrappers when the corresponding files are removed from generic-y
old-headers := $(wildcard $(obj)/*.h)
-unwanted := $(filter-out $(generic-y) $(generated-y) $(syscall-y),$(old-headers))
+unwanted := $(filter-out $(generic-y) $(generated-y) $(syscall-y) $(shared-uapi-y),$(old-headers))
quiet_cmd_wrap = WRAP $@
cmd_wrap = echo "\#include <asm-generic/$*.h>" > $@
+quiet_cmd_share_hdr = SHARE $@
+ cmd_share_hdr = cp $< $@
+
quiet_cmd_remove = REMOVE $(unwanted)
cmd_remove = rm -f $(unwanted)
@@ -70,13 +74,19 @@ quiet_cmd_systbl = SYSTBL $@
--abis $(subst $(space),$(comma),$(strip $(syscall_abis_$*))) \
$< $@
-all: $(generic-y) $(syscall-y)
+all: $(generic-y) $(syscall-y) $(shared-uapi-y)
$(if $(unwanted),$(call cmd,remove))
@:
$(obj)/%.h: $(srctree)/$(generic)/%.h
$(call cmd,wrap)
+# Let architectures define architecture specific headers that can be shared
+# with other architectures to enable single-system cross architecture communication
+shared-uapi-dest := $(srctree)/include/uapi/arch/$(SRCARCH)/asm
+$(shared-uapi-y): $(obj)/%.h: $(shared-uapi-dest)/%.h FORCE
+ $(call if_changed,share_hdr)
+
$(obj)/unistd_%.h: $(syscalltbl) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 6d86a53c6f0a..c080447f8126 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -24,6 +24,7 @@ no-header-test += linux/cyclades.h
no-header-test += linux/errqueue.h
no-header-test += linux/hdlc/ioctl.h
no-header-test += linux/ivtv.h
+no-header-test += linux/kvm-generic.h
no-header-test += linux/matroxfb.h
no-header-test += linux/omap3isp.h
no-header-test += linux/omapfb.h
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 05/27] arm64: Extract sysreg definitions
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (3 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 04/27] arm64: Provide arm64 UAPI for other host architectures Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 06/27] arm64: Provide arm64 API for non-native architectures Steffen Eiden
` (22 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Split all definitions that can be used by non-native architectures into
a separate file, sysreg-defs.h. The generated sysreg definitions now go
into sysreg-gen-defs.h. This allows other architectures to use the
sysreg definitions.
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/Kbuild | 2 +-
.../include/asm/{sysreg.h => sysreg-defs.h} | 302 +-----
arch/arm64/include/asm/sysreg.h | 972 +-----------------
arch/arm64/tools/Makefile | 14 +-
arch/arm64/tools/Makefile.sysreg | 12 +
arch/arm64/tools/gen-sysreg.awk | 6 +-
6 files changed, 33 insertions(+), 1275 deletions(-)
copy arch/arm64/include/asm/{sysreg.h => sysreg-defs.h} (80%)
create mode 100644 arch/arm64/tools/Makefile.sysreg
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index d2ff8f6c3231..76f6cf2b952b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -17,4 +17,4 @@ generic-y += parport.h
generic-y += user.h
generated-y += cpucap-defs.h
-generated-y += sysreg-defs.h
+generated-y += sysreg-gen-defs.h
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg-defs.h
similarity index 80%
copy from arch/arm64/include/asm/sysreg.h
copy to arch/arm64/include/asm/sysreg-defs.h
index f4436ecc630c..d5196f293e19 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg-defs.h
@@ -1,20 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Macros for accessing system registers with older binutils.
- *
- * Copyright (C) 2014 ARM Ltd.
- * Author: Catalin Marinas <catalin.marinas@arm.com>
- */
-#ifndef __ASM_SYSREG_H
-#define __ASM_SYSREG_H
+#ifndef __ASM_SYSREG_DEFS_H
+#define __ASM_SYSREG_DEFS_H
#include <linux/bits.h>
-#include <linux/stringify.h>
-#include <linux/kasan-tags.h>
-#include <linux/kconfig.h>
-
-#include <asm/gpr-num.h>
/*
* ARMv8 ARM reserves the following encoding for system registers:
@@ -50,35 +39,6 @@
#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
-#ifndef CONFIG_BROKEN_GAS_INST
-
-#ifdef __ASSEMBLER__
-// The space separator is omitted so that __emit_inst(x) can be parsed as
-// either an assembler directive or an assembler macro argument.
-#define __emit_inst(x) .inst(x)
-#else
-#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
-#endif
-
-#else /* CONFIG_BROKEN_GAS_INST */
-
-#ifndef CONFIG_CPU_BIG_ENDIAN
-#define __INSTR_BSWAP(x) (x)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
- (((x) << 8) & 0x00ff0000) | \
- (((x) >> 8) & 0x0000ff00) | \
- (((x) >> 24) & 0x000000ff))
-#endif /* CONFIG_CPU_BIG_ENDIAN */
-
-#ifdef __ASSEMBLER__
-#define __emit_inst(x) .long __INSTR_BSWAP(x)
-#else /* __ASSEMBLER__ */
-#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
-#endif /* __ASSEMBLER__ */
-
-#endif /* CONFIG_BROKEN_GAS_INST */
-
/*
* Instructions for modifying PSTATE fields.
* As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
@@ -100,29 +60,6 @@
#define PSTATE_DIT pstate_field(3, 2)
#define PSTATE_TCO pstate_field(3, 4)
-#define SET_PSTATE_PAN(x) SET_PSTATE((x), PAN)
-#define SET_PSTATE_UAO(x) SET_PSTATE((x), UAO)
-#define SET_PSTATE_SSBS(x) SET_PSTATE((x), SSBS)
-#define SET_PSTATE_DIT(x) SET_PSTATE((x), DIT)
-#define SET_PSTATE_TCO(x) SET_PSTATE((x), TCO)
-
-#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
-#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
-#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
-#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x))
-
-/* Register-based PAN access, for save/restore purposes */
-#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3)
-
-#define __SYS_BARRIER_INSN(op0, op1, CRn, CRm, op2, Rt) \
- __emit_inst(0xd5000000 | \
- sys_insn((op0), (op1), (CRn), (CRm), (op2)) | \
- ((Rt) & 0x1f))
-
-#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 3, 3, 0, 7, 31)
-#define GSB_SYS_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 0, 31)
-#define GSB_ACK_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 1, 31)
-
/* Data cache zero operations */
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
@@ -173,7 +110,7 @@
* come from here. The header relies on the definition of sys_reg()
* earlier in this file.
*/
-#include "asm/sysreg-defs.h"
+#include "asm/sysreg-gen-defs.h"
/*
* System registers, organised loosely by encoding but grouped together
@@ -835,40 +772,6 @@
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define ENDIAN_SET_EL2 SCTLR_ELx_EE
-#else
-#define ENDIAN_SET_EL2 0
-#endif
-
-#define INIT_SCTLR_EL2_MMU_ON \
- (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
- SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
- SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
-
-#define INIT_SCTLR_EL2_MMU_OFF \
- (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
-
-/* SCTLR_EL1 specific flags. */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
-#else
-#define ENDIAN_SET_EL1 0
-#endif
-
-#define INIT_SCTLR_EL1_MMU_OFF \
- (ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \
- SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
-
-#define INIT_SCTLR_EL1_MMU_ON \
- (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \
- SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \
- SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \
- SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
- ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \
- SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \
- SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
-
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
@@ -898,30 +801,13 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
-#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
-#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
-#define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
-#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
-#elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
-#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
-#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
-#endif
+#define ARM64_MIN_PARANGE_BITS 32
+
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
@@ -936,21 +822,6 @@
#define SYS_GCR_EL1_RRND (BIT(16))
#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
-#ifdef CONFIG_KASAN_HW_TAGS
-/*
- * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
- * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF.
- */
-#define __MTE_TAG_MIN (KASAN_TAG_MIN & 0xf)
-#define __MTE_TAG_MAX (KASAN_TAG_MAX & 0xf)
-#define __MTE_TAG_INCL GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN)
-#define KERNEL_GCR_EL1_EXCL (SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL)
-#else
-#define KERNEL_GCR_EL1_EXCL SYS_GCR_EL1_EXCL_MASK
-#endif
-
-#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
-
/* RGSR_EL1 Definitions */
#define SYS_RGSR_EL1_TAG_MASK 0xfUL
#define SYS_RGSR_EL1_SEED_SHIFT 8
@@ -1101,153 +972,8 @@
#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
-#ifdef __ASSEMBLER__
-
- .macro mrs_s, rt, sreg
- __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
- .endm
-
- .macro msr_s, sreg, rt
- __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
- .endm
-
- .macro msr_hcr_el2, reg
-#if IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23)
- dsb nsh
- msr hcr_el2, \reg
- isb
-#else
- msr hcr_el2, \reg
-#endif
- .endm
-#else
-
+#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
-#include <linux/build_bug.h>
-#include <linux/types.h>
-#include <asm/alternative.h>
-
-#define DEFINE_MRS_S \
- __DEFINE_ASM_GPR_NUMS \
-" .macro mrs_s, rt, sreg\n" \
- __emit_inst(0xd5200000|(\\sreg)|(.L__gpr_num_\\rt)) \
-" .endm\n"
-
-#define DEFINE_MSR_S \
- __DEFINE_ASM_GPR_NUMS \
-" .macro msr_s, sreg, rt\n" \
- __emit_inst(0xd5000000|(\\sreg)|(.L__gpr_num_\\rt)) \
-" .endm\n"
-
-#define UNDEFINE_MRS_S \
-" .purgem mrs_s\n"
-
-#define UNDEFINE_MSR_S \
-" .purgem msr_s\n"
-
-#define __mrs_s(v, r) \
- DEFINE_MRS_S \
-" mrs_s " v ", " __stringify(r) "\n" \
- UNDEFINE_MRS_S
-
-#define __msr_s(r, v) \
- DEFINE_MSR_S \
-" msr_s " __stringify(r) ", " v "\n" \
- UNDEFINE_MSR_S
-
-/*
- * Unlike read_cpuid, calls to read_sysreg are never expected to be
- * optimized away or replaced with synthetic values.
- */
-#define read_sysreg(r) ({ \
- u64 __val; \
- asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
- __val; \
-})
-
-/*
- * The "Z" constraint normally means a zero immediate, but when combined with
- * the "%x0" template means XZR.
- */
-#define write_sysreg(v, r) do { \
- u64 __val = (u64)(v); \
- asm volatile("msr " __stringify(r) ", %x0" \
- : : "rZ" (__val)); \
-} while (0)
-
-/*
- * For registers without architectural names, or simply unsupported by
- * GAS.
- *
- * __check_r forces warnings to be generated by the compiler when
- * evaluating r which wouldn't normally happen due to being passed to
- * the assembler via __stringify(r).
- */
-#define read_sysreg_s(r) ({ \
- u64 __val; \
- u32 __maybe_unused __check_r = (u32)(r); \
- asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
- __val; \
-})
-
-/*
- * The "Z" constraint combined with the "%x0" template should be enough
- * to force XZR generation if (v) is a constant 0 value but LLVM does not
- * yet understand that modifier/constraint combo so a conditional is required
- * to nudge the compiler into using XZR as a source for a 0 constant value.
- */
-#define write_sysreg_s(v, r) do { \
- u64 __val = (u64)(v); \
- u32 __maybe_unused __check_r = (u32)(r); \
- if (__builtin_constant_p(__val) && __val == 0) \
- asm volatile(__msr_s(r, "xzr")); \
- else \
- asm volatile(__msr_s(r, "%x0") : : "r" (__val)); \
-} while (0)
-
-/*
- * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
- * set mask are set. Other bits are left as-is.
- */
-#define sysreg_clear_set(sysreg, clear, set) do { \
- u64 __scs_val = read_sysreg(sysreg); \
- u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
- if (__scs_new != __scs_val) \
- write_sysreg(__scs_new, sysreg); \
-} while (0)
-
-#define sysreg_clear_set_hcr(clear, set) do { \
- u64 __scs_val = read_sysreg(hcr_el2); \
- u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
- if (__scs_new != __scs_val) \
- write_sysreg_hcr(__scs_new); \
-} while (0)
-
-#define sysreg_clear_set_s(sysreg, clear, set) do { \
- u64 __scs_val = read_sysreg_s(sysreg); \
- u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
- if (__scs_new != __scs_val) \
- write_sysreg_s(__scs_new, sysreg); \
-} while (0)
-
-#define write_sysreg_hcr(__val) do { \
- if (IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) && \
- (!system_capabilities_finalized() || \
- alternative_has_cap_unlikely(ARM64_WORKAROUND_AMPERE_AC04_CPU_23))) \
- asm volatile("dsb nsh; msr hcr_el2, %x0; isb" \
- : : "rZ" (__val)); \
- else \
- asm volatile("msr hcr_el2, %x0" \
- : : "rZ" (__val)); \
-} while (0)
-
-#define read_sysreg_par() ({ \
- u64 par; \
- asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
- par = read_sysreg(par_el1); \
- asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
- par; \
-})
#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val
@@ -1260,7 +986,5 @@
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
FIELD_PREP(reg##_##field##_MASK, \
SYS_FIELD_VALUE(reg, field, val))
-
-#endif
-
-#endif /* __ASM_SYSREG_H */
+#endif /* __ASSEMBLER__ */
+#endif /* __ASM_SYSREG_DEFS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f4436ecc630c..6209debd9410 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -15,40 +15,7 @@
#include <linux/kconfig.h>
#include <asm/gpr-num.h>
-
-/*
- * ARMv8 ARM reserves the following encoding for system registers:
- * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
- * C5.2, version:ARM DDI 0487A.f)
- * [20-19] : Op0
- * [18-16] : Op1
- * [15-12] : CRn
- * [11-8] : CRm
- * [7-5] : Op2
- */
-#define Op0_shift 19
-#define Op0_mask 0x3
-#define Op1_shift 16
-#define Op1_mask 0x7
-#define CRn_shift 12
-#define CRn_mask 0xf
-#define CRm_shift 8
-#define CRm_mask 0xf
-#define Op2_shift 5
-#define Op2_mask 0x7
-
-#define sys_reg(op0, op1, crn, crm, op2) \
- (((op0) << Op0_shift) | ((op1) << Op1_shift) | \
- ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
- ((op2) << Op2_shift))
-
-#define sys_insn sys_reg
-
-#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
-#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
-#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
-#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
-#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
+#include <asm/sysreg-defs.h>
#ifndef CONFIG_BROKEN_GAS_INST
@@ -79,27 +46,6 @@
#endif /* CONFIG_BROKEN_GAS_INST */
-/*
- * Instructions for modifying PSTATE fields.
- * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
- * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions
- * for accessing PSTATE fields have the following encoding:
- * Op0 = 0, CRn = 4
- * Op1, Op2 encodes the PSTATE field modified and defines the constraints.
- * CRm = Imm4 for the instruction.
- * Rt = 0x1f
- */
-#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
-#define PSTATE_Imm_shift CRm_shift
-#define ENCODE_PSTATE(x, r) (0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
-#define SET_PSTATE(x, r) __emit_inst(ENCODE_PSTATE(x, r))
-
-#define PSTATE_PAN pstate_field(0, 4)
-#define PSTATE_UAO pstate_field(0, 3)
-#define PSTATE_SSBS pstate_field(3, 1)
-#define PSTATE_DIT pstate_field(3, 2)
-#define PSTATE_TCO pstate_field(3, 4)
-
#define SET_PSTATE_PAN(x) SET_PSTATE((x), PAN)
#define SET_PSTATE_UAO(x) SET_PSTATE((x), UAO)
#define SET_PSTATE_SSBS(x) SET_PSTATE((x), SSBS)
@@ -123,718 +69,6 @@
#define GSB_SYS_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 0, 31)
#define GSB_ACK_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 1, 31)
-/* Data cache zero operations */
-#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
-#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
-#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
-#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
-#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
-#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
-#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
-#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
-#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
-
-#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0)
-#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0)
-#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1)
-
-#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1)
-#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3)
-#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5)
-
-#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1)
-#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3)
-#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5)
-
-#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1)
-
-#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1)
-#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3)
-#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5)
-
-#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1)
-#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3)
-#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5)
-
-#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1)
-#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
-#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
-
-#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
-#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
-#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
-
-#define SYS_DC_CIVAPS sys_insn(1, 0, 7, 15, 1)
-#define SYS_DC_CIGDVAPS sys_insn(1, 0, 7, 15, 5)
-
-/*
- * Automatically generated definitions for system registers, the
- * manual encodings below are in the process of being converted to
- * come from here. The header relies on the definition of sys_reg()
- * earlier in this file.
- */
-#include "asm/sysreg-defs.h"
-
-/*
- * System registers, organised loosely by encoding but grouped together
- * where the architected name contains an index. e.g. ID_MMFR<n>_EL1.
- */
-#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3)
-#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3)
-#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3)
-
-#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
-#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
-#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
-#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
-#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
-
-#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
-#define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0))
-#define OSLSR_EL1_OSLM_NI 0
-#define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3)
-#define OSLSR_EL1_OSLK BIT(1)
-
-#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
-#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
-#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
-#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
-#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
-#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
-#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
-#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
-#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
-#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
-
-#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
-#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
-#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
-
-#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
-#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
-#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3)))
-#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6)
-#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0)
-#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0)
-#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0)
-#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2)
-#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2)
-#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0)
-#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6)
-#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6)
-#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5)
-#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5)
-#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5)
-#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0)
-#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6)
-#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7)
-#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0)
-#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0)
-#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4)
-#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7)
-#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6)
-#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6)
-#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6)
-#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6)
-#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7)
-#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7)
-#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7)
-#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7)
-#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7)
-#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7)
-#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7)
-#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6)
-#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6)
-#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7)
-#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1)
-#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4)
-#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0)
-#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1)
-#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4)))
-#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0)
-#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4)
-#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4)
-#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4)
-#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2)
-#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2)
-#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3)
-#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0)
-#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0)
-#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0)
-#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1)
-#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0)
-#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2)
-#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2)
-#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2)
-#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2)
-#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2)
-#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2)
-#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1)
-
-/* ETM */
-#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
-
-#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
-#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
-#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
-
-#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
-#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
-#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
-
-#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
-#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1)
-#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2)
-#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3)
-
-#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0)
-#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1)
-#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2)
-#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3)
-
-#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0)
-#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1)
-
-#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0)
-#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1)
-
-#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
-
-#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
-#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
-#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
-
-#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
-#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
-#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
-#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
-#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
-#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
-#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4)
-#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5)
-#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6)
-#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
-#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
-#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2)
-#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3)
-#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
-#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
-
-#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
-
-#define SYS_PAR_EL1_F BIT(0)
-/* When PAR_EL1.F == 1 */
-#define SYS_PAR_EL1_FST GENMASK(6, 1)
-#define SYS_PAR_EL1_PTW BIT(8)
-#define SYS_PAR_EL1_S BIT(9)
-#define SYS_PAR_EL1_AssuredOnly BIT(12)
-#define SYS_PAR_EL1_TopLevel BIT(13)
-#define SYS_PAR_EL1_Overlay BIT(14)
-#define SYS_PAR_EL1_DirtyBit BIT(15)
-#define SYS_PAR_EL1_F1_IMPDEF GENMASK_ULL(63, 48)
-#define SYS_PAR_EL1_F1_RES0 (BIT(7) | BIT(10) | GENMASK_ULL(47, 16))
-#define SYS_PAR_EL1_RES1 BIT(11)
-/* When PAR_EL1.F == 0 */
-#define SYS_PAR_EL1_SH GENMASK_ULL(8, 7)
-#define SYS_PAR_EL1_NS BIT(9)
-#define SYS_PAR_EL1_F0_IMPDEF BIT(10)
-#define SYS_PAR_EL1_NSE BIT(11)
-#define SYS_PAR_EL1_PA GENMASK_ULL(51, 12)
-#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56)
-#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52))
-
-/* Buffer error reporting */
-#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT
-#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK
-
-#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT
-#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK
-
-#define PMBSR_EL1_BUF_BSC_FULL 0x1UL
-
-/*** End of Statistical Profiling Extension ***/
-
-#define TRBSR_EL1_BSC_MASK GENMASK(5, 0)
-#define TRBSR_EL1_BSC_SHIFT 0
-
-#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
-#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
-
-#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6)
-
-#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
-#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
-
-#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
-#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
-
-#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
-#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
-#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
-#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
-#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
-#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
-#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
-#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
-#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
-#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
-#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
-#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
-#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
-#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
-#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
-#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
-#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
-#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
-#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
-#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
-#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
-#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
-#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
-#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
-#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
-#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
-#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
-
-#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5)
-
-#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
-
-#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
-
-#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
-#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
-
-#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
-#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
-#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
-#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
-#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
-#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
-#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
-#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
-#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
-#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
-#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
-#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
-
-#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
-#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
-#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5)
-
-#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
-
-/* Definitions for system register interface to AMU for ARMv8.4 onwards */
-#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2))
-#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0)
-#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1)
-#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2)
-#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3)
-#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4)
-#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5)
-#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0)
-#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1)
-
-/*
- * Group 0 of activity monitors (architected):
- * op0 op1 CRn CRm op2
- * Counter: 11 011 1101 010:n<3> n<2:0>
- * Type: 11 011 1101 011:n<3> n<2:0>
- * n: 0-15
- *
- * Group 1 of activity monitors (auxiliary):
- * op0 op1 CRn CRm op2
- * Counter: 11 011 1101 110:n<3> n<2:0>
- * Type: 11 011 1101 111:n<3> n<2:0>
- * n: 0-15
- */
-
-#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7)
-#define SYS_AMEVTYPER0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7)
-#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7)
-#define SYS_AMEVTYPER1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7)
-
-/* AMU v1: Fixed (architecturally defined) activity monitors */
-#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0)
-#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1)
-#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2)
-#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3)
-
-#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
-
-#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
-#define SYS_CNTVCT_EL0 sys_reg(3, 3, 14, 0, 2)
-#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
-#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
-
-#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
-#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
-#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
-
-#define SYS_CNTV_TVAL_EL0 sys_reg(3, 3, 14, 3, 0)
-#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
-#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
-
-#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
-#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
-#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
-#define SYS_AARCH32_CNTVCT sys_reg(0, 1, 0, 14, 0)
-#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
-#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
-#define SYS_AARCH32_CNTVCTSS sys_reg(0, 9, 0, 14, 0)
-
-#define __PMEV_op2(n) ((n) & 0x7)
-#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
-#define SYS_PMEVCNTSVRn_EL1(n) sys_reg(2, 0, 14, __CNTR_CRm(n), __PMEV_op2(n))
-#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
-#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
-#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
-
-#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
-
-#define SYS_SPMCGCRn_EL1(n) sys_reg(2, 0, 9, 13, ((n) & 1))
-
-#define __SPMEV_op2(n) ((n) & 0x7)
-#define __SPMEV_crm(p, n) ((((p) & 7) << 1) | (((n) >> 3) & 1))
-#define SYS_SPMEVCNTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b000, n), __SPMEV_op2(n))
-#define SYS_SPMEVFILT2Rn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b011, n), __SPMEV_op2(n))
-#define SYS_SPMEVFILTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b010, n), __SPMEV_op2(n))
-#define SYS_SPMEVTYPERn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b001, n), __SPMEV_op2(n))
-
-#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0)
-#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5)
-
-#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1)
-#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3)
-#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0)
-#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1)
-#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2)
-#define SYS_HSTR_EL2 sys_reg(3, 4, 1, 1, 3)
-#define SYS_HACR_EL2 sys_reg(3, 4, 1, 1, 7)
-
-#define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0)
-#define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1)
-#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2)
-#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
-
-#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
-#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
-#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
-#define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0)
-#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0)
-#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1)
-#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2)
-#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3)
-#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
-#define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0)
-#define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1)
-#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
-#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
-#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
-#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
-
-#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
-#define SYS_HPFAR_EL2 sys_reg(3, 4, 6, 0, 4)
-
-#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0)
-#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0)
-
-#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0)
-#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1)
-#define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2)
-#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
-#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
-#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
-#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
-#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
-#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
-
-#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
-#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
-#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
-#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
-#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
-
-#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
-#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
-
-#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
-#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
-#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
-#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
-#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
-#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
-#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
-#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
-#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
-
-#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
-#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
-#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
-#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
-#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
-#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
-#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
-#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
-#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
-
-#define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1)
-#define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2)
-#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7)
-
-#define __AMEV_op2(m) (m & 0x7)
-#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3))
-#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m))
-#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m)
-#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m))
-#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m)
-
-#define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3)
-#define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0)
-#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0)
-#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1)
-#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2)
-#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0)
-#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1)
-#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2)
-
-/* VHE encodings for architectural EL0/1 system registers */
-#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0)
-#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
-#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
-#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
-#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
-#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
-#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
-#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
-#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
-#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0)
-#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
-#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
-#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
-#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7)
-#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
-#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
-#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
-#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2)
-#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0)
-#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1)
-#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
-
-#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0)
-
-/* AT instructions */
-#define AT_Op0 1
-#define AT_CRn 7
-
-#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0)
-#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1)
-#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2)
-#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
-#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
-#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
-#define OP_AT_S1E1A sys_insn(AT_Op0, 0, AT_CRn, 9, 2)
-#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
-#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
-#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
-#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
-#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
-#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
-#define OP_AT_S1E2A sys_insn(AT_Op0, 4, AT_CRn, 9, 2)
-
-/* TLBI instructions */
-#define TLBI_Op0 1
-
-#define TLBI_Op1_EL1 0 /* Accessible from EL1 or higher */
-#define TLBI_Op1_EL2 4 /* Accessible from EL2 or higher */
-
-#define TLBI_CRn_XS 8 /* Extra Slow (the common one) */
-#define TLBI_CRn_nXS 9 /* not Extra Slow (which nobody uses)*/
-
-#define TLBI_CRm_IPAIS 0 /* S2 Inner-Shareable */
-#define TLBI_CRm_nROS 1 /* non-Range, Outer-Sharable */
-#define TLBI_CRm_RIS 2 /* Range, Inner-Sharable */
-#define TLBI_CRm_nRIS 3 /* non-Range, Inner-Sharable */
-#define TLBI_CRm_IPAONS 4 /* S2 Outer and Non-Shareable */
-#define TLBI_CRm_ROS 5 /* Range, Outer-Sharable */
-#define TLBI_CRm_RNS 6 /* Range, Non-Sharable */
-#define TLBI_CRm_nRNS 7 /* non-Range, Non-Sharable */
-
-#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0)
-#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1)
-#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2)
-#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3)
-#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5)
-#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7)
-#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1)
-#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3)
-#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5)
-#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7)
-#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0)
-#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1)
-#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2)
-#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3)
-#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5)
-#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7)
-#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1)
-#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3)
-#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5)
-#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7)
-#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1)
-#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3)
-#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5)
-#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7)
-#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0)
-#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1)
-#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2)
-#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3)
-#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5)
-#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7)
-#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0)
-#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1)
-#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2)
-#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3)
-#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5)
-#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7)
-#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1)
-#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3)
-#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5)
-#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7)
-#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0)
-#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1)
-#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2)
-#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3)
-#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5)
-#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7)
-#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1)
-#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3)
-#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5)
-#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7)
-#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1)
-#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3)
-#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5)
-#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7)
-#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0)
-#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1)
-#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2)
-#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3)
-#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5)
-#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7)
-#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1)
-#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2)
-#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5)
-#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6)
-#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0)
-#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1)
-#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4)
-#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5)
-#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6)
-#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1)
-#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5)
-#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0)
-#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1)
-#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4)
-#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5)
-#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6)
-#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0)
-#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1)
-#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2)
-#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3)
-#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4)
-#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5)
-#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6)
-#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7)
-#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1)
-#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5)
-#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1)
-#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5)
-#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0)
-#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1)
-#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4)
-#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5)
-#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6)
-#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1)
-#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2)
-#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5)
-#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6)
-#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0)
-#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1)
-#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4)
-#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5)
-#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6)
-#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1)
-#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5)
-#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0)
-#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1)
-#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4)
-#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5)
-#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6)
-#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0)
-#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1)
-#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2)
-#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3)
-#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4)
-#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5)
-#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6)
-#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7)
-#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1)
-#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5)
-#define OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1)
-#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5)
-#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0)
-#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1)
-#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4)
-#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5)
-#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6)
-
-/* Misc instructions */
-#define OP_GCSPUSHX sys_insn(1, 0, 7, 7, 4)
-#define OP_GCSPOPCX sys_insn(1, 0, 7, 7, 5)
-#define OP_GCSPOPX sys_insn(1, 0, 7, 7, 6)
-#define OP_GCSPUSHM sys_insn(1, 3, 7, 7, 0)
-
-#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4)
-#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5)
-#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4)
-#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5)
-#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6)
-#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
-
-/*
- * BRBE Instructions
- */
-#define BRB_IALL_INSN __emit_inst(0xd5000000 | OP_BRB_IALL | (0x1f))
-#define BRB_INJ_INSN __emit_inst(0xd5000000 | OP_BRB_INJ | (0x1f))
-
-/* Common SCTLR_ELx flags. */
-#define SCTLR_ELx_ENTP2 (BIT(60))
-#define SCTLR_ELx_DSSBS (BIT(44))
-#define SCTLR_ELx_ATA (BIT(43))
-
-#define SCTLR_ELx_EE_SHIFT 25
-#define SCTLR_ELx_ENIA_SHIFT 31
-
-#define SCTLR_ELx_ITFSB (BIT(37))
-#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
-#define SCTLR_ELx_ENIB (BIT(30))
-#define SCTLR_ELx_LSMAOE (BIT(29))
-#define SCTLR_ELx_nTLSMD (BIT(28))
-#define SCTLR_ELx_ENDA (BIT(27))
-#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT))
-#define SCTLR_ELx_EIS (BIT(22))
-#define SCTLR_ELx_IESB (BIT(21))
-#define SCTLR_ELx_TSCXT (BIT(20))
-#define SCTLR_ELx_WXN (BIT(19))
-#define SCTLR_ELx_ENDB (BIT(13))
-#define SCTLR_ELx_I (BIT(12))
-#define SCTLR_ELx_EOS (BIT(11))
-#define SCTLR_ELx_SA (BIT(3))
-#define SCTLR_ELx_C (BIT(2))
-#define SCTLR_ELx_A (BIT(1))
-#define SCTLR_ELx_M (BIT(0))
-
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
#else
@@ -869,35 +103,6 @@
SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \
SCTLR_EL1_TSCXT | SCTLR_EL1_EOS)
-/* MAIR_ELx memory attributes (used by Linux) */
-#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
-#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
-#define MAIR_ATTR_NORMAL_NC UL(0x44)
-#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
-#define MAIR_ATTR_NORMAL UL(0xff)
-#define MAIR_ATTR_MASK UL(0xff)
-
-/* Position the attr at the correct index */
-#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-
-/* id_aa64mmfr0 */
-#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
-#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
-#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
-#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
-#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
-#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
-#define ID_AA64MMFR0_EL1_TGRAN16_LPA2 ID_AA64MMFR0_EL1_TGRAN16_52_BIT
-#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
-
-#define ARM64_MIN_PARANGE_BITS 32
-
-#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
-#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
-#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
-#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3
-#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
-
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
#else
@@ -923,19 +128,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
#endif
-#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
-#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
-
-#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */
-#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */
-
-#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
-#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
-
-/* GCR_EL1 Definitions */
-#define SYS_GCR_EL1_RRND (BIT(16))
-#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
-
#ifdef CONFIG_KASAN_HW_TAGS
/*
* KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
@@ -951,156 +143,6 @@
#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
-/* RGSR_EL1 Definitions */
-#define SYS_RGSR_EL1_TAG_MASK 0xfUL
-#define SYS_RGSR_EL1_SEED_SHIFT 8
-#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
-
-/* TFSR{,E0}_EL1 bit definitions */
-#define SYS_TFSR_EL1_TF0_SHIFT 0
-#define SYS_TFSR_EL1_TF1_SHIFT 1
-#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
-#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
-
-/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
-#define SYS_MPIDR_SAFE_VAL (BIT(31))
-
-/* GIC Hypervisor interface registers */
-/* ICH_LR*_EL2 bit definitions */
-#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-
-#define ICH_LR_EOI (1ULL << 41)
-#define ICH_LR_GROUP (1ULL << 60)
-#define ICH_LR_HW (1ULL << 61)
-#define ICH_LR_STATE (3ULL << 62)
-#define ICH_LR_PENDING_BIT (1ULL << 62)
-#define ICH_LR_ACTIVE_BIT (1ULL << 63)
-#define ICH_LR_PHYS_ID_SHIFT 32
-#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
-#define ICH_LR_PRIORITY_SHIFT 48
-#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-
-/*
- * Permission Indirection Extension (PIE) permission encodings.
- * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
- */
-#define PIE_NONE_O UL(0x0)
-#define PIE_R_O UL(0x1)
-#define PIE_X_O UL(0x2)
-#define PIE_RX_O UL(0x3)
-#define PIE_RW_O UL(0x5)
-#define PIE_RWnX_O UL(0x6)
-#define PIE_RWX_O UL(0x7)
-#define PIE_R UL(0x8)
-#define PIE_GCS UL(0x9)
-#define PIE_RX UL(0xa)
-#define PIE_RW UL(0xc)
-#define PIE_RWX UL(0xe)
-#define PIE_MASK UL(0xf)
-
-#define PIRx_ELx_BITS_PER_IDX 4
-#define PIRx_ELx_PERM_SHIFT(idx) ((idx) * PIRx_ELx_BITS_PER_IDX)
-#define PIRx_ELx_PERM_PREP(idx, perm) (((perm) & PIE_MASK) << PIRx_ELx_PERM_SHIFT(idx))
-
-/*
- * Permission Overlay Extension (POE) permission encodings.
- */
-#define POE_NONE UL(0x0)
-#define POE_R UL(0x1)
-#define POE_X UL(0x2)
-#define POE_RX UL(0x3)
-#define POE_W UL(0x4)
-#define POE_RW UL(0x5)
-#define POE_WX UL(0x6)
-#define POE_RWX UL(0x7)
-#define POE_MASK UL(0xf)
-
-#define POR_ELx_BITS_PER_IDX 4
-#define POR_ELx_PERM_SHIFT(idx) ((idx) * POR_ELx_BITS_PER_IDX)
-#define POR_ELx_PERM_GET(idx, reg) (((reg) >> POR_ELx_PERM_SHIFT(idx)) & POE_MASK)
-#define POR_ELx_PERM_PREP(idx, perm) (((perm) & POE_MASK) << POR_ELx_PERM_SHIFT(idx))
-
-/*
- * Definitions for Guarded Control Stack
- */
-
-#define GCS_CAP_ADDR_MASK GENMASK(63, 12)
-#define GCS_CAP_ADDR_SHIFT 12
-#define GCS_CAP_ADDR_WIDTH 52
-#define GCS_CAP_ADDR(x) FIELD_GET(GCS_CAP_ADDR_MASK, x)
-
-#define GCS_CAP_TOKEN_MASK GENMASK(11, 0)
-#define GCS_CAP_TOKEN_SHIFT 0
-#define GCS_CAP_TOKEN_WIDTH 12
-#define GCS_CAP_TOKEN(x) FIELD_GET(GCS_CAP_TOKEN_MASK, x)
-
-#define GCS_CAP_VALID_TOKEN 0x1
-#define GCS_CAP_IN_PROGRESS_TOKEN 0x5
-
-#define GCS_CAP(x) ((((unsigned long)x) & GCS_CAP_ADDR_MASK) | \
- GCS_CAP_VALID_TOKEN)
-/*
- * Definitions for GICv5 instructions
- */
-#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3)
-#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0)
-#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0)
-#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1)
-#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1)
-#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7)
-#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4)
-#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2)
-#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5)
-#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0)
-
-/* Definitions for GIC CDAFF */
-#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32)
-#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28)
-#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDDI */
-#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDDIS */
-#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r)
-#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0)
-#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r)
-
-/* Definitions for GIC CDEN */
-#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDHM */
-#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32)
-#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDPEND */
-#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32)
-#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDPRI */
-#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35)
-#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GIC CDRCFG */
-#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0)
-
-/* Definitions for GICR CDIA */
-#define GICV5_GIC_CDIA_VALID_MASK BIT_ULL(32)
-#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GIC_CDIA_VALID_MASK, r)
-#define GICV5_GIC_CDIA_TYPE_MASK GENMASK_ULL(31, 29)
-#define GICV5_GIC_CDIA_ID_MASK GENMASK_ULL(23, 0)
-
-#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
-#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
-
#ifdef __ASSEMBLER__
.macro mrs_s, rt, sreg
@@ -1249,18 +291,6 @@
par; \
})
-#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val
-
-#define SYS_FIELD_GET(reg, field, val) \
- FIELD_GET(reg##_##field##_MASK, val)
-
-#define SYS_FIELD_PREP(reg, field, val) \
- FIELD_PREP(reg##_##field##_MASK, val)
-
-#define SYS_FIELD_PREP_ENUM(reg, field, val) \
- FIELD_PREP(reg##_##field##_MASK, \
- SYS_FIELD_VALUE(reg, field, val))
-
#endif
#endif /* __ASM_SYSREG_H */
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
index c2b34e761006..aee011abc2b9 100644
--- a/arch/arm64/tools/Makefile
+++ b/arch/arm64/tools/Makefile
@@ -3,26 +3,18 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
-kapisyshdr-y := cpucap-defs.h sysreg-defs.h
+include $(srctree)/arch/arm64/tools/Makefile.sysreg
-kapi-hdrs-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
-
-targets += $(addprefix ../../../, $(kapi-hdrs-y))
+targets += $(addprefix ../../../, $(kapi)/cpucap-defs.h)
PHONY += kapi
all: $(syscall64) kapi
-kapi: $(kapi-hdrs-y)
+kapi: $(kapi)/cpucap-defs.h
quiet_cmd_gen_cpucaps = GEN $@
cmd_gen_cpucaps = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
-quiet_cmd_gen_sysreg = GEN $@
- cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
-
$(kapi)/cpucap-defs.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
$(call if_changed,gen_cpucaps)
-
-$(kapi)/sysreg-defs.h: $(src)/gen-sysreg.awk $(src)/sysreg FORCE
- $(call if_changed,gen_sysreg)
diff --git a/arch/arm64/tools/Makefile.sysreg b/arch/arm64/tools/Makefile.sysreg
new file mode 100644
index 000000000000..1c2096208f14
--- /dev/null
+++ b/arch/arm64/tools/Makefile.sysreg
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+targets += $(addprefix ../../../, $(kapi)/sysreg-gen-defs.h)
+
+kapi: $(kapi)/sysreg-gen-defs.h
+
+quiet_cmd_gen_sysreg = GEN $@
+ cmd_gen_sysreg = mkdir -p $(dir $@); $(AWK) -f $(real-prereqs) > $@
+
+$(kapi)/sysreg-gen-defs.h: $(srctree)/arch/arm64/tools/gen-sysreg.awk $(srctree)/arch/arm64/tools/sysreg FORCE
+ $(call if_changed,gen_sysreg)
+
diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk
index 86860ab672dc..ffdabd3ff84a 100755
--- a/arch/arm64/tools/gen-sysreg.awk
+++ b/arch/arm64/tools/gen-sysreg.awk
@@ -107,8 +107,8 @@ function parse_bitdef(reg, field, bitdef, _bits)
}
BEGIN {
- print "#ifndef __ASM_SYSREG_DEFS_H"
- print "#define __ASM_SYSREG_DEFS_H"
+ print "#ifndef __ASM_SYSREG_GEN_DEFS_H"
+ print "#define __ASM_SYSREG_GEN_DEFS_H"
print ""
print "/* Generated file - do not edit */"
print ""
@@ -121,7 +121,7 @@ END {
if (__current_block_depth != 0)
fatal("Missing terminator for " block_current() " block")
- print "#endif /* __ASM_SYSREG_DEFS_H */"
+ print "#endif /* __ASM_SYSREG_GEN_DEFS_H */"
}
# skip blank lines and comment lines
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 06/27] arm64: Provide arm64 API for non-native architectures
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (4 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 05/27] arm64: Extract sysreg definitions Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM " Steffen Eiden
` (21 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Enable the usage of arm64 asm headers independent of the CPU
architecture. Introduce a new directory, include/arch/arm64/asm, to store
(host) architecture-agnostic headers for arm64 implementors.
The new path is added to the include path of native arm64. Therefore,
arm64 will continue to refer to those headers via include
<asm/header.h> without any code change. This means that the header names
should be unique for arm64 asm headers. For native arm64, headers at the
new path take precedence over any other include path.
For other architectures to use the esr string names, move *esr_class_str
from traps.c to esr.h.
For a seamless integration, non-native architectures must also add the
new path to their include path.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
MAINTAINERS | 1 +
arch/arm64/Makefile | 2 +
arch/arm64/kernel/traps.c | 53 ------------------
.../arch/arm64}/asm/brk-imm.h | 0
.../include => include/arch/arm64}/asm/esr.h | 56 ++++++++++++++++++-
.../arch/arm64}/asm/sysreg-defs.h | 0
6 files changed, 56 insertions(+), 56 deletions(-)
rename {arch/arm64/include => include/arch/arm64}/asm/brk-imm.h (100%)
rename {arch/arm64/include => include/arch/arm64}/asm/esr.h (88%)
rename {arch/arm64/include => include/arch/arm64}/asm/sysreg-defs.h (100%)
diff --git a/MAINTAINERS b/MAINTAINERS
index 3f03ef9ee2bd..e0a101fe05ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3817,6 +3817,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
F: Documentation/arch/arm64/
F: arch/arm64/
+F: include/arch/arm64/
F: include/uapi/arch/arm64/
F: drivers/virt/coco/arm-cca-guest/
F: drivers/virt/coco/pkvm-guest/
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 73a10f65ce8b..b244e837cedd 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -221,6 +221,8 @@ PHONY += virtconfig
virtconfig:
$(call merge_into_defconfig_override,defconfig,virt)
+LINUXINCLUDE := -I$(srctree)/include/arch/arm64/ $(LINUXINCLUDE)
+
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 914282016069..ca679dcdf1c8 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -829,59 +829,6 @@ void do_el0_sys(unsigned long esr, struct pt_regs *regs)
do_el0_undef(regs, esr);
}
-static const char *esr_class_str[] = {
- [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
- [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
- [ESR_ELx_EC_WFx] = "WFI/WFE",
- [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
- [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
- [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
- [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
- [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
- [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
- [ESR_ELx_EC_PAC] = "PAC",
- [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
- [ESR_ELx_EC_BTI] = "BTI",
- [ESR_ELx_EC_ILL] = "PSTATE.IL",
- [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
- [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
- [ESR_ELx_EC_SMC32] = "SMC (AArch32)",
- [ESR_ELx_EC_SVC64] = "SVC (AArch64)",
- [ESR_ELx_EC_HVC64] = "HVC (AArch64)",
- [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
- [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
- [ESR_ELx_EC_SVE] = "SVE",
- [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
- [ESR_ELx_EC_FPAC] = "FPAC",
- [ESR_ELx_EC_SME] = "SME",
- [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
- [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
- [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
- [ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
- [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
- [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
- [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
- [ESR_ELx_EC_MOPS] = "MOPS",
- [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
- [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
- [ESR_ELx_EC_GCS] = "Guarded Control Stack",
- [ESR_ELx_EC_SERROR] = "SError",
- [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
- [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
- [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
- [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
- [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
- [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
- [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
- [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
- [ESR_ELx_EC_BRK64] = "BRK (AArch64)",
-};
-
-const char *esr_get_class_string(unsigned long esr)
-{
- return esr_class_str[ESR_ELx_EC(esr)];
-}
-
/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0.
diff --git a/arch/arm64/include/asm/brk-imm.h b/include/arch/arm64/asm/brk-imm.h
similarity index 100%
rename from arch/arm64/include/asm/brk-imm.h
rename to include/arch/arm64/asm/brk-imm.h
diff --git a/arch/arm64/include/asm/esr.h b/include/arch/arm64/asm/esr.h
similarity index 88%
rename from arch/arm64/include/asm/esr.h
rename to include/arch/arm64/asm/esr.h
index 7e86d400864e..1c86202c1be4 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/include/arch/arm64/asm/esr.h
@@ -7,8 +7,9 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
-#include <asm/memory.h>
-#include <asm/sysreg.h>
+#include <linux/const.h>
+#include <asm/sysreg-defs.h>
+#include <asm/brk-imm.h>
#define ESR_ELx_EC_UNKNOWN UL(0x00)
#define ESR_ELx_EC_WFx UL(0x01)
@@ -541,7 +542,56 @@ static inline bool esr_iss_is_eretab(unsigned long esr)
return esr & ESR_ELx_ERET_ISS_ERETA;
}
-const char *esr_get_class_string(unsigned long esr);
+static inline const char *esr_get_class_string(unsigned long esr)
+{
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_UNKNOWN: return "Unknown/Uncategorized";
+ case ESR_ELx_EC_WFx: return "WFI/WFE";
+ case ESR_ELx_EC_CP15_32: return "CP15 MCR/MRC";
+ case ESR_ELx_EC_CP15_64: return "CP15 MCRR/MRRC";
+ case ESR_ELx_EC_CP14_MR: return "CP14 MCR/MRC";
+ case ESR_ELx_EC_CP14_LS: return "CP14 LDC/STC";
+ case ESR_ELx_EC_FP_ASIMD: return "ASIMD";
+ case ESR_ELx_EC_CP10_ID: return "CP10 MRC/VMRS";
+ case ESR_ELx_EC_PAC: return "PAC";
+ case ESR_ELx_EC_CP14_64: return "CP14 MCRR/MRRC";
+ case ESR_ELx_EC_BTI: return "BTI";
+ case ESR_ELx_EC_ILL: return "PSTATE.IL";
+ case ESR_ELx_EC_SVC32: return "SVC (AArch32)";
+ case ESR_ELx_EC_HVC32: return "HVC (AArch32)";
+ case ESR_ELx_EC_SMC32: return "SMC (AArch32)";
+ case ESR_ELx_EC_SVC64: return "SVC (AArch64)";
+ case ESR_ELx_EC_HVC64: return "HVC (AArch64)";
+ case ESR_ELx_EC_SMC64: return "SMC (AArch64)";
+ case ESR_ELx_EC_SYS64: return "MSR/MRS (AArch64)";
+ case ESR_ELx_EC_SVE: return "SVE";
+ case ESR_ELx_EC_ERET: return "ERET/ERETAA/ERETAB";
+ case ESR_ELx_EC_FPAC: return "FPAC";
+ case ESR_ELx_EC_SME: return "SME";
+ case ESR_ELx_EC_IMP_DEF: return "EL3 IMP DEF";
+ case ESR_ELx_EC_IABT_LOW: return "IABT (lower EL)";
+ case ESR_ELx_EC_IABT_CUR: return "IABT (current EL)";
+ case ESR_ELx_EC_PC_ALIGN: return "PC Alignment";
+ case ESR_ELx_EC_DABT_LOW: return "DABT (lower EL)";
+ case ESR_ELx_EC_DABT_CUR: return "DABT (current EL)";
+ case ESR_ELx_EC_SP_ALIGN: return "SP Alignment";
+ case ESR_ELx_EC_MOPS: return "MOPS";
+ case ESR_ELx_EC_FP_EXC32: return "FP (AArch32)";
+ case ESR_ELx_EC_FP_EXC64: return "FP (AArch64)";
+ case ESR_ELx_EC_GCS: return "Guarded Control Stack";
+ case ESR_ELx_EC_SERROR: return "SError";
+ case ESR_ELx_EC_BREAKPT_LOW: return "Breakpoint (lower EL)";
+ case ESR_ELx_EC_BREAKPT_CUR: return "Breakpoint (current EL)";
+ case ESR_ELx_EC_SOFTSTP_LOW: return "Software Step (lower EL)";
+ case ESR_ELx_EC_SOFTSTP_CUR: return "Software Step (current EL)";
+ case ESR_ELx_EC_WATCHPT_LOW: return "Watchpoint (lower EL)";
+ case ESR_ELx_EC_WATCHPT_CUR: return "Watchpoint (current EL)";
+ case ESR_ELx_EC_BKPT32: return "BKPT (AArch32)";
+ case ESR_ELx_EC_VECTOR32: return "Vector catch (AArch32)";
+ case ESR_ELx_EC_BRK64: return "BRK (AArch64)";
+ default: return "UNRECOGNIZED EC";
+ }
+}
#endif /* __ASSEMBLER__ */
#endif /* __ASM_ESR_H */
diff --git a/arch/arm64/include/asm/sysreg-defs.h b/include/arch/arm64/asm/sysreg-defs.h
similarity index 100%
rename from arch/arm64/include/asm/sysreg-defs.h
rename to include/arch/arm64/asm/sysreg-defs.h
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM API for non-native architectures
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (5 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 06/27] arm64: Provide arm64 API for non-native architectures Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 10:08 ` Marc Zyngier
2026-04-02 4:21 ` [PATCH v1 08/27] arm64: Extract pstate definitions from ptrace Steffen Eiden
` (20 subsequent siblings)
27 siblings, 1 reply; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
The KVM‑related headers are moved to include/kvm/arm64/, decoupling them from
the arm64 architecture directory. The design convention is that
architecture‑specific headers under <arch>/include/asm/ include from this
shared location, allowing non‑native hosts to consume the arm64 KVM
infrastructure without duplicating code.
This refactoring enables non-native hosts to include and utilize arm64
KVM infrastructure without duplicating code or creating
architecture-specific dependencies.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
MAINTAINERS | 1 +
arch/arm64/include/asm/el2_setup.h | 2 +-
arch/arm64/include/asm/hardirq.h | 2 +-
arch/arm64/include/asm/kvm_emulate.h | 3 +-
arch/arm64/include/asm/kvm_host.h | 99 +--------------
arch/arm64/include/asm/kvm_mmu.h | 41 +------
arch/arm64/kernel/head.S | 2 +-
arch/arm64/kernel/hyp-stub.S | 2 +-
arch/arm64/kvm/arm.c | 6 +-
arch/arm64/kvm/debug.c | 2 +-
arch/arm64/kvm/guest.c | 2 +
arch/arm64/kvm/hyp/entry.S | 2 +-
arch/arm64/kvm/hyp/hyp-entry.S | 2 +-
arch/arm64/kvm/hyp/nvhe/host.S | 2 +-
arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2 +-
arch/arm64/kvm/mmu.c | 2 +-
arch/arm64/kvm/nested.c | 2 +-
arch/arm64/kvm/reset.c | 2 +-
arch/arm64/kvm/sys_regs.c | 2 +-
arch/arm64/kvm/vgic/vgic-its.c | 2 +-
arch/arm64/kvm/vgic/vgic-mmio-v3.c | 2 +-
arch/arm64/kvm/vgic/vgic-v3-nested.c | 2 +-
include/kvm/arm64/guest.h | 10 ++
.../asm => include/kvm/arm64}/kvm_arm.h | 5 +-
include/kvm/arm64/kvm_host.h | 113 ++++++++++++++++++
include/kvm/arm64/kvm_mmu.h | 47 ++++++++
26 files changed, 203 insertions(+), 156 deletions(-)
create mode 100644 include/kvm/arm64/guest.h
rename {arch/arm64/include/asm => include/kvm/arm64}/kvm_arm.h (99%)
create mode 100644 include/kvm/arm64/kvm_host.h
create mode 100644 include/kvm/arm64/kvm_mmu.h
diff --git a/MAINTAINERS b/MAINTAINERS
index e0a101fe05ce..075463117c2b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13995,6 +13995,7 @@ F: Documentation/virt/kvm/arm/
F: Documentation/virt/kvm/devices/arm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
+F: include/kvm/arm64/
F: include/uapi/arch/arm64/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 85f4c1615472..dfe3e85759f6 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -11,7 +11,7 @@
#error Assembly-only header
#endif
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 77d6b8c63d4e..0eceb8ab6abb 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -10,7 +10,7 @@
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/sysreg.h>
#define ack_bad_irq ack_bad_irq
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5bf3d7e1d92c..822f6077b107 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -16,13 +16,14 @@
#include <asm/debug-monitors.h>
#include <asm/esr.h>
-#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>
+#include <kvm/arm64/kvm_arm.h>
+
#define CURRENT_EL_SP_EL0_VECTOR 0x0
#define CURRENT_EL_SP_ELx_VECTOR 0x200
#define LOWER_EL_AArch64_VECTOR 0x400
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 70cb9cfd760a..ae9e507f2c7c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -37,25 +37,12 @@
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>
+#include <kvm/arm64/kvm_host.h>
+
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 9
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
-#define KVM_REQ_SLEEP \
- KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
-#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
-#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
-#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
-#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
-#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
-#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
-#define KVM_REQ_VGIC_PROCESS_UPDATE KVM_ARCH_REQ(11)
-
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -324,35 +311,7 @@ struct kvm_arch {
/* Protects VM-scoped configuration data */
struct mutex config_lock;
- /*
- * If we encounter a data abort without valid instruction syndrome
- * information, report this to user space. User space can (and
- * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
- * supported.
- */
-#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
- /* Memory Tagging Extension enabled for the guest */
-#define KVM_ARCH_FLAG_MTE_ENABLED 1
- /* At least one vCPU has ran in the VM */
-#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
- /* The vCPU feature set for the VM is configured */
-#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
- /* PSCI SYSTEM_SUSPEND enabled for the guest */
-#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
- /* VM counter offset */
-#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
- /* Timer PPIs made immutable */
-#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
- /* Initial ID reg values loaded */
-#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
- /* Fine-Grained UNDEF initialised */
-#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
- /* SVE exposed to guest */
-#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
- /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
-#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
- /* Unhandled SEAs are taken to userspace */
-#define KVM_ARCH_FLAG_EXIT_SEA 11
+ /* VM-wide vCPU feature set */
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -812,13 +771,6 @@ extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
-struct vcpu_reset_state {
- unsigned long pc;
- unsigned long r0;
- bool be;
- bool reset;
-};
-
struct vncr_tlb;
struct kvm_vcpu_arch {
@@ -1020,41 +972,6 @@ struct kvm_vcpu_arch {
/* pKVM VCPU setup completed */
#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
-/* Exception pending */
-#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
-/*
- * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
- * be set together with an exception...
- */
-#define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1))
-/* Target EL/MODE (not a single flag, but let's abuse the macro) */
-#define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1))
-
-/* Helpers to encode exceptions with minimum fuss */
-#define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK)
-#define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL)
-#define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
-
-/*
- * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
- * values:
- *
- * For AArch32 EL1:
- */
-#define EXCEPT_AA32_UND __vcpu_except_flags(0)
-#define EXCEPT_AA32_IABT __vcpu_except_flags(1)
-#define EXCEPT_AA32_DABT __vcpu_except_flags(2)
-/* For AArch64: */
-#define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0)
-#define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1)
-#define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2)
-#define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3)
-/* For AArch64 with NV: */
-#define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4)
-#define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
-#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
-#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
-
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
@@ -1215,7 +1132,6 @@ struct kvm_vcpu_stat {
};
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
@@ -1299,13 +1215,6 @@ int __init populate_nv_trap_config(void);
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
-/* MMIO helpers */
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
-int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
-
/*
* Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
* arrived in guest context. For arm64, any event that arrives while a vCPU is
@@ -1480,8 +1389,6 @@ struct kvm *kvm_arch_alloc_vm(void);
#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)
-#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
-
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d968aca0461a..6a990aa63622 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -100,6 +100,8 @@ alternative_cb_end
#include <asm/kvm_host.h>
#include <asm/kvm_nested.h>
+#include <kvm/arm64/kvm_mmu.h>
+
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
@@ -142,12 +144,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
extern u32 __hyp_va_bits;
-/*
- * We currently support using a VM-specified IPA size. For backward
- * compatibility, the default IPA size is fixed to 40bits.
- */
-#define KVM_PHYS_SHIFT (40)
-
#define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr)
#define kvm_phys_size(mmu) (_AC(1, ULL) << kvm_phys_shift(mmu))
#define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL))
@@ -161,9 +157,6 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
-int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
- void __iomem **kaddr,
- void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
@@ -178,8 +171,6 @@ void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
@@ -267,34 +258,6 @@ static inline unsigned int kvm_get_vmid_bits(void)
return get_vmid_bits(reg);
}
-
-/*
- * We are not in the kvm->srcu critical section most of the time, so we take
- * the SRCU read lock here. Since we copy the data from the user page, we
- * can immediately drop the lock again.
- */
-static inline int kvm_read_guest_lock(struct kvm *kvm,
- gpa_t gpa, void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_read_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
-static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
- const void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_write_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 87a822e5c4ca..853952be8021 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -25,7 +25,7 @@
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 085bc9972f6b..bf3a541fc076 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -11,7 +11,7 @@
#include <asm/assembler.h>
#include <asm/el2_setup.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 410ffd41fd73..47630730260f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -32,7 +32,7 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
@@ -46,6 +46,8 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>
+#include <kvm/arm64/guest.h>
+
#include "sys_regs.h"
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
@@ -1489,7 +1491,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
-static unsigned long system_supported_vcpu_features(void)
+unsigned long system_supported_vcpu_features(void)
{
unsigned long features = KVM_VCPU_VALID_FEATURES;
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 3ad6b7c6e4ba..349fea6e3e51 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -12,7 +12,7 @@
#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_emulate.h>
static int cpu_has_spe(u64 dfr0)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 332c453b87cf..557c380ffa37 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -27,6 +27,8 @@
#include <asm/kvm_nested.h>
#include <asm/sigcontext.h>
+#include <kvm/arm64/guest.h>
+
#include "trace.h"
const struct kvm_stats_desc kvm_vm_stats_desc[] = {
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 11a10d8f5beb..b1694f738208 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -10,7 +10,7 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 03f97d71984c..dfec4e6e5d62 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -10,7 +10,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index eef15b374abb..3f5093387f5a 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 0d42eedc7167..8677f4da7a2f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -11,7 +11,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 17d64a1e11e5..e19ff77b3cd5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -14,7 +14,7 @@
#include <asm/acpi.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 2c43097248b2..1a3bd7bf6bf4 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -9,7 +9,7 @@
#include <linux/kvm_host.h>
#include <asm/fixmap.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b963fd975aac..036bf2dff976 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -23,7 +23,7 @@
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1b4cacb6e918..a7564ee0fd15 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -24,7 +24,7 @@
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 2ea9f1c7ebcd..076877db9243 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -17,7 +17,7 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 89edb84d1ac6..009e52a16c25 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -12,7 +12,7 @@
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 5c69fa615823..a2070a637f51 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -9,7 +9,7 @@
#include <kvm/arm_vgic.h>
-#include <asm/kvm_arm.h>
+#include <kvm/arm64/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
diff --git a/include/kvm/arm64/guest.h b/include/kvm/arm64/guest.h
new file mode 100644
index 000000000000..fa67d992e8fd
--- /dev/null
+++ b/include/kvm/arm64/guest.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __KVM_ARM64_GUEST_H__
+#define __KVM_ARM64_GUEST_H__
+
+/* Implemented by virt/kvm/arm64/guest.c */
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+
+#endif /* __KVM_ARM64_GUEST_H__ */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/include/kvm/arm64/kvm_arm.h
similarity index 99%
rename from arch/arm64/include/asm/kvm_arm.h
rename to include/kvm/arm64/kvm_arm.h
index 3f9233b5a130..b9c45e4dad72 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/include/kvm/arm64/kvm_arm.h
@@ -8,8 +8,9 @@
#define __ARM64_KVM_ARM_H__
#include <asm/esr.h>
-#include <asm/memory.h>
-#include <asm/sysreg.h>
+#include <linux/const.h>
+#include <linux/bits.h>
+#include <asm/sysreg-defs.h>
#include <asm/types.h>
/*
diff --git a/include/kvm/arm64/kvm_host.h b/include/kvm/arm64/kvm_host.h
new file mode 100644
index 000000000000..3a434f47497b
--- /dev/null
+++ b/include/kvm/arm64/kvm_host.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __KVM_ARM64_KVM_HOST_H
+#define __KVM_ARM64_KVM_HOST_H
+
+#include <linux/types.h>
+
+#define KVM_VCPU_MAX_FEATURES 9
+
+#define KVM_REQ_SLEEP \
+ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
+#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
+#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
+#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
+#define KVM_REQ_VGIC_PROCESS_UPDATE KVM_ARCH_REQ(11)
+
+struct vcpu_reset_state {
+ unsigned long pc;
+ unsigned long r0;
+ bool be;
+ bool reset;
+};
+
+/* MMIO helpers */
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
+
+/* Exception pending */
+#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
+/*
+ * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
+ * be set together with an exception...
+ */
+#define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1))
+/* Target EL/MODE (not a single flag, but let's abuse the macro) */
+#define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1))
+
+/* Helpers to encode exceptions with minimum fuss */
+#define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK)
+#define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL)
+#define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
+
+/*
+ * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
+ * values:
+ *
+ * For AArch32 EL1:
+ */
+#define EXCEPT_AA32_UND __vcpu_except_flags(0)
+#define EXCEPT_AA32_IABT __vcpu_except_flags(1)
+#define EXCEPT_AA32_DABT __vcpu_except_flags(2)
+/* For AArch64: */
+#define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0)
+#define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1)
+#define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2)
+#define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3)
+/* For AArch64 with NV: */
+#define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4)
+#define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
+#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
+#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
+
+static inline bool kvm_supports_32bit_el0(void)
+{
+ return false;
+}
+
+#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
+
+/*
+ * If we encounter a data abort without valid instruction syndrome
+ * information, report this to user space. User space can (and
+ * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
+ * supported.
+ */
+#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
+/* Memory Tagging Extension enabled for the guest */
+#define KVM_ARCH_FLAG_MTE_ENABLED 1
+/* At least one vCPU has run in the VM */
+#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
+/* The vCPU feature set for the VM is configured */
+#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
+/* PSCI SYSTEM_SUSPEND enabled for the guest */
+#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
+/* VM counter offset */
+#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
+/* Timer PPIs made immutable */
+#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
+/* Initial ID reg values loaded */
+#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
+/* Fine-Grained UNDEF initialised */
+#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
+/* SVE exposed to guest */
+#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
+/* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
+/* Unhandled SEAs are taken to userspace */
+#define KVM_ARCH_FLAG_EXIT_SEA 11
+
+/* Implemented in architecture specific code */
+unsigned long system_supported_vcpu_features(void);
+
+#endif /* __KVM_ARM64_KVM_HOST_H */
diff --git a/include/kvm/arm64/kvm_mmu.h b/include/kvm/arm64/kvm_mmu.h
new file mode 100644
index 000000000000..91607105eaf6
--- /dev/null
+++ b/include/kvm/arm64/kvm_mmu.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef KVM_ARM64_KVM_MMU_H__
+#define KVM_ARM64_KVM_MMU_H__
+
+/*
+ * We currently support using a VM-specified IPA size. For backward
+ * compatibility, the default IPA size is fixed to 40bits.
+ */
+#define KVM_PHYS_SHIFT (40)
+
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+ gpa_t gpa, void *data, unsigned long len)
+{
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
+ int ret = kvm_read_guest(kvm, gpa, data, len);
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return ret;
+}
+
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ const void *data, unsigned long len)
+{
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
+ int ret = kvm_write_guest(kvm, gpa, data, len);
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return ret;
+}
+
+/* Implemented by each architecture */
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size, bool writable);
+
+int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
+ void __iomem **kaddr,
+ void __iomem **haddr);
+
+#endif /* KVM_ARM64_KVM_MMU_H__ */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* Re: [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM API for non-native architectures
2026-04-02 4:21 ` [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM " Steffen Eiden
@ 2026-04-02 10:08 ` Marc Zyngier
2026-04-02 11:26 ` Christian Borntraeger
0 siblings, 1 reply; 33+ messages in thread
From: Marc Zyngier @ 2026-04-02 10:08 UTC (permalink / raw)
To: Steffen Eiden
Cc: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390,
Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Nina Schoetterl-Glausch, Oliver Upton, Paolo Bonzini,
Suzuki K Poulose, Ulrich Weigand, Will Deacon, Zenghui Yu
On Thu, 02 Apr 2026 05:21:03 +0100,
Steffen Eiden <seiden@linux.ibm.com> wrote:
Drive-by comment as I was idly going through this patch.
[...]
> diff --git a/include/kvm/arm64/kvm_host.h b/include/kvm/arm64/kvm_host.h
> new file mode 100644
> index 000000000000..3a434f47497b
> --- /dev/null
> +++ b/include/kvm/arm64/kvm_host.h
[...]
> +static inline bool kvm_supports_32bit_el0(void)
> +{
> + return false;
> +}
> +
This looks wrong. The original file still has:
#define kvm_supports_32bit_el0() \
(system_supports_32bit_el0() && \
!static_branch_unlikely(&arm64_mismatched_32bit_el0))
which should not be tampered with. I guess we are simply lucky that
the preprocessor is braindead enough to make this sort of things go
unnoticed, but I'd expect this predicate to be directly provided by
the s390 code one way or another.
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
^ permalink raw reply [flat|nested] 33+ messages in thread* Re: [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM API for non-native architectures
2026-04-02 10:08 ` Marc Zyngier
@ 2026-04-02 11:26 ` Christian Borntraeger
0 siblings, 0 replies; 33+ messages in thread
From: Christian Borntraeger @ 2026-04-02 11:26 UTC (permalink / raw)
To: Marc Zyngier, Steffen Eiden
Cc: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390,
Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Claudio Imbrenda, David Hildenbrand, Gautam Gala,
Hendrik Brueckner, Janosch Frank, Joey Gouly,
Nina Schoetterl-Glausch, Oliver Upton, Paolo Bonzini,
Suzuki K Poulose, Ulrich Weigand, Will Deacon, Zenghui Yu
Am 02.04.26 um 12:08 schrieb Marc Zyngier:
>> +static inline bool kvm_supports_32bit_el0(void)
>> +{
>> + return false;
>> +}
>> +
>
> This looks wrong. The original file still has:
>
> #define kvm_supports_32bit_el0() \
> (system_supports_32bit_el0() && \
> !static_branch_unlikely(&arm64_mismatched_32bit_el0))
Thanks for spotting.
Yes, we will fix. Seems to be a leftover when sorting patches.
Christian
^ permalink raw reply [flat|nested] 33+ messages in thread
* [PATCH v1 08/27] arm64: Extract pstate definitions from ptrace
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (6 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 07/27] KVM: arm64: Provide arm64 KVM " Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 09/27] KVM: arm64: Share kvm_emulate definitions Steffen Eiden
` (19 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Split all definitions that can be used by non-native architectures into a
separate file pstate.h. This allows other architectures using
the pstate definitions. While at it refactor SPSR related definitions
to use the BIT(n) macro and move them into sysreg-defs.h
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/ptrace.h | 34 +-----------------
arch/arm64/include/uapi/asm/Kbuild | 1 +
arch/arm64/include/uapi/asm/ptrace.h | 49 +------------------------
include/arch/arm64/asm/pstate.h | 46 ++++++++++++++++++++++++
include/arch/arm64/asm/sysreg-defs.h | 42 ++++++++++++++++++++++
include/uapi/arch/arm64/asm/pstate.h | 53 ++++++++++++++++++++++++++++
6 files changed, 144 insertions(+), 81 deletions(-)
create mode 100644 include/arch/arm64/asm/pstate.h
create mode 100644 include/uapi/arch/arm64/asm/pstate.h
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 39582511ad72..72ea0a8af960 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -9,6 +9,7 @@
#define __ASM_PTRACE_H
#include <asm/cpufeature.h>
+#include <asm/pstate.h>
#include <uapi/asm/ptrace.h>
@@ -28,10 +29,6 @@
#define GIC_PRIO_PSR_I_SET GICV3_PRIO_PSR_I_SET
-/* Additional SPSR bits not exposed in the UABI */
-#define PSR_MODE_THREAD_BIT (1 << 0)
-#define PSR_IL_BIT (1 << 20)
-
/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
@@ -42,41 +39,12 @@
#define COMPAT_PTRACE_GETHBPREGS 29
#define COMPAT_PTRACE_SETHBPREGS 30
-/* SPSR_ELx bits for exceptions taken from AArch32 */
-#define PSR_AA32_MODE_MASK 0x0000001f
-#define PSR_AA32_MODE_USR 0x00000010
-#define PSR_AA32_MODE_FIQ 0x00000011
-#define PSR_AA32_MODE_IRQ 0x00000012
-#define PSR_AA32_MODE_SVC 0x00000013
-#define PSR_AA32_MODE_ABT 0x00000017
-#define PSR_AA32_MODE_HYP 0x0000001a
-#define PSR_AA32_MODE_UND 0x0000001b
-#define PSR_AA32_MODE_SYS 0x0000001f
-#define PSR_AA32_T_BIT 0x00000020
-#define PSR_AA32_F_BIT 0x00000040
-#define PSR_AA32_I_BIT 0x00000080
-#define PSR_AA32_A_BIT 0x00000100
-#define PSR_AA32_E_BIT 0x00000200
-#define PSR_AA32_PAN_BIT 0x00400000
-#define PSR_AA32_SSBS_BIT 0x00800000
-#define PSR_AA32_DIT_BIT 0x01000000
-#define PSR_AA32_Q_BIT 0x08000000
-#define PSR_AA32_V_BIT 0x10000000
-#define PSR_AA32_C_BIT 0x20000000
-#define PSR_AA32_Z_BIT 0x40000000
-#define PSR_AA32_N_BIT 0x80000000
-#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
-#define PSR_AA32_GE_MASK 0x000f0000
-
#ifdef CONFIG_CPU_BIG_ENDIAN
#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
#else
#define PSR_AA32_ENDSTATE 0
#endif
-/* AArch32 CPSR bits, as seen in AArch32 */
-#define COMPAT_PSR_DIT_BIT 0x00200000
-
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index b45584e83448..43d1a8ab98e1 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += kvm_para.h
shared-uapi-y += kvm.h
shared-uapi-y += sve_context.h
+shared-uapi-y += pstate.h
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6fed93fb2536..6e743eb021e8 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -24,54 +24,7 @@
#include <asm/hwcap.h>
#include <asm/sve_context.h>
-
-
-/*
- * PSR bits
- */
-#define PSR_MODE_EL0t 0x00000000
-#define PSR_MODE_EL1t 0x00000004
-#define PSR_MODE_EL1h 0x00000005
-#define PSR_MODE_EL2t 0x00000008
-#define PSR_MODE_EL2h 0x00000009
-#define PSR_MODE_EL3t 0x0000000c
-#define PSR_MODE_EL3h 0x0000000d
-#define PSR_MODE_MASK 0x0000000f
-
-/* AArch32 CPSR bits */
-#define PSR_MODE32_BIT 0x00000010
-
-/* AArch64 SPSR bits */
-#define PSR_F_BIT 0x00000040
-#define PSR_I_BIT 0x00000080
-#define PSR_A_BIT 0x00000100
-#define PSR_D_BIT 0x00000200
-#define PSR_BTYPE_MASK 0x00000c00
-#define PSR_SSBS_BIT 0x00001000
-#define PSR_PAN_BIT 0x00400000
-#define PSR_UAO_BIT 0x00800000
-#define PSR_DIT_BIT 0x01000000
-#define PSR_TCO_BIT 0x02000000
-#define PSR_V_BIT 0x10000000
-#define PSR_C_BIT 0x20000000
-#define PSR_Z_BIT 0x40000000
-#define PSR_N_BIT 0x80000000
-
-#define PSR_BTYPE_SHIFT 10
-
-/*
- * Groups of PSR bits
- */
-#define PSR_f 0xff000000 /* Flags */
-#define PSR_s 0x00ff0000 /* Status */
-#define PSR_x 0x0000ff00 /* Extension */
-#define PSR_c 0x000000ff /* Control */
-
-/* Convenience names for the values of PSTATE.BTYPE */
-#define PSR_BTYPE_NONE (0b00 << PSR_BTYPE_SHIFT)
-#define PSR_BTYPE_JC (0b01 << PSR_BTYPE_SHIFT)
-#define PSR_BTYPE_C (0b10 << PSR_BTYPE_SHIFT)
-#define PSR_BTYPE_J (0b11 << PSR_BTYPE_SHIFT)
+#include <asm/pstate.h>
/* syscall emulation path in ptrace */
#define PTRACE_SYSEMU 31
diff --git a/include/arch/arm64/asm/pstate.h b/include/arch/arm64/asm/pstate.h
new file mode 100644
index 000000000000..3ff6073a0eaa
--- /dev/null
+++ b/include/arch/arm64/asm/pstate.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_PSTATE_H
+#define __ASM_PSTATE_H
+
+#include <asm/sysreg-defs.h>
+#ifdef __aarch64__
+#include <uapi/asm/pstate.h>
+#else
+#include <uapi/arch/arm64/asm/pstate.h>
+#endif /* __aarch64__ */
+
+/* Additional SPSR bits not exposed in the UABI */
+#define PSR_MODE_THREAD_BIT BIT(0)
+#define PSR_IL_BIT SPSR_IL
+
+/* SPSR_ELx bits for exceptions taken from AArch32 */
+#define PSR_AA32_MODE_MASK SPSR_MODE_MASK
+#define PSR_AA32_MODE_USR (SPSR_MODE_32BIT | SPSR32_MODE_USR)
+#define PSR_AA32_MODE_FIQ (SPSR_MODE_32BIT | SPSR32_MODE_FIQ)
+#define PSR_AA32_MODE_IRQ (SPSR_MODE_32BIT | SPSR32_MODE_IRQ)
+#define PSR_AA32_MODE_SVC (SPSR_MODE_32BIT | SPSR32_MODE_SVC)
+#define PSR_AA32_MODE_ABT (SPSR_MODE_32BIT | SPSR32_MODE_ABT)
+#define PSR_AA32_MODE_HYP (SPSR_MODE_32BIT | SPSR32_MODE_HYP)
+#define PSR_AA32_MODE_UND (SPSR_MODE_32BIT | SPSR32_MODE_UND)
+#define PSR_AA32_MODE_SYS (SPSR_MODE_32BIT | SPSR32_MODE_SYS)
+#define PSR_AA32_T_BIT SPSR32_T
+#define PSR_AA32_F_BIT SPSR_F
+#define PSR_AA32_I_BIT SPSR_I
+#define PSR_AA32_A_BIT SPSR_A
+#define PSR_AA32_E_BIT SPSR32_E
+#define PSR_AA32_PAN_BIT SPSR_PAN
+#define PSR_AA32_SSBS_BIT SPSR32_SSBS
+#define PSR_AA32_DIT_BIT SPSR_DIT
+#define PSR_AA32_Q_BIT SPSR32_Q
+#define PSR_AA32_V_BIT SPSR_V
+#define PSR_AA32_C_BIT SPSR_C
+#define PSR_AA32_Z_BIT SPSR_Z
+#define PSR_AA32_N_BIT SPSR_N
+#define PSR_AA32_IT_MASK SPSR32_IT_MASK /* If-Then execution state mask */
+#define PSR_AA32_GE_MASK SPSR32_GE_MASK
+
+/* AArch32 CPSR bits, as seen in AArch32 */
+#define COMPAT_PSR_DIT_BIT 0x00200000
+
+#endif /* __ASM_PSTATE_H */
diff --git a/include/arch/arm64/asm/sysreg-defs.h b/include/arch/arm64/asm/sysreg-defs.h
index d5196f293e19..4460fae38623 100644
--- a/include/arch/arm64/asm/sysreg-defs.h
+++ b/include/arch/arm64/asm/sysreg-defs.h
@@ -470,6 +470,48 @@
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
+#define SPSR_PPEND BIT(33)
+#define SPSR_N BIT(31)
+#define SPSR_Z BIT(30)
+#define SPSR_C BIT(29)
+#define SPSR_V BIT(28)
+#define SPSR32_Q BIT(27)
+#define SPSR32_IT_MASK (GENMASK(26, 25) | GENMASK(15, 10))
+#define SPSR64_TCO BIT(25)
+#define SPSR_DIT BIT(24)
+#define SPSR64_UAO BIT(23)
+#define SPSR32_SSBS BIT(23)
+#define SPSR_PAN BIT(22)
+#define SPSR_SS BIT(21)
+#define SPSR_IL BIT(20)
+#define SPSR32_GE_MASK GENMASK(19, 16)
+#define SPSR64_SSBS BIT(12)
+#define SPSR64_BTYPE_SHIFT 10
+#define SPSR64_BTYPE_MASK (UL(3) << SPSR64_BTYPE_SHIFT)
+#define SPSR64_D BIT(9)
+#define SPSR32_E BIT(9)
+#define SPSR_A BIT(8)
+#define SPSR_I BIT(7)
+#define SPSR_F BIT(6)
+#define SPSR32_T BIT(5)
+#define SPSR_MODE_MASK UL(0x1f)
+#define SPSR_MODE_32BIT BIT(4)
+#define SPSR64_MODE_EL0 UL(0x0)
+#define SPSR64_MODE_EL1t UL(0x4)
+#define SPSR64_MODE_EL1h UL(0x5)
+#define SPSR64_MODE_EL2t UL(0x8)
+#define SPSR64_MODE_EL2h UL(0x9)
+#define SPSR64_MODE_EL3t UL(0xc)
+#define SPSR64_MODE_EL3h UL(0xd)
+#define SPSR32_MODE_USR UL(0x0)
+#define SPSR32_MODE_FIQ UL(0x1)
+#define SPSR32_MODE_IRQ UL(0x2)
+#define SPSR32_MODE_SVC UL(0x3)
+#define SPSR32_MODE_ABT UL(0x7)
+#define SPSR32_MODE_HYP UL(0xa)
+#define SPSR32_MODE_UND UL(0xb)
+#define SPSR32_MODE_SYS UL(0xf)
+
#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_HPFAR_EL2 sys_reg(3, 4, 6, 0, 4)
diff --git a/include/uapi/arch/arm64/asm/pstate.h b/include/uapi/arch/arm64/asm/pstate.h
new file mode 100644
index 000000000000..87b2acec9ac2
--- /dev/null
+++ b/include/uapi/arch/arm64/asm/pstate.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI__ASM_PSTATE_H
+#define _UAPI__ASM_PSTATE_H
+
+/*
+ * PSR bits
+ */
+#define PSR_MODE_EL0t 0x00000000
+#define PSR_MODE_EL1t 0x00000004
+#define PSR_MODE_EL1h 0x00000005
+#define PSR_MODE_EL2t 0x00000008
+#define PSR_MODE_EL2h 0x00000009
+#define PSR_MODE_EL3t 0x0000000c
+#define PSR_MODE_EL3h 0x0000000d
+#define PSR_MODE_MASK 0x0000000f
+
+/* AArch32 CPSR bits */
+#define PSR_MODE32_BIT 0x00000010
+
+/* AArch64 SPSR bits */
+#define PSR_F_BIT 0x00000040
+#define PSR_I_BIT 0x00000080
+#define PSR_A_BIT 0x00000100
+#define PSR_D_BIT 0x00000200
+#define PSR_BTYPE_MASK 0x00000c00
+#define PSR_SSBS_BIT 0x00001000
+#define PSR_PAN_BIT 0x00400000
+#define PSR_UAO_BIT 0x00800000
+#define PSR_DIT_BIT 0x01000000
+#define PSR_TCO_BIT 0x02000000
+#define PSR_V_BIT 0x10000000
+#define PSR_C_BIT 0x20000000
+#define PSR_Z_BIT 0x40000000
+#define PSR_N_BIT 0x80000000
+
+#define PSR_BTYPE_SHIFT 10
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f 0xff000000 /* Flags */
+#define PSR_s 0x00ff0000 /* Status */
+#define PSR_x 0x0000ff00 /* Extension */
+#define PSR_c 0x000000ff /* Control */
+
+/* Convenience names for the values of PSTATE.BTYPE */
+#define PSR_BTYPE_NONE (0b00 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_JC (0b01 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_C (0b10 << PSR_BTYPE_SHIFT)
+#define PSR_BTYPE_J (0b11 << PSR_BTYPE_SHIFT)
+
+#endif /* _UAPI__ASM_PSTATE_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 09/27] KVM: arm64: Share kvm_emulate definitions
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (7 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 08/27] arm64: Extract pstate definitions from ptrace Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 10/27] KVM: arm64: Make some arm64 KVM code shareable Steffen Eiden
` (18 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Move functions and definitions useful for emulating arm64 instructions
to include/kvm/arm64.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/kvm_emulate.h | 235 +-----------------
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 13 -
include/kvm/arm64/kvm_emulate.h | 268 +++++++++++++++++++++
3 files changed, 269 insertions(+), 247 deletions(-)
create mode 100644 include/kvm/arm64/kvm_emulate.h
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 822f6077b107..39fa3a12730c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,18 +23,7 @@
#include <asm/virt.h>
#include <kvm/arm64/kvm_arm.h>
-
-#define CURRENT_EL_SP_EL0_VECTOR 0x0
-#define CURRENT_EL_SP_ELx_VECTOR 0x200
-#define LOWER_EL_AArch64_VECTOR 0x400
-#define LOWER_EL_AArch32_VECTOR 0x600
-
-enum exception_type {
- except_type_sync = 0,
- except_type_irq = 0x80,
- except_type_fiq = 0x100,
- except_type_serror = 0x180,
-};
+#include <kvm/arm64/kvm_emulate.h>
#define kvm_exception_type_names \
{ except_type_sync, "SYNC" }, \
@@ -45,36 +34,8 @@ enum exception_type {
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);
-void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_sync(struct kvm_vcpu *vcpu, u64 esr);
-int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
-int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
-void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
-
-static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
-{
- return kvm_inject_sea(vcpu, false, addr);
-}
-
-static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
-{
- return kvm_inject_sea(vcpu, true, addr);
-}
-
-static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
-{
- /*
- * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
- * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
- *
- * Set the bit when injecting an SError w/o an ESR to indicate ISS
- * does not follow the architected format.
- */
- return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
-}
-
-void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
@@ -160,24 +121,6 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
-/*
- * vcpu_get_reg and vcpu_set_reg should always be passed a register number
- * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
- * AArch32 with banked registers.
- */
-static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
- u8 reg_num)
-{
- return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
-}
-
-static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
- unsigned long val)
-{
- if (reg_num != 31)
- vcpu_gp_regs(vcpu)->regs[reg_num] = val;
-}
-
static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
@@ -361,82 +304,11 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.disr_el1;
}
-static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
-}
-
-static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
-}
-
static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}
-static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
-}
-
-static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
-}
-
-static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
-}
-
-static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
-}
-
-/* Always check for S1PTW *before* using this. */
-static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
-}
-
-static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
-{
- return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
-}
-
-/* This one is not specific to Data Abort */
-static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
-}
-
-static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
-{
- return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
-}
-
-static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
-}
-
-static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
-}
static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
@@ -472,36 +344,6 @@ static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
}
}
-static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
-{
- u64 esr = kvm_vcpu_get_esr(vcpu);
- return ESR_ELx_SYS64_ISS_RT(esr);
-}
-
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_abt_iss1tw(vcpu)) {
- /*
- * Only a permission fault on a S1PTW should be
- * considered as a write. Otherwise, page tables baked
- * in a read-only memslot will result in an exception
- * being delivered in the guest.
- *
- * The drawback is that we end-up faulting twice if the
- * guest is using any of HW AF/DB: a translation fault
- * to map the page containing the PT (read only at
- * first), then a permission fault to allow the flags
- * to be set.
- */
- return kvm_vcpu_trap_is_permission_fault(vcpu);
- }
-
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
@@ -537,81 +379,6 @@ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
return vcpu_read_sys_reg(vcpu, r) & bit;
}
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- case 4:
- return be32_to_cpu(data & 0xffffffff);
- default:
- return be64_to_cpu(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return le16_to_cpu(data & 0xffff);
- case 4:
- return le32_to_cpu(data & 0xffffffff);
- default:
- return le64_to_cpu(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- case 4:
- return cpu_to_be32(data & 0xffffffff);
- default:
- return cpu_to_be64(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_le16(data & 0xffff);
- case 4:
- return cpu_to_le32(data & 0xffffffff);
- default:
- return cpu_to_le64(data);
- }
- }
-
- return data; /* Leave LE untouched */
-}
-
-static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
-{
- WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
- vcpu_set_flag(vcpu, INCREMENT_PC);
-}
-
-#define kvm_pend_exception(v, e) \
- do { \
- WARN_ON(vcpu_get_flag((v), INCREMENT_PC)); \
- vcpu_set_flag((v), PENDING_EXCEPTION); \
- vcpu_set_flag((v), e); \
- } while (0)
-
/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 4fdfeabefeb4..15e1e5db73e1 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,19 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
-{
- if (vcpu_mode_is_32bit(vcpu)) {
- kvm_skip_instr32(vcpu);
- } else {
- *vcpu_pc(vcpu) += 4;
- *vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
- }
-
- /* advance the singlestep state machine */
- *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
-}
-
/*
* Skip an instruction which has been emulated at hyp while most guest sysregs
* are live.
diff --git a/include/kvm/arm64/kvm_emulate.h b/include/kvm/arm64/kvm_emulate.h
new file mode 100644
index 000000000000..25322b95af21
--- /dev/null
+++ b/include/kvm/arm64/kvm_emulate.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef KVM_ARM64_KVM_EMULATE_H
+#define KVM_ARM64_KVM_EMULATE_H
+
+#include <asm/esr.h>
+#include <asm/pstate.h>
+#include <asm/sysreg-defs.h>
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu);
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu);
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu);
+static inline bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu);
+static u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu);
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu);
+
+#define CURRENT_EL_SP_EL0_VECTOR 0x0
+#define CURRENT_EL_SP_ELx_VECTOR 0x200
+#define LOWER_EL_AArch64_VECTOR 0x400
+#define LOWER_EL_AArch32_VECTOR 0x600
+
+enum exception_type {
+ except_type_sync = 0,
+ except_type_irq = 0x80,
+ except_type_fiq = 0x100,
+ except_type_serror = 0x180,
+};
+
+void kvm_skip_instr32(struct kvm_vcpu *vcpu);
+
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
+
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, true, addr);
+}
+
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+ /*
+ * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+ * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+ *
+ * Set the bit when injecting an SError w/o an ESR to indicate ISS
+ * does not follow the architected format.
+ */
+ return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
+void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_mode_is_32bit(vcpu)) {
+ kvm_skip_instr32(vcpu);
+ } else {
+ *vcpu_pc(vcpu) += 4;
+ *vcpu_cpsr(vcpu) &= ~SPSR64_BTYPE_MASK;
+ }
+
+ /* advance the singlestep state machine */
+ *vcpu_cpsr(vcpu) &= ~SPSR_SS;
+}
+
+/*
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
+ */
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+ u8 reg_num)
+{
+ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
+}
+
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+ unsigned long val)
+{
+ if (reg_num != 31)
+ vcpu_gp_regs(vcpu)->regs[reg_num] = val;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+}
+
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+}
+
+static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
+}
+
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
+}
+
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+}
+
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
+}
+
+/* Always check for S1PTW *before* using this. */
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
+}
+
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+{
+ return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+}
+
+/* This one is not specific to Data Abort */
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+}
+
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+{
+ return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+}
+
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ return ESR_ELx_SYS64_ISS_RT(esr);
+}
+
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
+}
+
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ /*
+ * Only a permission fault on a S1PTW should be
+ * considered as a write. Otherwise, page tables baked
+ * in a read-only memslot will result in an exception
+ * being delivered in the guest.
+ *
+ * The drawback is that we end-up faulting twice if the
+ * guest is using any of HW AF/DB: a translation fault
+ * to map the page containing the PT (read only at
+ * first), then a permission fault to allow the flags
+ * to be set.
+ */
+ return kvm_vcpu_trap_is_permission_fault(vcpu);
+ }
+
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+ return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
+static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return be16_to_cpu(data & 0xffff);
+ case 4:
+ return be32_to_cpu(data & 0xffffffff);
+ default:
+ return be64_to_cpu(data);
+ }
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return le16_to_cpu(data & 0xffff);
+ case 4:
+ return le32_to_cpu(data & 0xffffffff);
+ default:
+ return le64_to_cpu(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+ unsigned long data,
+ unsigned int len)
+{
+ if (kvm_vcpu_is_be(vcpu)) {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_be16(data & 0xffff);
+ case 4:
+ return cpu_to_be32(data & 0xffffffff);
+ default:
+ return cpu_to_be64(data);
+ }
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_le16(data & 0xffff);
+ case 4:
+ return cpu_to_le32(data & 0xffffffff);
+ default:
+ return cpu_to_le64(data);
+ }
+ }
+
+ return data; /* Leave LE untouched */
+}
+
+static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
+ vcpu_set_flag(vcpu, INCREMENT_PC);
+}
+
+#define kvm_pend_exception(v, e) \
+ do { \
+ WARN_ON(vcpu_get_flag((v), INCREMENT_PC)); \
+ vcpu_set_flag((v), PENDING_EXCEPTION); \
+ vcpu_set_flag((v), e); \
+ } while (0)
+
+#endif /* KVM_ARM64_KVM_EMULATE_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 10/27] KVM: arm64: Make some arm64 KVM code shareable
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (8 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 09/27] KVM: arm64: Share kvm_emulate definitions Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 11/27] KVM: arm64: Access elements of vcpu_gp_regs individually Steffen Eiden
` (17 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Support sharing architecture-agnostic KVM-arm64 code between different
architectures. Introduce a new directory virt/kvm/arm64 which holds
arm64-KVM code that can be used by multiple architectures implementing
the arm64-KVM API.
Additionally, to improve portability, slightly modify core_reg_addr to
use accessor functions instead of direct field accesses.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
MAINTAINERS | 1 +
arch/arm64/kvm/Makefile | 5 +-
arch/arm64/kvm/arm.c | 48 ----
arch/arm64/kvm/guest.c | 292 ---------------------
arch/arm64/kvm/handle_exit.c | 52 +---
arch/arm64/kvm/trace_arm.h | 25 --
include/kvm/arm64/guest.h | 5 +-
include/kvm/arm64/handle_exit.h | 14 +
include/kvm/arm64/kvm_host.h | 9 +
virt/kvm/arm64/Makefile.kvm | 12 +
virt/kvm/arm64/arm.c | 54 ++++
virt/kvm/arm64/guest.c | 302 ++++++++++++++++++++++
virt/kvm/arm64/handle_exit.c | 54 ++++
{arch/arm64/kvm => virt/kvm/arm64}/mmio.c | 1 +
virt/kvm/arm64/trace.h | 42 +++
15 files changed, 500 insertions(+), 416 deletions(-)
create mode 100644 include/kvm/arm64/handle_exit.h
create mode 100644 virt/kvm/arm64/Makefile.kvm
create mode 100644 virt/kvm/arm64/arm.c
create mode 100644 virt/kvm/arm64/guest.c
create mode 100644 virt/kvm/arm64/handle_exit.c
rename {arch/arm64/kvm => virt/kvm/arm64}/mmio.c (99%)
create mode 100644 virt/kvm/arm64/trace.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 075463117c2b..bac2ba07c916 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13998,6 +13998,7 @@ F: arch/arm64/include/uapi/asm/kvm*
F: include/kvm/arm64/
F: include/uapi/arch/arm64/asm/kvm*
F: arch/arm64/kvm/
+F: virt/kvm/arm64/
F: include/kvm/arm_*
F: tools/testing/selftests/kvm/*/arm64/
F: tools/testing/selftests/kvm/arm64/
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3ebc0570345c..94ea6f0546b0 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -6,6 +6,7 @@
ccflags-y += -I $(src)
include $(srctree)/virt/kvm/Makefile.kvm
+include $(srctree)/virt/kvm/arm64/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
@@ -13,7 +14,7 @@ obj-$(CONFIG_KVM) += hyp/
CFLAGS_sys_regs.o += -Wno-override-init
CFLAGS_handle_exit.o += -Wno-override-init
-kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
+kvm-y += arm.o mmu.o psci.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o config.o \
guest.o debug.o reset.o sys_regs.o stacktrace.o \
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
@@ -26,6 +27,8 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o \
vgic/vgic-v5.o
+kvm-y += $(shared-arm64-obj)
+
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
kvm-$(CONFIG_PTDUMP_STAGE2_DEBUGFS) += ptdump.o
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 47630730260f..41f2e5c1b953 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1515,54 +1515,6 @@ unsigned long system_supported_vcpu_features(void)
return features;
}
-static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned long features = init->features[0];
- int i;
-
- if (features & ~KVM_VCPU_VALID_FEATURES)
- return -ENOENT;
-
- for (i = 1; i < ARRAY_SIZE(init->features); i++) {
- if (init->features[i])
- return -ENOENT;
- }
-
- if (features & ~system_supported_vcpu_features())
- return -EINVAL;
-
- /*
- * For now make sure that both address/generic pointer authentication
- * features are requested by the userspace together.
- */
- if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
- test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
- return -EINVAL;
-
- if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
- return 0;
-
- /* MTE is incompatible with AArch32 */
- if (kvm_has_mte(vcpu->kvm))
- return -EINVAL;
-
- /* NV is incompatible with AArch32 */
- if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
- return -EINVAL;
-
- return 0;
-}
-
-static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned long features = init->features[0];
-
- return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
- KVM_VCPU_MAX_FEATURES);
-}
-
static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 557c380ffa37..161840c357e0 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -64,250 +64,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-static bool core_reg_offset_is_vreg(u64 off)
-{
- return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
- off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
-}
-
-static u64 core_reg_offset_from_id(u64 id)
-{
- return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
-}
-
-static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
-{
- int size;
-
- switch (off) {
- case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
- KVM_REG_ARM_CORE_REG(regs.regs[30]):
- case KVM_REG_ARM_CORE_REG(regs.sp):
- case KVM_REG_ARM_CORE_REG(regs.pc):
- case KVM_REG_ARM_CORE_REG(regs.pstate):
- case KVM_REG_ARM_CORE_REG(sp_el1):
- case KVM_REG_ARM_CORE_REG(elr_el1):
- case KVM_REG_ARM_CORE_REG(spsr[0]) ...
- KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
- size = sizeof(__u64);
- break;
-
- case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
- KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
- size = sizeof(__uint128_t);
- break;
-
- case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- size = sizeof(__u32);
- break;
-
- default:
- return -EINVAL;
- }
-
- if (!IS_ALIGNED(off, size / sizeof(__u32)))
- return -EINVAL;
-
- /*
- * The KVM_REG_ARM64_SVE regs must be used instead of
- * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
- * SVE-enabled vcpus:
- */
- if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
- return -EINVAL;
-
- return size;
-}
-
-static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- u64 off = core_reg_offset_from_id(reg->id);
- int size = core_reg_size_from_offset(vcpu, off);
-
- if (size < 0)
- return NULL;
-
- if (KVM_REG_SIZE(reg->id) != size)
- return NULL;
-
- switch (off) {
- case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
- KVM_REG_ARM_CORE_REG(regs.regs[30]):
- off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
- off /= 2;
- return &vcpu->arch.ctxt.regs.regs[off];
-
- case KVM_REG_ARM_CORE_REG(regs.sp):
- return &vcpu->arch.ctxt.regs.sp;
-
- case KVM_REG_ARM_CORE_REG(regs.pc):
- return &vcpu->arch.ctxt.regs.pc;
-
- case KVM_REG_ARM_CORE_REG(regs.pstate):
- return &vcpu->arch.ctxt.regs.pstate;
-
- case KVM_REG_ARM_CORE_REG(sp_el1):
- return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
-
- case KVM_REG_ARM_CORE_REG(elr_el1):
- return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
-
- case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
- return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
-
- case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
- return &vcpu->arch.ctxt.spsr_abt;
-
- case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
- return &vcpu->arch.ctxt.spsr_und;
-
- case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
- return &vcpu->arch.ctxt.spsr_irq;
-
- case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
- return &vcpu->arch.ctxt.spsr_fiq;
-
- case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
- KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
- off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
- off /= 4;
- return &vcpu->arch.ctxt.fp_regs.vregs[off];
-
- case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- return &vcpu->arch.ctxt.fp_regs.fpsr;
-
- case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- return &vcpu->arch.ctxt.fp_regs.fpcr;
-
- default:
- return NULL;
- }
-}
-
-static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- /*
- * Because the kvm_regs structure is a mix of 32, 64 and
- * 128bit fields, we index it as if it was a 32bit
- * array. Hence below, nr_regs is the number of entries, and
- * off the index in the "array".
- */
- __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
- void *addr;
- u32 off;
-
- /* Our ID is an index into the kvm_regs struct. */
- off = core_reg_offset_from_id(reg->id);
- if (off >= nr_regs ||
- (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
- return -ENOENT;
-
- addr = core_reg_addr(vcpu, reg);
- if (!addr)
- return -EINVAL;
-
- if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
- __uint128_t tmp;
- void *valp = &tmp, *addr;
- u64 off;
- int err = 0;
-
- /* Our ID is an index into the kvm_regs struct. */
- off = core_reg_offset_from_id(reg->id);
- if (off >= nr_regs ||
- (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
- return -ENOENT;
-
- addr = core_reg_addr(vcpu, reg);
- if (!addr)
- return -EINVAL;
-
- if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
- err = -EFAULT;
- goto out;
- }
-
- if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
- switch (mode) {
- case PSR_AA32_MODE_USR:
- if (!kvm_supports_32bit_el0())
- return -EINVAL;
- break;
- case PSR_AA32_MODE_FIQ:
- case PSR_AA32_MODE_IRQ:
- case PSR_AA32_MODE_SVC:
- case PSR_AA32_MODE_ABT:
- case PSR_AA32_MODE_UND:
- case PSR_AA32_MODE_SYS:
- if (!vcpu_el1_is_32bit(vcpu))
- return -EINVAL;
- break;
- case PSR_MODE_EL2h:
- case PSR_MODE_EL2t:
- if (!vcpu_has_nv(vcpu))
- return -EINVAL;
- fallthrough;
- case PSR_MODE_EL0t:
- case PSR_MODE_EL1t:
- case PSR_MODE_EL1h:
- if (vcpu_el1_is_32bit(vcpu))
- return -EINVAL;
- break;
- default:
- err = -EINVAL;
- goto out;
- }
- }
-
- memcpy(addr, valp, KVM_REG_SIZE(reg->id));
-
- if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
- int i, nr_reg;
-
- switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
- /*
- * Either we are dealing with user mode, and only the
- * first 15 registers (+ PC) must be narrowed to 32bit.
- * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
- */
- case PSR_AA32_MODE_USR:
- case PSR_AA32_MODE_SYS:
- nr_reg = 15;
- break;
-
- /*
- * Otherwise, this is a privileged mode, and *all* the
- * registers must be narrowed to 32bit.
- */
- default:
- nr_reg = 31;
- break;
- }
-
- for (i = 0; i < nr_reg; i++)
- vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
-
- *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
- }
-out:
- return err;
-}
-
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
@@ -545,54 +301,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
- u64 __user *uindices)
-{
- unsigned int i;
- int n = 0;
-
- for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
- u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
- int size = core_reg_size_from_offset(vcpu, i);
-
- if (size < 0)
- continue;
-
- switch (size) {
- case sizeof(__u32):
- reg |= KVM_REG_SIZE_U32;
- break;
-
- case sizeof(__u64):
- reg |= KVM_REG_SIZE_U64;
- break;
-
- case sizeof(__uint128_t):
- reg |= KVM_REG_SIZE_U128;
- break;
-
- default:
- WARN_ON(1);
- continue;
- }
-
- if (uindices) {
- if (put_user(reg, uindices))
- return -EFAULT;
- uindices++;
- }
-
- n++;
- }
-
- return n;
-}
-
-static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
-{
- return copy_core_reg_indices(vcpu, NULL);
-}
-
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
const unsigned int slices = vcpu_sve_slices(vcpu);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index cc7d5d1709cb..fc92f6ade8a4 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -24,11 +24,11 @@
#include <kvm/arm_hypercalls.h>
+#include <kvm/arm64/handle_exit.h>
+
#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"
-typedef int (*exit_handle_fn)(struct kvm_vcpu *);
-
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
@@ -213,17 +213,6 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
return 0;
}
-static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
-{
- u64 esr = kvm_vcpu_get_esr(vcpu);
-
- kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
- esr, esr_get_class_string(esr));
-
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
/*
* Guest access to SVE registers should be routed to this handler only
* when the system doesn't support SVE.
@@ -373,7 +362,7 @@ static int handle_other(struct kvm_vcpu *vcpu)
return 1;
}
-static exit_handle_fn arm_exit_handlers[] = {
+exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
@@ -404,41 +393,6 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_GCS] = kvm_handle_gcs,
};
-static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
-{
- u64 esr = kvm_vcpu_get_esr(vcpu);
- u8 esr_ec = ESR_ELx_EC(esr);
-
- return arm_exit_handlers[esr_ec];
-}
-
-/*
- * We may be single-stepping an emulated instruction. If the emulation
- * has been completed in the kernel, we can return to userspace with a
- * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
- * emulation first.
- */
-static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
-{
- int handled;
-
- /*
- * See ARM ARM B1.14.1: "Hyp traps on instructions
- * that fail their condition code check"
- */
- if (!kvm_condition_valid(vcpu)) {
- kvm_incr_pc(vcpu);
- handled = 1;
- } else {
- exit_handle_fn exit_handler;
-
- exit_handler = kvm_get_exit_handler(vcpu);
- handled = exit_handler(vcpu);
- }
-
- return handled;
-}
-
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 9c60f6465c78..40c656937136 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -136,31 +136,6 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
-TRACE_EVENT(kvm_mmio_nisv,
- TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
- unsigned long far, unsigned long ipa),
- TP_ARGS(vcpu_pc, esr, far, ipa),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, esr )
- __field( unsigned long, far )
- __field( unsigned long, ipa )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->esr = esr;
- __entry->far = far;
- __entry->ipa = ipa;
- ),
-
- TP_printk("ipa %#016lx, esr %#016lx, far %#016lx, pc %#016lx",
- __entry->ipa, __entry->esr,
- __entry->far, __entry->vcpu_pc)
-);
-
-
TRACE_EVENT(kvm_set_way_flush,
TP_PROTO(unsigned long vcpu_pc, bool cache),
TP_ARGS(vcpu_pc, cache),
diff --git a/include/kvm/arm64/guest.h b/include/kvm/arm64/guest.h
index fa67d992e8fd..ac042ed71157 100644
--- a/include/kvm/arm64/guest.h
+++ b/include/kvm/arm64/guest.h
@@ -4,7 +4,10 @@
#define __KVM_ARM64_GUEST_H__
/* Implemented by virt/kvm/arm64/guest.c */
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int copy_core_reg_indices(const struct kvm_vcpu *vcpu, u64 __user *uindices);
+unsigned long num_core_regs(const struct kvm_vcpu *vcpu);
#endif /* __KVM_ARM64_GUEST_H__ */
diff --git a/include/kvm/arm64/handle_exit.h b/include/kvm/arm64/handle_exit.h
new file mode 100644
index 000000000000..ef253e1f2cb3
--- /dev/null
+++ b/include/kvm/arm64/handle_exit.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef KVM_ARM64_HANDLE_EXIT_H
+#define KVM_ARM64_HANDLE_EXIT_H
+
+#include <linux/kvm_host.h>
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+extern exit_handle_fn arm_exit_handlers[255];
+
+int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu);
+exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu);
+int handle_trap_exceptions(struct kvm_vcpu *vcpu);
+
+#endif /* KVM_ARM64_HANDLE_EXIT_H */
diff --git a/include/kvm/arm64/kvm_host.h b/include/kvm/arm64/kvm_host.h
index 3a434f47497b..21117e4fd546 100644
--- a/include/kvm/arm64/kvm_host.h
+++ b/include/kvm/arm64/kvm_host.h
@@ -28,6 +28,12 @@ struct vcpu_reset_state {
bool reset;
};
+/* Implemented in virt/kvm/arm64/arm.c */
+int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -75,6 +81,9 @@ static inline bool kvm_supports_32bit_el0(void)
return false;
}
+/* Implemented in architecture specific code */
+unsigned long system_supported_vcpu_features(void);
+
#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
/*
diff --git a/virt/kvm/arm64/Makefile.kvm b/virt/kvm/arm64/Makefile.kvm
new file mode 100644
index 000000000000..ac969bf1c016
--- /dev/null
+++ b/virt/kvm/arm64/Makefile.kvm
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Kernel-based Virtual Machine modules for arm64 guests
+#
+KVM_ARM64 ?= $(KVM)/arm64
+
+shared-arm64-obj := \
+ $(KVM_ARM64)/arm.o \
+ $(KVM_ARM64)/guest.o \
+ $(KVM_ARM64)/handle_exit.o \
+ $(KVM_ARM64)/mmio.o \
+
diff --git a/virt/kvm/arm64/arm.c b/virt/kvm/arm64/arm.c
new file mode 100644
index 000000000000..b47adef65e5f
--- /dev/null
+++ b/virt/kvm/arm64/arm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/preempt.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+
+int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
+ int i;
+
+ if (features & ~KVM_VCPU_VALID_FEATURES)
+ return -ENOENT;
+
+ for (i = 1; i < ARRAY_SIZE(init->features); i++) {
+ if (init->features[i])
+ return -ENOENT;
+ }
+
+ if (features & ~system_supported_vcpu_features())
+ return -EINVAL;
+
+ /*
+ * For now make sure that both address/generic pointer authentication
+ * features are requested by the userspace together.
+ */
+ if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
+ test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
+ return -EINVAL;
+
+ if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
+ return 0;
+
+ /* MTE is incompatible with AArch32 */
+ if (kvm_has_mte(vcpu->kvm))
+ return -EINVAL;
+
+ /* NV is incompatible with AArch32 */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
+ return -EINVAL;
+
+ return 0;
+}
+
+bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
+
+ return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
+ KVM_VCPU_MAX_FEATURES);
+}
diff --git a/virt/kvm/arm64/guest.c b/virt/kvm/arm64/guest.c
new file mode 100644
index 000000000000..83e33e0143b9
--- /dev/null
+++ b/virt/kvm/arm64/guest.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/pstate.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
+#include <asm/sigcontext.h>
+
+#include <kvm/arm64/guest.h>
+
+static bool core_reg_offset_is_vreg(u64 off)
+{
+ return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+ off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
+static u64 core_reg_offset_from_id(u64 id)
+{
+ return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
+}
+
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
+{
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(off, size / sizeof(__u32)))
+ return -EINVAL;
+
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+ return -EINVAL;
+
+ return size;
+}
+
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size = core_reg_size_from_offset(vcpu, off);
+
+ if (size < 0)
+ return NULL;
+
+ if (KVM_REG_SIZE(reg->id) != size)
+ return NULL;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+ off /= 2;
+ return &vcpu_gp_regs(vcpu)->regs[off];
+
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ return &vcpu_gp_regs(vcpu)->sp;
+
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ return vcpu_pc(vcpu);
+
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ return &vcpu_gp_regs(vcpu)->pstate;
+
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+ return &vcpu->arch.ctxt.spsr_abt;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+ return &vcpu->arch.ctxt.spsr_und;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+ return &vcpu->arch.ctxt.spsr_irq;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+ return &vcpu->arch.ctxt.spsr_fiq;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+ off /= 4;
+ return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+ default:
+ return NULL;
+ }
+}
+
+int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /*
+ * Because the kvm_regs structure is a mix of 32, 64 and
+ * 128bit fields, we index it as if it was a 32bit
+ * array. Hence below, nr_regs is the number of entries, and
+ * off the index in the "array".
+ */
+ __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+ void *addr;
+ u32 off;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= nr_regs ||
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
+ return -EINVAL;
+
+ if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+ __uint128_t tmp;
+ void *valp = &tmp, *addr;
+ u64 off;
+ int err = 0;
+
+ /* Our ID is an index into the kvm_regs struct. */
+ off = core_reg_offset_from_id(reg->id);
+ if (off >= nr_regs ||
+ (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
+ return -ENOENT;
+
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
+ return -EINVAL;
+
+ if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
+ u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
+
+ switch (mode) {
+ case PSR_AA32_MODE_USR:
+ if (!kvm_supports_32bit_el0())
+ return -EINVAL;
+ break;
+ case PSR_AA32_MODE_FIQ:
+ case PSR_AA32_MODE_IRQ:
+ case PSR_AA32_MODE_SVC:
+ case PSR_AA32_MODE_ABT:
+ case PSR_AA32_MODE_UND:
+ case PSR_AA32_MODE_SYS:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
+ case PSR_MODE_EL2h:
+ case PSR_MODE_EL2t:
+ if (!vcpu_has_nv(vcpu))
+ return -EINVAL;
+ fallthrough;
+ case PSR_MODE_EL0t:
+ case PSR_MODE_EL1t:
+ case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ memcpy(addr, valp, KVM_REG_SIZE(reg->id));
+
+ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ int i, nr_reg;
+
+ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
+ /*
+ * Either we are dealing with user mode, and only the
+ * first 15 registers (+ PC) must be narrowed to 32bit.
+ * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
+ */
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_SYS:
+ nr_reg = 15;
+ break;
+
+ /*
+ * Otherwise, this is a privileged mode, and *all* the
+ * registers must be narrowed to 32bit.
+ */
+ default:
+ nr_reg = 31;
+ break;
+ }
+
+ for (i = 0; i < nr_reg; i++)
+ vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
+
+ *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
+ }
+out:
+ return err;
+}
+
+int copy_core_reg_indices(const struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int i;
+ int n = 0;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+ int size = core_reg_size_from_offset(vcpu, i);
+
+ if (size < 0)
+ continue;
+
+ switch (size) {
+ case sizeof(__u32):
+ reg |= KVM_REG_SIZE_U32;
+ break;
+
+ case sizeof(__u64):
+ reg |= KVM_REG_SIZE_U64;
+ break;
+
+ case sizeof(__uint128_t):
+ reg |= KVM_REG_SIZE_U128;
+ break;
+
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
+{
+ return copy_core_reg_indices(vcpu, NULL);
+}
diff --git a/virt/kvm/arm64/handle_exit.c b/virt/kvm/arm64/handle_exit.c
new file mode 100644
index 000000000000..7c2eff026d08
--- /dev/null
+++ b/virt/kvm/arm64/handle_exit.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kvm_host.h>
+
+#include <asm/esr.h>
+#include <asm/kvm_emulate.h>
+
+#include <kvm/arm64/handle_exit.h>
+
+int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
+ esr, esr_get_class_string(esr));
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u8 esr_ec = ESR_ELx_EC(esr);
+
+ return arm_exit_handlers[esr_ec];
+}
+
+/*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+int handle_trap_exceptions(struct kvm_vcpu *vcpu)
+{
+ int handled;
+
+ /*
+ * See ARM ARM B1.14.1: "Hyp traps on instructions
+ * that fail their condition code check"
+ */
+ if (!kvm_condition_valid(vcpu)) {
+ kvm_incr_pc(vcpu);
+ handled = 1;
+ } else {
+ exit_handle_fn exit_handler;
+
+ exit_handler = kvm_get_exit_handler(vcpu);
+ handled = exit_handler(vcpu);
+ }
+
+ return handled;
+}
diff --git a/arch/arm64/kvm/mmio.c b/virt/kvm/arm64/mmio.c
similarity index 99%
rename from arch/arm64/kvm/mmio.c
rename to virt/kvm/arm64/mmio.c
index e2285ed8c91d..438a554ec1ed 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/virt/kvm/arm64/mmio.c
@@ -8,6 +8,7 @@
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
+#define CREATE_TRACE_POINTS
#include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
diff --git a/virt/kvm/arm64/trace.h b/virt/kvm/arm64/trace.h
new file mode 100644
index 000000000000..0814000b7749
--- /dev/null
+++ b/virt/kvm/arm64/trace.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#if !defined(__KVM_ARM64_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __KVM_ARM64_TRACE_H__
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../virt/kvm/arm64
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(kvm_mmio_nisv,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
+ unsigned long far, unsigned long ipa),
+ TP_ARGS(vcpu_pc, esr, far, ipa),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( unsigned long, esr )
+ __field( unsigned long, far )
+ __field( unsigned long, ipa )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->esr = esr;
+ __entry->far = far;
+ __entry->ipa = ipa;
+ ),
+
+ TP_printk("ipa %#016lx, esr %#016lx, far %#016lx, pc %#016lx",
+ __entry->ipa, __entry->esr,
+ __entry->far, __entry->vcpu_pc)
+);
+
+#endif /* __KVM_ARM64_TRACE_H__ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 11/27] KVM: arm64: Access elements of vcpu_gp_regs individually
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (9 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 10/27] KVM: arm64: Make some arm64 KVM code shareable Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 12/27] KVM: arm64: Share reset general register code Steffen Eiden
` (16 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
While on arm64 the members of vcpu_gp_regs are allocated contiguously,
this is not necessarily true for other architectures implementing ARM.
Let vcpu_gp_regs() no longer return the address of the user_pt_regs in
the vcpu context but the address of the gp-register array field in the
user_pt_regs struct.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/kvm_emulate.h | 9 +++++++--
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/arm64/kvm/hyp/exception.c | 7 +++++--
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 4 ++--
arch/arm64/kvm/hyp/include/hyp/switch.h | 6 +++---
arch/arm64/kvm/reset.c | 3 ++-
include/kvm/arm64/kvm_emulate.h | 4 ++--
virt/kvm/arm64/guest.c | 6 +++---
8 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 39fa3a12730c..41eac2b5de14 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -95,12 +95,17 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
- return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
+ return (unsigned long *)&vcpu->arch.ctxt.regs.pc;
}
static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
- return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
+ return (unsigned long *)&vcpu->arch.ctxt.regs.pstate;
+}
+
+static __always_inline unsigned long *vcpu_sp_el0(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.ctxt.regs.sp;
}
static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ae9e507f2c7c..7e473b895740 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1050,7 +1050,7 @@ struct kvm_vcpu_arch {
#define vcpu_clear_on_unsupported_cpu(vcpu) \
vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
-#define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
+#define vcpu_gp_regs(v) ((v)->arch.ctxt.regs.regs)
/*
* Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index bef40ddb16db..82611442a2d1 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -277,6 +277,9 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};
+#define OFFSETOF_PT_REG(__r) offsetof(struct user_pt_regs, __r)
+#define COMPAT_IDX(__c) ((OFFSETOF_PT_REG(__c) - OFFSETOF_PT_REG(regs[0])) / sizeof(u64))
+
static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long spsr = *vcpu_cpsr(vcpu);
@@ -292,12 +295,12 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
switch(mode) {
case PSR_AA32_MODE_ABT:
__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
- vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
+ vcpu_gp_regs(vcpu)[COMPAT_IDX(compat_lr_abt)] = return_address;
break;
case PSR_AA32_MODE_UND:
__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
- vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
+ vcpu_gp_regs(vcpu)[COMPAT_IDX(compat_lr_und)] = return_address;
break;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 15e1e5db73e1..4e4cb67824c0 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -20,11 +20,11 @@
static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
- vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
kvm_skip_instr(vcpu);
- write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 2597e8bda867..79e6e6cc9f81 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -416,7 +416,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
- arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+ arm64_mops_reset_regs(&vcpu->arch.ctxt.regs, vcpu->arch.fault.esr_el2);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
/*
@@ -857,7 +857,7 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
/*
* Check for the conditions of Cortex-A510's #2077057. When these occur
* SPSR_EL2 can't be trusted, but isn't needed either as it is
- * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+ * unchanged from the value in *vcpu_cpsr(vcpu).
* Are we single-stepping the guest, and took a PAC exception from the
* active-not-pending state?
*/
@@ -867,7 +867,7 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
- vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
}
/*
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 036bf2dff976..d039f1d7116a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -219,12 +219,13 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset core registers */
memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+ *vcpu_pc(vcpu) = 0;
memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
vcpu->arch.ctxt.spsr_abt = 0;
vcpu->arch.ctxt.spsr_und = 0;
vcpu->arch.ctxt.spsr_irq = 0;
vcpu->arch.ctxt.spsr_fiq = 0;
- vcpu_gp_regs(vcpu)->pstate = pstate;
+ *vcpu_cpsr(vcpu) = pstate;
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
diff --git a/include/kvm/arm64/kvm_emulate.h b/include/kvm/arm64/kvm_emulate.h
index 25322b95af21..0e16d18e53d2 100644
--- a/include/kvm/arm64/kvm_emulate.h
+++ b/include/kvm/arm64/kvm_emulate.h
@@ -77,14 +77,14 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
u8 reg_num)
{
- return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
+ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)[reg_num];
}
static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
if (reg_num != 31)
- vcpu_gp_regs(vcpu)->regs[reg_num] = val;
+ vcpu_gp_regs(vcpu)[reg_num] = val;
}
static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm64/guest.c b/virt/kvm/arm64/guest.c
index 83e33e0143b9..e283a4456df8 100644
--- a/virt/kvm/arm64/guest.c
+++ b/virt/kvm/arm64/guest.c
@@ -81,16 +81,16 @@ static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_CORE_REG(regs.regs[30]):
off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
off /= 2;
- return &vcpu_gp_regs(vcpu)->regs[off];
+ return &vcpu_gp_regs(vcpu)[off];
case KVM_REG_ARM_CORE_REG(regs.sp):
- return &vcpu_gp_regs(vcpu)->sp;
+ return vcpu_sp_el0(vcpu);
case KVM_REG_ARM_CORE_REG(regs.pc):
return vcpu_pc(vcpu);
case KVM_REG_ARM_CORE_REG(regs.pstate):
- return &vcpu_gp_regs(vcpu)->pstate;
+ return vcpu_cpsr(vcpu);
case KVM_REG_ARM_CORE_REG(sp_el1):
return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 12/27] KVM: arm64: Share reset general register code
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (10 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 11/27] KVM: arm64: Access elements of vcpu_gp_regs individually Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 13/27] KVM: arm64: Extract & share ipa size shift calculation Steffen Eiden
` (15 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Move the code and the definitions required to reset the general
registers into the shared location. Additionally, add defines to
arch/arm64 such that accessing general registers becomes architecture
agnostic.
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/kvm_host.h | 97 +++++--------------------------
arch/arm64/kvm/reset.c | 33 +----------
include/kvm/arm64/kvm_host.h | 82 ++++++++++++++++++++++++++
include/kvm/arm64/reset.h | 8 +++
virt/kvm/arm64/Makefile.kvm | 1 +
virt/kvm/arm64/guest.c | 8 +--
virt/kvm/arm64/reset.c | 42 +++++++++++++
7 files changed, 154 insertions(+), 117 deletions(-)
create mode 100644 include/kvm/arm64/reset.h
create mode 100644 virt/kvm/arm64/reset.c
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7e473b895740..e3a2ac3979ac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -877,39 +877,6 @@ struct kvm_vcpu_arch {
struct vncr_tlb *vncr_tlb;
};
-/*
- * Each 'flag' is composed of a comma-separated triplet:
- *
- * - the flag-set it belongs to in the vcpu->arch structure
- * - the value for that flag
- * - the mask for that flag
- *
- * __vcpu_single_flag() builds such a triplet for a single-bit flag.
- * unpack_vcpu_flag() extract the flag value from the triplet for
- * direct use outside of the flag accessors.
- */
-#define __vcpu_single_flag(_set, _f) _set, (_f), (_f)
-
-#define __unpack_flag(_set, _f, _m) _f
-#define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
-
-#define __build_check_flag(v, flagset, f, m) \
- do { \
- typeof(v->arch.flagset) *_fset; \
- \
- /* Check that the flags fit in the mask */ \
- BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
- /* Check that the flags fit in the type */ \
- BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \
- } while (0)
-
-#define __vcpu_get_flag(v, flagset, f, m) \
- ({ \
- __build_check_flag(v, flagset, f, m); \
- \
- READ_ONCE(v->arch.flagset) & (m); \
- })
-
/*
* Note that the set/clear accessors must be preempt-safe in order to
* avoid nesting them with load/put which also manipulate flags...
@@ -923,54 +890,14 @@ struct kvm_vcpu_arch {
#define __vcpu_flags_preempt_enable() preempt_enable()
#endif
-#define __vcpu_set_flag(v, flagset, f, m) \
- do { \
- typeof(v->arch.flagset) *fset; \
- \
- __build_check_flag(v, flagset, f, m); \
- \
- fset = &v->arch.flagset; \
- __vcpu_flags_preempt_disable(); \
- if (HWEIGHT(m) > 1) \
- *fset &= ~(m); \
- *fset |= (f); \
- __vcpu_flags_preempt_enable(); \
- } while (0)
-
-#define __vcpu_clear_flag(v, flagset, f, m) \
- do { \
- typeof(v->arch.flagset) *fset; \
- \
- __build_check_flag(v, flagset, f, m); \
- \
- fset = &v->arch.flagset; \
- __vcpu_flags_preempt_disable(); \
- *fset &= ~(m); \
- __vcpu_flags_preempt_enable(); \
- } while (0)
-
-#define __vcpu_test_and_clear_flag(v, flagset, f, m) \
- ({ \
- typeof(v->arch.flagset) set; \
- \
- set = __vcpu_get_flag(v, flagset, f, m); \
- __vcpu_clear_flag(v, flagset, f, m); \
- \
- set; \
- })
-
-#define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
-#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
-#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
-#define vcpu_test_and_clear_flag(v, ...) \
- __vcpu_test_and_clear_flag((v), __VA_ARGS__)
-
-/* KVM_ARM_VCPU_INIT completed */
-#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
-/* SVE config completed */
-#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
-/* pKVM VCPU setup completed */
-#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
+#define _vcpu_get_flag(v, flagset, ...) \
+ __vcpu_get_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_set_flag(v, flagset, ...) \
+ __vcpu_set_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_clear_flag(v, flagset, ...) \
+ __vcpu_clear_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_test_and_clear_flag(v, flagset, ...) \
+ __vcpu_test_and_clear_flag(&(v)->arch.flagset, __VA_ARGS__)
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0))
@@ -1081,6 +1008,12 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
+#define kvm_vcpu_get_sp_el1(__vcpu) (__ctxt_sys_reg(&(__vcpu)->arch.ctxt, SP_EL1))
+#define kvm_vcpu_get_vreg(__vcpu, _n) (&(__vcpu)->arch.ctxt.fp_regs.vregs[_n])
+#define kvm_vcpu_get_vregs(__vcpu) (&(__vcpu)->arch.ctxt.fp_regs.vregs)
+#define kvm_vcpu_get_fpsr(__vcpu) (&(__vcpu)->arch.ctxt.fp_regs.fpsr)
+#define kvm_vcpu_get_fpcr(__vcpu) (&(__vcpu)->arch.ctxt.fp_regs.fpcr)
+
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
#define __vcpu_assign_sys_reg(v, r, val) \
@@ -1413,8 +1346,6 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
-#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
-
int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d039f1d7116a..b4f579df0beb 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -30,22 +30,11 @@
#include <asm/kvm_nested.h>
#include <asm/virt.h>
+#include <kvm/arm64/reset.h>
+
/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
unsigned int __ro_after_init kvm_host_sve_max_vl;
-
-/*
- * ARMv8 Reset Values
- */
-#define VCPU_RESET_PSTATE_EL1 (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
- PSR_F_BIT | PSR_D_BIT)
-
-#define VCPU_RESET_PSTATE_EL2 (PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
- PSR_F_BIT | PSR_D_BIT)
-
-#define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
- PSR_AA32_I_BIT | PSR_AA32_F_BIT)
-
unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void)
@@ -191,7 +180,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_reset_state reset_state;
bool loaded;
- u32 pstate;
spin_lock(&vcpu->arch.mp_state_lock);
reset_state = vcpu->arch.reset_state;
@@ -210,22 +198,7 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_vcpu_reset_sve(vcpu);
}
- if (vcpu_el1_is_32bit(vcpu))
- pstate = VCPU_RESET_PSTATE_SVC;
- else if (vcpu_has_nv(vcpu))
- pstate = VCPU_RESET_PSTATE_EL2;
- else
- pstate = VCPU_RESET_PSTATE_EL1;
-
- /* Reset core registers */
- memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
- *vcpu_pc(vcpu) = 0;
- memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
- vcpu->arch.ctxt.spsr_abt = 0;
- vcpu->arch.ctxt.spsr_und = 0;
- vcpu->arch.ctxt.spsr_irq = 0;
- vcpu->arch.ctxt.spsr_fiq = 0;
- *vcpu_cpsr(vcpu) = pstate;
+ kvm_reset_vcpu_core_regs(vcpu);
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
diff --git a/include/kvm/arm64/kvm_host.h b/include/kvm/arm64/kvm_host.h
index 21117e4fd546..20b824ecf16e 100644
--- a/include/kvm/arm64/kvm_host.h
+++ b/include/kvm/arm64/kvm_host.h
@@ -41,6 +41,86 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
+/*
+ * Each 'flag' is composed of a comma-separated triplet:
+ *
+ * - the flag-set it belongs to in the vcpu->arch structure
+ * - the value for that flag
+ * - the mask for that flag
+ *
+ * __vcpu_single_flag() builds such a triplet for a single-bit flag.
+ * unpack_vcpu_flag() extracts the flag value from the triplet for
+ * direct use outside of the flag accessors.
+ */
+#define __vcpu_single_flag(_set, _f) _set, (_f), (_f)
+
+#define __unpack_flag(_set, _f, _m) _f
+#define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
+
+#define __build_check_flag(flagset, f, m) \
+ do { \
+ /* Check that the flags fit in the mask */ \
+ BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
+ /* Check that the flags fit in the type */ \
+ BUILD_BUG_ON((sizeof(*(flagset)) * 8) <= __fls(m)); \
+ } while (0)
+
+#define __vcpu_get_flag(flagset, f, m) \
+ ({ \
+ __build_check_flag((flagset), f, m); \
+ \
+ READ_ONCE(*(flagset)) & (m); \
+ })
+
+#define __vcpu_set_flag(flagset, f, m) \
+ do { \
+ typeof(*flagset) *fset; \
+ \
+ __build_check_flag((flagset), f, m); \
+ \
+ fset = (flagset); \
+ __vcpu_flags_preempt_disable(); \
+ if (HWEIGHT(m) > 1) \
+ *fset &= ~(m); \
+ *fset |= (f); \
+ __vcpu_flags_preempt_enable(); \
+ } while (0)
+
+#define __vcpu_clear_flag(flagset, f, m) \
+ do { \
+ typeof(*flagset) *fset; \
+ \
+ __build_check_flag(flagset, f, m); \
+ \
+ fset = (flagset); \
+ __vcpu_flags_preempt_disable(); \
+ *fset &= ~(m); \
+ __vcpu_flags_preempt_enable(); \
+ } while (0)
+
+#define __vcpu_test_and_clear_flag(flagset, f, m) \
+ ({ \
+ typeof(*flagset) set; \
+ \
+ set = __vcpu_get_flag((flagset), f, m); \
+ __vcpu_clear_flag((flagset), f, m); \
+ \
+ set; \
+ })
+
+#define vcpu_get_flag(v, ...) _vcpu_get_flag((v), __VA_ARGS__)
+#define vcpu_set_flag(v, ...) _vcpu_set_flag((v), __VA_ARGS__)
+#define vcpu_clear_flag(v, ...) _vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_test_and_clear_flag(v, ...) \
+ _vcpu_test_and_clear_flag((v), __VA_ARGS__)
+
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
+/* pKVM VCPU setup completed */
+#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
+
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
/*
@@ -76,6 +156,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
+#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED)
+
static inline bool kvm_supports_32bit_el0(void)
{
return false;
diff --git a/include/kvm/arm64/reset.h b/include/kvm/arm64/reset.h
new file mode 100644
index 000000000000..a0bca4769b13
--- /dev/null
+++ b/include/kvm/arm64/reset.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __KVM_ARM64_RESET_H__
+#define __KVM_ARM64_RESET_H__
+
+void kvm_reset_vcpu_core_regs(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_ARM64_RESET_H__ */
diff --git a/virt/kvm/arm64/Makefile.kvm b/virt/kvm/arm64/Makefile.kvm
index ac969bf1c016..c5e1db570a09 100644
--- a/virt/kvm/arm64/Makefile.kvm
+++ b/virt/kvm/arm64/Makefile.kvm
@@ -9,4 +9,5 @@ shared-arm64-obj := \
$(KVM_ARM64)/guest.o \
$(KVM_ARM64)/handle_exit.o \
$(KVM_ARM64)/mmio.o \
+ $(KVM_ARM64)/reset.o \
diff --git a/virt/kvm/arm64/guest.c b/virt/kvm/arm64/guest.c
index e283a4456df8..35ba03033b4c 100644
--- a/virt/kvm/arm64/guest.c
+++ b/virt/kvm/arm64/guest.c
@@ -93,7 +93,7 @@ static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return vcpu_cpsr(vcpu);
case KVM_REG_ARM_CORE_REG(sp_el1):
- return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+ return kvm_vcpu_get_sp_el1(vcpu);
case KVM_REG_ARM_CORE_REG(elr_el1):
return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
@@ -117,13 +117,13 @@ static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
off /= 4;
- return &vcpu->arch.ctxt.fp_regs.vregs[off];
+ return kvm_vcpu_get_vreg(vcpu, off);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- return &vcpu->arch.ctxt.fp_regs.fpsr;
+ return kvm_vcpu_get_fpsr(vcpu);
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- return &vcpu->arch.ctxt.fp_regs.fpcr;
+ return kvm_vcpu_get_fpcr(vcpu);
default:
return NULL;
diff --git a/virt/kvm/arm64/reset.c b/virt/kvm/arm64/reset.c
new file mode 100644
index 000000000000..5a8be5233f76
--- /dev/null
+++ b/virt/kvm/arm64/reset.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kvm_host.h>
+#include <asm/pstate.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
+#include <kvm/arm64/reset.h>
+
+/*
+ * ARMv8 Reset Values
+ */
+#define VCPU_RESET_PSTATE_EL1 (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
+ PSR_F_BIT | PSR_D_BIT)
+
+#define VCPU_RESET_PSTATE_EL2 (PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
+ PSR_F_BIT | PSR_D_BIT)
+
+#define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
+ PSR_AA32_I_BIT | PSR_AA32_F_BIT)
+
+void kvm_reset_vcpu_core_regs(struct kvm_vcpu *vcpu)
+{
+ u64 pstate;
+
+ if (vcpu_el1_is_32bit(vcpu))
+ pstate = VCPU_RESET_PSTATE_SVC;
+ else if (vcpu_has_nv(vcpu))
+ pstate = VCPU_RESET_PSTATE_EL2;
+ else
+ pstate = VCPU_RESET_PSTATE_EL1;
+
+ /* Reset core registers */
+	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+ *vcpu_pc(vcpu) = 0;
+ memset(kvm_vcpu_get_vregs(vcpu), 0, sizeof(*kvm_vcpu_get_vregs(vcpu)));
+ memset(kvm_vcpu_get_fpsr(vcpu), 0, sizeof(*kvm_vcpu_get_fpsr(vcpu)));
+ memset(kvm_vcpu_get_fpcr(vcpu), 0, sizeof(*kvm_vcpu_get_fpcr(vcpu)));
+ vcpu->arch.ctxt.spsr_abt = 0;
+ vcpu->arch.ctxt.spsr_und = 0;
+ vcpu->arch.ctxt.spsr_irq = 0;
+ vcpu->arch.ctxt.spsr_fiq = 0;
+ *vcpu_cpsr(vcpu) = pstate;
+}
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 13/27] KVM: arm64: Extract & share ipa size shift calculation
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (11 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 12/27] KVM: arm64: Share reset general register code Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 14/27] KVM: s390: Move s390 kvm code into a subdirectory Steffen Eiden
` (14 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Extract the ipa shift calculation from kvm_init_ipa_range into its own
function kvm_vm_type_ipa_size_shift to be shared across architectures.
User space passes a type parameter to the VM creation ioctl, indicating
the physical size of the VM. Sharing this helper therefore lets all
implementers of arm64 KVM make use of it for VM creation.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/kvm/mmu.c | 18 ++++++------------
include/kvm/arm64/kvm_host.h | 1 +
virt/kvm/arm64/arm.c | 21 +++++++++++++++++++++
3 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e19ff77b3cd5..9d71bb3627fc 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -874,27 +874,21 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
static int kvm_init_ipa_range(struct kvm_s2_mmu *mmu, unsigned long type)
{
- u32 kvm_ipa_limit = get_kvm_ipa_limit();
u64 mmfr0, mmfr1;
u32 phys_shift;
+ int r;
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
if (is_protected_kvm_enabled()) {
- phys_shift = kvm_ipa_limit;
- } else if (phys_shift) {
- if (phys_shift > kvm_ipa_limit ||
- phys_shift < ARM64_MIN_PARANGE_BITS)
- return -EINVAL;
+ phys_shift = get_kvm_ipa_limit();
} else {
- phys_shift = KVM_PHYS_SHIFT;
- if (phys_shift > kvm_ipa_limit) {
- pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
- current->comm);
- return -EINVAL;
- }
+ r = kvm_vm_type_ipa_size_shift(type);
+ if (r < 0)
+ return r;
+ phys_shift = r;
}
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
diff --git a/include/kvm/arm64/kvm_host.h b/include/kvm/arm64/kvm_host.h
index 20b824ecf16e..8c39ec485730 100644
--- a/include/kvm/arm64/kvm_host.h
+++ b/include/kvm/arm64/kvm_host.h
@@ -33,6 +33,7 @@ int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
+int kvm_vm_type_ipa_size_shift(unsigned long type);
/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
diff --git a/virt/kvm/arm64/arm.c b/virt/kvm/arm64/arm.c
index b47adef65e5f..0bbfbe63e558 100644
--- a/virt/kvm/arm64/arm.c
+++ b/virt/kvm/arm64/arm.c
@@ -52,3 +52,24 @@ bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
KVM_VCPU_MAX_FEATURES);
}
+
+int kvm_vm_type_ipa_size_shift(unsigned long type)
+{
+ int phys_shift;
+
+ phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+ if (phys_shift) {
+ if (phys_shift > get_kvm_ipa_limit() ||
+ phys_shift < ARM64_MIN_PARANGE_BITS)
+ return -EINVAL;
+ } else {
+ phys_shift = KVM_PHYS_SHIFT;
+ if (phys_shift > get_kvm_ipa_limit()) {
+ pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+ current->comm);
+ return -EINVAL;
+ }
+ }
+
+ return phys_shift;
+}
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 14/27] KVM: s390: Move s390 kvm code into a subdirectory
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (12 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 13/27] KVM: arm64: Extract & share ipa size shift calculation Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 15/27] KVM: S390: Refactor gmap Steffen Eiden
` (13 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Move all the code required to run s390 KVM guests on s390 to an s390
subdirectory. Move gmap related code into a gmap directory to share gmap
code between KVM implementations. Additionally, prepare the build system
and s390-kvm headers for a second KVM implementation.
While at it, rename the main s390-kvm file from kvm-s390.{c,h} to
just s390.{c,h} to match the naming scheme of other architectures.
The module name stays kvm.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/Kconfig | 2 +-
arch/s390/boot/ipl_parm.c | 2 +-
arch/s390/boot/uv.c | 2 +-
arch/s390/configs/defconfig | 2 +-
arch/s390/include/asm/kvm_host.h | 750 +-----------------
.../asm/{kvm_host.h => kvm_host_s390.h} | 8 +-
...kvm_host_types.h => kvm_host_s390_types.h} | 0
arch/s390/kernel/asm-offsets.c | 2 +-
arch/s390/kernel/early.c | 2 +-
arch/s390/kernel/entry.S | 10 +-
arch/s390/kernel/perf_event.c | 2 +-
arch/s390/kvm/Kconfig | 35 +-
arch/s390/kvm/Makefile | 11 +-
arch/s390/kvm/gmap/Makefile | 5 +
arch/s390/kvm/{ => gmap}/dat.c | 0
arch/s390/kvm/{ => gmap}/dat.h | 6 +-
arch/s390/kvm/{ => gmap}/faultin.c | 0
arch/s390/kvm/{ => gmap}/faultin.h | 6 +-
arch/s390/kvm/{ => gmap}/gmap.c | 2 +-
arch/s390/kvm/{ => gmap}/gmap.h | 6 +-
arch/s390/kvm/{ => s390}/Kconfig | 25 +-
arch/s390/kvm/{ => s390}/Makefile | 10 +-
arch/s390/kvm/{ => s390}/diag.c | 2 +-
arch/s390/kvm/{ => s390}/gaccess.c | 2 +-
arch/s390/kvm/{ => s390}/gaccess.h | 2 +-
arch/s390/kvm/{ => s390}/guestdbg.c | 2 +-
arch/s390/kvm/{ => s390}/intercept.c | 2 +-
arch/s390/kvm/{ => s390}/interrupt.c | 2 +-
arch/s390/kvm/{ => s390}/pci.c | 2 +-
arch/s390/kvm/{ => s390}/pci.h | 0
arch/s390/kvm/{ => s390}/priv.c | 2 +-
arch/s390/kvm/{ => s390}/pv.c | 2 +-
arch/s390/kvm/{kvm-s390.c => s390/s390.c} | 2 +-
arch/s390/kvm/{kvm-s390.h => s390/s390.h} | 2 +-
arch/s390/kvm/{ => s390}/sigp.c | 2 +-
arch/s390/kvm/{ => s390}/trace-s390.h | 0
arch/s390/kvm/{ => s390}/trace.h | 0
arch/s390/kvm/{ => s390}/vsie.c | 2 +-
include/linux/kvm_host.h | 2 +-
39 files changed, 61 insertions(+), 855 deletions(-)
copy arch/s390/include/asm/{kvm_host.h => kvm_host_s390.h} (99%)
rename arch/s390/include/asm/{kvm_host_types.h => kvm_host_s390_types.h} (100%)
create mode 100644 arch/s390/kvm/gmap/Makefile
rename arch/s390/kvm/{ => gmap}/dat.c (100%)
rename arch/s390/kvm/{ => gmap}/dat.h (99%)
rename arch/s390/kvm/{ => gmap}/faultin.c (100%)
rename arch/s390/kvm/{ => gmap}/faultin.h (96%)
rename arch/s390/kvm/{ => gmap}/gmap.c (99%)
rename arch/s390/kvm/{ => gmap}/gmap.h (98%)
copy arch/s390/kvm/{ => s390}/Kconfig (62%)
copy arch/s390/kvm/{ => s390}/Makefile (53%)
rename arch/s390/kvm/{ => s390}/diag.c (99%)
rename arch/s390/kvm/{ => s390}/gaccess.c (99%)
rename arch/s390/kvm/{ => s390}/gaccess.h (99%)
rename arch/s390/kvm/{ => s390}/guestdbg.c (99%)
rename arch/s390/kvm/{ => s390}/intercept.c (99%)
rename arch/s390/kvm/{ => s390}/interrupt.c (99%)
rename arch/s390/kvm/{ => s390}/pci.c (99%)
rename arch/s390/kvm/{ => s390}/pci.h (100%)
rename arch/s390/kvm/{ => s390}/priv.c (99%)
rename arch/s390/kvm/{ => s390}/pv.c (99%)
rename arch/s390/kvm/{kvm-s390.c => s390/s390.c} (99%)
rename arch/s390/kvm/{kvm-s390.h => s390/s390.h} (99%)
rename arch/s390/kvm/{ => s390}/sigp.c (99%)
rename arch/s390/kvm/{ => s390}/trace-s390.h (100%)
rename arch/s390/kvm/{ => s390}/trace.h (100%)
rename arch/s390/kvm/{ => s390}/vsie.c (99%)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index edc927d9e85a..2248bf3da5df 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -837,7 +837,7 @@ config VFIO_CCW
config VFIO_AP
def_tristate n
prompt "VFIO support for AP devices"
- depends on KVM
+ depends on KVM_S390
depends on VFIO
depends on AP
select VFIO_MDEV
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 6bc950b92be7..b8906ba7e5e8 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -300,7 +300,7 @@ void parse_boot_command_line(void)
stack_protector_debug = 1;
#endif
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
if (!strcmp(param, "prot_virt")) {
rc = kstrtobool(val, &enabled);
if (!rc && enabled)
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 4568e8f81dac..f7b68bdcedc1 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -26,7 +26,7 @@ void uv_query_info(void)
if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != UVC_RC_MORE_DATA)
return;
- if (IS_ENABLED(CONFIG_KVM)) {
+ if (IS_ENABLED(CONFIG_KVM_S390)) {
memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 0f4cedcab3ce..bbbb4d0df9dd 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -57,7 +57,7 @@ CONFIG_VFIO_AP=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
-CONFIG_KVM=m
+CONFIG_KVM_S390=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3039c88daa63..6ff643ac0d15 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,756 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * definition for kernel virtual machines on s390
- *
- * Copyright IBM Corp. 2008, 2018
- *
- * Author(s): Carsten Otte <cotte@de.ibm.com>
- */
-
#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H
-#include <linux/types.h>
-#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
-#include <linux/kvm_types.h>
-#include <linux/kvm.h>
-#include <linux/seqlock.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/mmu_notifier.h>
-#include <asm/kvm_host_types.h>
-#include <asm/debug.h>
-#include <asm/cpu.h>
-#include <asm/fpu.h>
-#include <asm/isc.h>
-#include <asm/guarded_storage.h>
-
-#define KVM_HAVE_MMU_RWLOCK
-#define KVM_MAX_VCPUS 255
-
-#define KVM_INTERNAL_MEM_SLOTS 1
-
-/*
- * These seem to be used for allocating ->chip in the routing table, which we
- * don't use. 1 is as small as we can get to reduce the needed memory. If we
- * need to look at ->chip later on, we'll need to revisit this.
- */
-#define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 1
-#define KVM_HALT_POLL_NS_DEFAULT 50000
-
-/* s390-specific vcpu->requests bit members */
-#define KVM_REQ_ENABLE_IBS KVM_ARCH_REQ(0)
-#define KVM_REQ_DISABLE_IBS KVM_ARCH_REQ(1)
-#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
-#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
-#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
-#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
-#define KVM_REQ_REFRESH_GUEST_PREFIX \
- KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-
-struct kvm_vcpu_stat {
- struct kvm_vcpu_stat_generic generic;
- u64 exit_userspace;
- u64 exit_null;
- u64 exit_external_request;
- u64 exit_io_request;
- u64 exit_external_interrupt;
- u64 exit_stop_request;
- u64 exit_validity;
- u64 exit_instruction;
- u64 exit_pei;
- u64 halt_no_poll_steal;
- u64 instruction_lctl;
- u64 instruction_lctlg;
- u64 instruction_stctl;
- u64 instruction_stctg;
- u64 exit_program_interruption;
- u64 exit_instr_and_program;
- u64 exit_operation_exception;
- u64 deliver_ckc;
- u64 deliver_cputm;
- u64 deliver_external_call;
- u64 deliver_emergency_signal;
- u64 deliver_service_signal;
- u64 deliver_virtio;
- u64 deliver_stop_signal;
- u64 deliver_prefix_signal;
- u64 deliver_restart_signal;
- u64 deliver_program;
- u64 deliver_io;
- u64 deliver_machine_check;
- u64 exit_wait_state;
- u64 inject_ckc;
- u64 inject_cputm;
- u64 inject_external_call;
- u64 inject_emergency_signal;
- u64 inject_mchk;
- u64 inject_pfault_init;
- u64 inject_program;
- u64 inject_restart;
- u64 inject_set_prefix;
- u64 inject_stop_signal;
- u64 instruction_epsw;
- u64 instruction_gs;
- u64 instruction_io_other;
- u64 instruction_lpsw;
- u64 instruction_lpswe;
- u64 instruction_lpswey;
- u64 instruction_pfmf;
- u64 instruction_ptff;
- u64 instruction_sck;
- u64 instruction_sckpf;
- u64 instruction_stidp;
- u64 instruction_spx;
- u64 instruction_stpx;
- u64 instruction_stap;
- u64 instruction_iske;
- u64 instruction_ri;
- u64 instruction_rrbe;
- u64 instruction_sske;
- u64 instruction_ipte_interlock;
- u64 instruction_stsi;
- u64 instruction_stfl;
- u64 instruction_tb;
- u64 instruction_tpi;
- u64 instruction_tprot;
- u64 instruction_tsch;
- u64 instruction_sie;
- u64 instruction_essa;
- u64 instruction_sthyi;
- u64 instruction_sigp_sense;
- u64 instruction_sigp_sense_running;
- u64 instruction_sigp_external_call;
- u64 instruction_sigp_emergency;
- u64 instruction_sigp_cond_emergency;
- u64 instruction_sigp_start;
- u64 instruction_sigp_stop;
- u64 instruction_sigp_stop_store_status;
- u64 instruction_sigp_store_status;
- u64 instruction_sigp_store_adtl_status;
- u64 instruction_sigp_arch;
- u64 instruction_sigp_prefix;
- u64 instruction_sigp_restart;
- u64 instruction_sigp_init_cpu_reset;
- u64 instruction_sigp_cpu_reset;
- u64 instruction_sigp_unknown;
- u64 instruction_diagnose_10;
- u64 instruction_diagnose_44;
- u64 instruction_diagnose_9c;
- u64 diag_9c_ignored;
- u64 diag_9c_forward;
- u64 instruction_diagnose_258;
- u64 instruction_diagnose_308;
- u64 instruction_diagnose_500;
- u64 instruction_diagnose_other;
- u64 pfault_sync;
- u64 signal_exits;
-};
-
-#define PGM_OPERATION 0x01
-#define PGM_PRIVILEGED_OP 0x02
-#define PGM_EXECUTE 0x03
-#define PGM_PROTECTION 0x04
-#define PGM_ADDRESSING 0x05
-#define PGM_SPECIFICATION 0x06
-#define PGM_DATA 0x07
-#define PGM_FIXED_POINT_OVERFLOW 0x08
-#define PGM_FIXED_POINT_DIVIDE 0x09
-#define PGM_DECIMAL_OVERFLOW 0x0a
-#define PGM_DECIMAL_DIVIDE 0x0b
-#define PGM_HFP_EXPONENT_OVERFLOW 0x0c
-#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
-#define PGM_HFP_SIGNIFICANCE 0x0e
-#define PGM_HFP_DIVIDE 0x0f
-#define PGM_SEGMENT_TRANSLATION 0x10
-#define PGM_PAGE_TRANSLATION 0x11
-#define PGM_TRANSLATION_SPEC 0x12
-#define PGM_SPECIAL_OPERATION 0x13
-#define PGM_OPERAND 0x15
-#define PGM_TRACE_TABEL 0x16
-#define PGM_VECTOR_PROCESSING 0x1b
-#define PGM_SPACE_SWITCH 0x1c
-#define PGM_HFP_SQUARE_ROOT 0x1d
-#define PGM_PC_TRANSLATION_SPEC 0x1f
-#define PGM_AFX_TRANSLATION 0x20
-#define PGM_ASX_TRANSLATION 0x21
-#define PGM_LX_TRANSLATION 0x22
-#define PGM_EX_TRANSLATION 0x23
-#define PGM_PRIMARY_AUTHORITY 0x24
-#define PGM_SECONDARY_AUTHORITY 0x25
-#define PGM_LFX_TRANSLATION 0x26
-#define PGM_LSX_TRANSLATION 0x27
-#define PGM_ALET_SPECIFICATION 0x28
-#define PGM_ALEN_TRANSLATION 0x29
-#define PGM_ALE_SEQUENCE 0x2a
-#define PGM_ASTE_VALIDITY 0x2b
-#define PGM_ASTE_SEQUENCE 0x2c
-#define PGM_EXTENDED_AUTHORITY 0x2d
-#define PGM_LSTE_SEQUENCE 0x2e
-#define PGM_ASTE_INSTANCE 0x2f
-#define PGM_STACK_FULL 0x30
-#define PGM_STACK_EMPTY 0x31
-#define PGM_STACK_SPECIFICATION 0x32
-#define PGM_STACK_TYPE 0x33
-#define PGM_STACK_OPERATION 0x34
-#define PGM_ASCE_TYPE 0x38
-#define PGM_REGION_FIRST_TRANS 0x39
-#define PGM_REGION_SECOND_TRANS 0x3a
-#define PGM_REGION_THIRD_TRANS 0x3b
-#define PGM_SECURE_STORAGE_ACCESS 0x3d
-#define PGM_NON_SECURE_STORAGE_ACCESS 0x3e
-#define PGM_SECURE_STORAGE_VIOLATION 0x3f
-#define PGM_MONITOR 0x40
-#define PGM_PER 0x80
-#define PGM_CRYPTO_OPERATION 0x119
-
-/* irq types in ascend order of priorities */
-enum irq_types {
- IRQ_PEND_SET_PREFIX = 0,
- IRQ_PEND_RESTART,
- IRQ_PEND_SIGP_STOP,
- IRQ_PEND_IO_ISC_7,
- IRQ_PEND_IO_ISC_6,
- IRQ_PEND_IO_ISC_5,
- IRQ_PEND_IO_ISC_4,
- IRQ_PEND_IO_ISC_3,
- IRQ_PEND_IO_ISC_2,
- IRQ_PEND_IO_ISC_1,
- IRQ_PEND_IO_ISC_0,
- IRQ_PEND_VIRTIO,
- IRQ_PEND_PFAULT_DONE,
- IRQ_PEND_PFAULT_INIT,
- IRQ_PEND_EXT_HOST,
- IRQ_PEND_EXT_SERVICE,
- IRQ_PEND_EXT_SERVICE_EV,
- IRQ_PEND_EXT_TIMING,
- IRQ_PEND_EXT_CPU_TIMER,
- IRQ_PEND_EXT_CLOCK_COMP,
- IRQ_PEND_EXT_EXTERNAL,
- IRQ_PEND_EXT_EMERGENCY,
- IRQ_PEND_EXT_MALFUNC,
- IRQ_PEND_EXT_IRQ_KEY,
- IRQ_PEND_MCHK_REP,
- IRQ_PEND_PROG,
- IRQ_PEND_SVC,
- IRQ_PEND_MCHK_EX,
- IRQ_PEND_COUNT
-};
-
-/* We have 2M for virtio device descriptor pages. Smallest amount of
- * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
- */
-#define KVM_S390_MAX_VIRTIO_IRQS 87381
-
-/*
- * Repressible (non-floating) machine check interrupts
- * subclass bits in MCIC
- */
-#define MCHK_EXTD_BIT 58
-#define MCHK_DEGR_BIT 56
-#define MCHK_WARN_BIT 55
-#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
- (1UL << MCHK_EXTD_BIT) | \
- (1UL << MCHK_WARN_BIT))
-
-/* Exigent machine check interrupts subclass bits in MCIC */
-#define MCHK_SD_BIT 63
-#define MCHK_PD_BIT 62
-#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))
-
-#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY) | \
- (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
- (1UL << IRQ_PEND_EXT_CPU_TIMER) | \
- (1UL << IRQ_PEND_EXT_MALFUNC) | \
- (1UL << IRQ_PEND_EXT_EMERGENCY) | \
- (1UL << IRQ_PEND_EXT_EXTERNAL) | \
- (1UL << IRQ_PEND_EXT_TIMING) | \
- (1UL << IRQ_PEND_EXT_HOST) | \
- (1UL << IRQ_PEND_EXT_SERVICE) | \
- (1UL << IRQ_PEND_EXT_SERVICE_EV) | \
- (1UL << IRQ_PEND_VIRTIO) | \
- (1UL << IRQ_PEND_PFAULT_INIT) | \
- (1UL << IRQ_PEND_PFAULT_DONE))
-
-#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
- (1UL << IRQ_PEND_IO_ISC_1) | \
- (1UL << IRQ_PEND_IO_ISC_2) | \
- (1UL << IRQ_PEND_IO_ISC_3) | \
- (1UL << IRQ_PEND_IO_ISC_4) | \
- (1UL << IRQ_PEND_IO_ISC_5) | \
- (1UL << IRQ_PEND_IO_ISC_6) | \
- (1UL << IRQ_PEND_IO_ISC_7))
-
-#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
- (1UL << IRQ_PEND_MCHK_EX))
-
-#define IRQ_PEND_EXT_II_MASK ((1UL << IRQ_PEND_EXT_CPU_TIMER) | \
- (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
- (1UL << IRQ_PEND_EXT_EMERGENCY) | \
- (1UL << IRQ_PEND_EXT_EXTERNAL) | \
- (1UL << IRQ_PEND_EXT_SERVICE) | \
- (1UL << IRQ_PEND_EXT_SERVICE_EV))
-
-struct kvm_s390_interrupt_info {
- struct list_head list;
- u64 type;
- union {
- struct kvm_s390_io_info io;
- struct kvm_s390_ext_info ext;
- struct kvm_s390_pgm_info pgm;
- struct kvm_s390_emerg_info emerg;
- struct kvm_s390_extcall_info extcall;
- struct kvm_s390_prefix_info prefix;
- struct kvm_s390_stop_info stop;
- struct kvm_s390_mchk_info mchk;
- };
-};
-
-struct kvm_s390_irq_payload {
- struct kvm_s390_io_info io;
- struct kvm_s390_ext_info ext;
- struct kvm_s390_pgm_info pgm;
- struct kvm_s390_emerg_info emerg;
- struct kvm_s390_extcall_info extcall;
- struct kvm_s390_prefix_info prefix;
- struct kvm_s390_stop_info stop;
- struct kvm_s390_mchk_info mchk;
-};
-
-struct kvm_s390_local_interrupt {
- spinlock_t lock;
- DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
- struct kvm_s390_irq_payload irq;
- unsigned long pending_irqs;
-};
-
-#define FIRQ_LIST_IO_ISC_0 0
-#define FIRQ_LIST_IO_ISC_1 1
-#define FIRQ_LIST_IO_ISC_2 2
-#define FIRQ_LIST_IO_ISC_3 3
-#define FIRQ_LIST_IO_ISC_4 4
-#define FIRQ_LIST_IO_ISC_5 5
-#define FIRQ_LIST_IO_ISC_6 6
-#define FIRQ_LIST_IO_ISC_7 7
-#define FIRQ_LIST_PFAULT 8
-#define FIRQ_LIST_VIRTIO 9
-#define FIRQ_LIST_COUNT 10
-#define FIRQ_CNTR_IO 0
-#define FIRQ_CNTR_SERVICE 1
-#define FIRQ_CNTR_VIRTIO 2
-#define FIRQ_CNTR_PFAULT 3
-#define FIRQ_MAX_COUNT 4
-
-/* mask the AIS mode for a given ISC */
-#define AIS_MODE_MASK(isc) (0x80 >> isc)
-
-#define KVM_S390_AIS_MODE_ALL 0
-#define KVM_S390_AIS_MODE_SINGLE 1
-
-struct kvm_s390_float_interrupt {
- unsigned long pending_irqs;
- unsigned long masked_irqs;
- spinlock_t lock;
- struct list_head lists[FIRQ_LIST_COUNT];
- int counters[FIRQ_MAX_COUNT];
- struct kvm_s390_mchk_info mchk;
- struct kvm_s390_ext_info srv_signal;
- int last_sleep_cpu;
- struct mutex ais_lock;
- u8 simm;
- u8 nimm;
-};
-
-struct kvm_hw_wp_info_arch {
- unsigned long addr;
- unsigned long phys_addr;
- int len;
- char *old_data;
-};
-
-struct kvm_hw_bp_info_arch {
- unsigned long addr;
- int len;
-};
-
-/*
- * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
- * Further KVM_GUESTDBG flags which an be used from userspace can be found in
- * arch/s390/include/uapi/asm/kvm.h
- */
-#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
-
-#define guestdbg_enabled(vcpu) \
- (vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
-#define guestdbg_sstep_enabled(vcpu) \
- (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-#define guestdbg_hw_bp_enabled(vcpu) \
- (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
- (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
-
-#define KVM_GUESTDBG_VALID_MASK \
- (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |\
- KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_EXIT_PENDING)
-
-struct kvm_guestdbg_info_arch {
- unsigned long cr0;
- unsigned long cr9;
- unsigned long cr10;
- unsigned long cr11;
- struct kvm_hw_bp_info_arch *hw_bp_info;
- struct kvm_hw_wp_info_arch *hw_wp_info;
- int nr_hw_bp;
- int nr_hw_wp;
- unsigned long last_bp;
-};
-
-struct kvm_s390_pv_vcpu {
- u64 handle;
- unsigned long stor_base;
-};
-
-struct kvm_vcpu_arch {
- struct kvm_s390_sie_block *sie_block;
- /* if vsie is active, currently executed shadow sie control block */
- struct kvm_s390_sie_block *vsie_block;
- unsigned int host_acrs[NUM_ACRS];
- struct gs_cb *host_gscb;
- struct kvm_s390_local_interrupt local_int;
- struct hrtimer ckc_timer;
- struct kvm_s390_pgm_info pgm;
- struct gmap *gmap;
- struct kvm_guestdbg_info_arch guestdbg;
- unsigned long pfault_token;
- unsigned long pfault_select;
- unsigned long pfault_compare;
- bool cputm_enabled;
- /*
- * The seqcount protects updates to cputm_start and sie_block.cputm,
- * this way we can have non-blocking reads with consistent values.
- * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
- * values and to start/stop/enable/disable cpu timer accounting.
- */
- seqcount_t cputm_seqcount;
- __u64 cputm_start;
- bool gs_enabled;
- bool skey_enabled;
- /* Indicator if the access registers have been loaded from guest */
- bool acrs_loaded;
- struct kvm_s390_pv_vcpu pv;
- union diag318_info diag318_info;
- struct kvm_s390_mmu_cache *mc;
-};
-
-struct kvm_vm_stat {
- struct kvm_vm_stat_generic generic;
- u64 inject_io;
- u64 inject_float_mchk;
- u64 inject_pfault_done;
- u64 inject_service_signal;
- u64 inject_virtio;
- u64 aen_forward;
- u64 gmap_shadow_create;
- u64 gmap_shadow_reuse;
- u64 gmap_shadow_r1_entry;
- u64 gmap_shadow_r2_entry;
- u64 gmap_shadow_r3_entry;
- u64 gmap_shadow_sg_entry;
- u64 gmap_shadow_pg_entry;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-struct s390_map_info {
- struct list_head list;
- __u64 guest_addr;
- __u64 addr;
- struct page *page;
-};
-
-struct s390_io_adapter {
- unsigned int id;
- int isc;
- bool maskable;
- bool masked;
- bool swap;
- bool suppressible;
-};
-
-#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
-#define MAX_S390_ADAPTER_MAPS 256
-
-/* maximum size of facilities and facility mask is 2k bytes */
-#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
-#define S390_ARCH_FAC_LIST_SIZE_U64 \
- (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
-#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
-#define S390_ARCH_FAC_MASK_SIZE_U64 \
- (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
-
-struct kvm_s390_cpu_model {
- /* facility mask supported by kvm & hosting machine */
- __u64 fac_mask[S390_ARCH_FAC_MASK_SIZE_U64];
- struct kvm_s390_vm_cpu_subfunc subfuncs;
- /* facility list requested by guest (in dma page) */
- __u64 *fac_list;
- u64 cpuid;
- unsigned short ibc;
- /* subset of available UV-features for pv-guests enabled by user space */
- struct kvm_s390_vm_cpu_uv_feat uv_feat_guest;
-};
-
-typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);
-
-struct kvm_s390_crypto {
- struct kvm_s390_crypto_cb *crycb;
- struct rw_semaphore pqap_hook_rwsem;
- crypto_hook *pqap_hook;
- __u32 crycbd;
- __u8 aes_kw;
- __u8 dea_kw;
- __u8 apie;
-};
-
-#define APCB0_MASK_SIZE 1
-struct kvm_s390_apcb0 {
- __u64 apm[APCB0_MASK_SIZE]; /* 0x0000 */
- __u64 aqm[APCB0_MASK_SIZE]; /* 0x0008 */
- __u64 adm[APCB0_MASK_SIZE]; /* 0x0010 */
- __u64 reserved18; /* 0x0018 */
-};
-
-#define APCB1_MASK_SIZE 4
-struct kvm_s390_apcb1 {
- __u64 apm[APCB1_MASK_SIZE]; /* 0x0000 */
- __u64 aqm[APCB1_MASK_SIZE]; /* 0x0020 */
- __u64 adm[APCB1_MASK_SIZE]; /* 0x0040 */
- __u64 reserved60[4]; /* 0x0060 */
-};
-
-struct kvm_s390_crypto_cb {
- struct kvm_s390_apcb0 apcb0; /* 0x0000 */
- __u8 reserved20[0x0048 - 0x0020]; /* 0x0020 */
- __u8 dea_wrapping_key_mask[24]; /* 0x0048 */
- __u8 aes_wrapping_key_mask[32]; /* 0x0060 */
- struct kvm_s390_apcb1 apcb1; /* 0x0080 */
-};
-
-struct kvm_s390_gisa {
- union {
- struct { /* common to all formats */
- u32 next_alert;
- u8 ipm;
- u8 reserved01[2];
- u8 iam;
- };
- struct { /* format 0 */
- u32 next_alert;
- u8 ipm;
- u8 reserved01;
- u8 : 6;
- u8 g : 1;
- u8 c : 1;
- u8 iam;
- u8 reserved02[4];
- u32 airq_count;
- } g0;
- struct { /* format 1 */
- u32 next_alert;
- u8 ipm;
- u8 simm;
- u8 nimm;
- u8 iam;
- u8 aism[8];
- u8 : 6;
- u8 g : 1;
- u8 c : 1;
- u8 reserved03[11];
- u32 airq_count;
- } g1;
- struct {
- u64 word[4];
- } u64;
- };
-};
-
-struct kvm_s390_gib {
- u32 alert_list_origin;
- u32 reserved01;
- u8:5;
- u8 nisc:3;
- u8 reserved03[3];
- u32 reserved04[5];
-};
-
-/*
- * sie_page2 has to be allocated as DMA because fac_list, crycb and
- * gisa need 31bit addresses in the sie control block.
- */
-struct sie_page2 {
- __u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */
- struct kvm_s390_crypto_cb crycb; /* 0x0800 */
- struct kvm_s390_gisa gisa; /* 0x0900 */
- struct kvm *kvm; /* 0x0920 */
- u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
-};
-
-struct vsie_page;
-
-struct kvm_s390_vsie {
- struct mutex mutex;
- struct radix_tree_root addr_to_page;
- int page_count;
- int next;
- struct vsie_page *pages[KVM_MAX_VCPUS];
-};
-
-struct kvm_s390_gisa_iam {
- u8 mask;
- spinlock_t ref_lock;
- u32 ref_count[MAX_ISC + 1];
-};
-
-struct kvm_s390_gisa_interrupt {
- struct kvm_s390_gisa *origin;
- struct kvm_s390_gisa_iam alert;
- struct hrtimer timer;
- u64 expires;
- DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
-};
-
-struct kvm_s390_pv {
- u64 handle;
- u64 guest_len;
- unsigned long stor_base;
- void *stor_var;
- bool dumping;
- void *set_aside;
- struct list_head need_cleanup;
- struct mmu_notifier mmu_notifier;
- /* Protects against concurrent import-like operations */
- struct mutex import_lock;
-};
-
-struct kvm_s390_mmu_cache;
-
-struct kvm_arch {
- struct esca_block *sca;
- debug_info_t *dbf;
- struct kvm_s390_float_interrupt float_int;
- struct kvm_device *flic;
- struct gmap *gmap;
- unsigned long mem_limit;
- int css_support;
- int use_irqchip;
- int use_cmma;
- int use_pfmfi;
- int use_skf;
- int use_zpci_interp;
- int user_cpu_state_ctrl;
- int user_sigp;
- int user_stsi;
- int user_instr0;
- int user_operexec;
- struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
- wait_queue_head_t ipte_wq;
- int ipte_lock_count;
- struct mutex ipte_mutex;
- spinlock_t start_stop_lock;
- struct sie_page2 *sie_page2;
- struct kvm_s390_cpu_model model;
- struct kvm_s390_crypto crypto;
- struct kvm_s390_vsie vsie;
- u8 epdx;
- u64 epoch;
- int migration_mode;
- atomic64_t cmma_dirty_pages;
- /* subset of available cpu features enabled by user space */
- DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
- /* indexed by vcpu_idx */
- DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
- struct kvm_s390_gisa_interrupt gisa_int;
- struct kvm_s390_pv pv;
- struct list_head kzdev_list;
- spinlock_t kzdev_list_lock;
- struct kvm_s390_mmu_cache *mc;
-};
-
-#define KVM_HVA_ERR_BAD (-1UL)
-#define KVM_HVA_ERR_RO_BAD (-2UL)
-
-static inline bool kvm_is_error_hva(unsigned long addr)
-{
- return IS_ERR_VALUE(addr);
-}
-
-#define ASYNC_PF_PER_VCPU 64
-struct kvm_arch_async_pf {
- unsigned long pfault_token;
-};
-
-bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
-
-void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
- struct kvm_async_pf *work);
-
-bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
- struct kvm_async_pf *work);
-
-void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
- struct kvm_async_pf *work);
-
-static inline void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) {}
-
-void kvm_arch_crypto_clear_masks(struct kvm *kvm);
-void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
- unsigned long *aqm, unsigned long *adm);
-
-#define SIE64_RETURN_NORMAL 0
-#define SIE64_RETURN_MCCK 1
-
-int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
- unsigned long gasce);
-
-static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa, unsigned long gasce)
-{
- return __sie64a(virt_to_phys(sie_block), sie_block, rsa, gasce);
-}
-
-extern char sie_exit;
-
-bool kvm_s390_pv_is_protected(struct kvm *kvm);
-bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
-
-extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
- u64 *gprs, unsigned long gasce);
-
-extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
-extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
-
-bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa);
-
-static inline void kvm_arch_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
-static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
-static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot) {}
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-
-#define __KVM_HAVE_ARCH_VM_FREE
-void kvm_arch_free_vm(struct kvm *kvm);
-
-struct zpci_kvm_hook {
- int (*kvm_register)(void *opaque, struct kvm *kvm);
- void (*kvm_unregister)(void *opaque);
-};
-
-extern struct zpci_kvm_hook zpci_kvm_hook;
+#include <asm/kvm_host_s390.h>
#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host_s390.h
similarity index 99%
copy from arch/s390/include/asm/kvm_host.h
copy to arch/s390/include/asm/kvm_host_s390.h
index 3039c88daa63..2d62a8ff8008 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host_s390.h
@@ -8,8 +8,8 @@
*/
-#ifndef ASM_KVM_HOST_H
-#define ASM_KVM_HOST_H
+#ifndef ASM_KVM_HOST_S390_H
+#define ASM_KVM_HOST_S390_H
#include <linux/types.h>
#include <linux/hrtimer.h>
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mmu_notifier.h>
-#include <asm/kvm_host_types.h>
+#include <asm/kvm_host_s390_types.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
@@ -753,4 +753,4 @@ struct zpci_kvm_hook {
extern struct zpci_kvm_hook zpci_kvm_hook;
-#endif
+#endif /* ASM_KVM_HOST_S390_H */
diff --git a/arch/s390/include/asm/kvm_host_types.h b/arch/s390/include/asm/kvm_host_s390_types.h
similarity index 100%
rename from arch/s390/include/asm/kvm_host_types.h
rename to arch/s390/include/asm/kvm_host_s390_types.h
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fbd26f3e9f96..8619adf91cdb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -11,7 +11,7 @@
#include <linux/purgatory.h>
#include <linux/pgtable.h>
#include <linux/ftrace_regs.h>
-#include <asm/kvm_host_types.h>
+#include <asm/kvm_host_s390_types.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b27239c03d79..464f8918d447 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -57,7 +57,7 @@ decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
decompressor_handled_param(bootdebug);
__decompressor_handled_param(debug_alternative, debug-alternative);
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
decompressor_handled_param(prot_virt);
#endif
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5817cb47b2d0..ac8d75a209fa 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -113,7 +113,7 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
.endm
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
.macro SIEEXIT sie_control,lowcore
lg %r9,\sie_control # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(__WARN_trap)
#endif /* CONFIG_BUG && CONFIG_CC_HAS_ASM_IMMEDIATE_STRINGS */
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
/*
* __sie64a calling convention:
* %r2 pointer to sie control block phys
@@ -314,7 +314,7 @@ SYM_CODE_START(pgm_check_handler)
xgr %r10,%r10
tmhh %r8,0x0001 # coming from user space?
jo 3f # -> fault in user space
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
lg %r11,__LC_CURRENT(%r13)
tm __TI_sie(%r11),0xff
jz 1f
@@ -385,7 +385,7 @@ SYM_CODE_START(\name)
lmg %r8,%r9,\lc_old_psw(%r13)
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
lg %r10,__LC_CURRENT(%r13)
tm __TI_sie(%r10),0xff
jz 0f
@@ -463,7 +463,7 @@ SYM_CODE_START(mcck_int_handler)
jnz .Lmcck_user
TSTMSK __LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
lg %r10,__LC_CURRENT(%r13)
tm __TI_sie(%r10),0xff
jz .Lmcck_user
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 606750bae508..6441746b8e72 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -37,7 +37,7 @@ static bool is_in_guest(struct pt_regs *regs)
{
if (user_mode(regs))
return false;
-#if IS_ENABLED(CONFIG_KVM)
+#if IS_ENABLED(CONFIG_KVM_S390)
return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
return false;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 5b835bc6a194..f8d4a9a38dae 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -17,39 +17,8 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
config KVM
- def_tristate y
- prompt "Kernel-based Virtual Machine (KVM) support"
- select HAVE_KVM_CPU_RELAX_INTERCEPT
- select KVM_ASYNC_PF
- select KVM_ASYNC_PF_SYNC
- select KVM_COMMON
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQ_ROUTING
- select HAVE_KVM_INVALID_WAKEUPS
- select HAVE_KVM_NO_POLL
- select KVM_VFIO
- select VIRT_XFER_TO_GUEST_WORK
- select KVM_MMU_LOCKLESS_AGING
- help
- Support hosting paravirtualized guest machines using the SIE
- virtualization capability on the mainframe. This should work
- on any 64bit machine.
-
- This module provides access to the hardware capabilities through
- a character device node named /dev/kvm.
-
- To compile this as a module, choose M here: the module
- will be called kvm.
-
- If unsure, say N.
-
-config KVM_S390_UCONTROL
- bool "Userspace controlled virtual machines"
- depends on KVM
- help
- Allow CAP_SYS_ADMIN users to create KVM virtual machines that are
- controlled by userspace.
+ tristate
- If unsure, say N.
+source "arch/s390/kvm/s390/Kconfig"
endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index dac9d53b23d8..c43d7dffca13 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -3,13 +3,4 @@
#
# Copyright IBM Corp. 2008
-include $(srctree)/virt/kvm/Makefile.kvm
-
-ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
-
-kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
-kvm-y += dat.o gmap.o faultin.o
-
-kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM_S390) += s390/
diff --git a/arch/s390/kvm/gmap/Makefile b/arch/s390/kvm/gmap/Makefile
new file mode 100644
index 000000000000..21967ed88877
--- /dev/null
+++ b/arch/s390/kvm/gmap/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GMAP ?= ../gmap
+
+kvm-y += $(GMAP)/dat.o $(GMAP)/gmap.o $(GMAP)/faultin.o
diff --git a/arch/s390/kvm/dat.c b/arch/s390/kvm/gmap/dat.c
similarity index 100%
rename from arch/s390/kvm/dat.c
rename to arch/s390/kvm/gmap/dat.c
diff --git a/arch/s390/kvm/dat.h b/arch/s390/kvm/gmap/dat.h
similarity index 99%
rename from arch/s390/kvm/dat.h
rename to arch/s390/kvm/gmap/dat.h
index 123e11dcd70d..44e0675bf136 100644
--- a/arch/s390/kvm/dat.h
+++ b/arch/s390/kvm/gmap/dat.h
@@ -6,8 +6,8 @@
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
*/
-#ifndef __KVM_S390_DAT_H
-#define __KVM_S390_DAT_H
+#ifndef ARCH_KVM_GMAP_DAT_H
+#define ARCH_KVM_GMAP_DAT_H
#include <linux/radix-tree.h>
#include <linux/refcount.h>
@@ -967,4 +967,4 @@ static inline bool crste_is_ucas(union crste crste)
return is_pmd(crste) && crste.h.i && crste.h.fc0.tl == 1 && crste.h.fc == 0;
}
-#endif /* __KVM_S390_DAT_H */
+#endif /* ARCH_KVM_GMAP_DAT_H */
diff --git a/arch/s390/kvm/faultin.c b/arch/s390/kvm/gmap/faultin.c
similarity index 100%
rename from arch/s390/kvm/faultin.c
rename to arch/s390/kvm/gmap/faultin.c
diff --git a/arch/s390/kvm/faultin.h b/arch/s390/kvm/gmap/faultin.h
similarity index 96%
rename from arch/s390/kvm/faultin.h
rename to arch/s390/kvm/gmap/faultin.h
index f86176d2769c..f343b6fb6f16 100644
--- a/arch/s390/kvm/faultin.h
+++ b/arch/s390/kvm/gmap/faultin.h
@@ -6,8 +6,8 @@
* Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
*/
-#ifndef __KVM_S390_FAULTIN_H
-#define __KVM_S390_FAULTIN_H
+#ifndef ARCH_KVM_GMAP_FAULTIN_H
+#define ARCH_KVM_GMAP_FAULTIN_H
#include <linux/kvm_host.h>
@@ -89,4 +89,4 @@ static inline int kvm_s390_get_guest_pages(struct kvm *kvm, struct guest_fault *
#define kvm_s390_array_needs_retry_safe(kvm, seq, array) \
kvm_s390_multiple_faults_need_retry(kvm, seq, array, ARRAY_SIZE(array), false)
-#endif /* __KVM_S390_FAULTIN_H */
+#endif /* ARCH_KVM_GMAP_FAULTIN_H */
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap/gmap.c
similarity index 99%
rename from arch/s390/kvm/gmap.c
rename to arch/s390/kvm/gmap/gmap.c
index ef0c6ebfdde2..1312d7882824 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap/gmap.c
@@ -21,7 +21,7 @@
#include "dat.h"
#include "gmap.h"
-#include "kvm-s390.h"
+#include "s390.h"
#include "faultin.h"
static inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap/gmap.h
similarity index 98%
rename from arch/s390/kvm/gmap.h
rename to arch/s390/kvm/gmap/gmap.h
index ccb5cd751e31..e2b3bd457782 100644
--- a/arch/s390/kvm/gmap.h
+++ b/arch/s390/kvm/gmap/gmap.h
@@ -7,8 +7,8 @@
* Claudio Imbrenda <imbrenda@linux.ibm.com>
*/
-#ifndef ARCH_KVM_S390_GMAP_H
-#define ARCH_KVM_S390_GMAP_H
+#ifndef ARCH_KVM_GMAP_GMAP_H
+#define ARCH_KVM_GMAP_GMAP_H
#include "dat.h"
@@ -241,4 +241,4 @@ static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int ed
return sg->guest_asce.val == asce.val && sg->edat_level == edat_level;
}
-#endif /* ARCH_KVM_S390_GMAP_H */
+#endif /* ARCH_KVM_GMAP_GMAP_H */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/s390/Kconfig
similarity index 62%
copy from arch/s390/kvm/Kconfig
copy to arch/s390/kvm/s390/Kconfig
index 5b835bc6a194..f9853e61fd58 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/s390/Kconfig
@@ -1,24 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
#
-# KVM configuration
+# KVM_S390 configuration
#
source "virt/kvm/Kconfig"
-menuconfig VIRTUALIZATION
- def_bool y
- prompt "KVM"
- help
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-config KVM
+config KVM_S390
def_tristate y
- prompt "Kernel-based Virtual Machine (KVM) support"
+ prompt "Kernel-based Virtual Machine (KVM) support (s390)"
+ select KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_ASYNC_PF
select KVM_ASYNC_PF_SYNC
@@ -31,7 +20,7 @@ config KVM
select VIRT_XFER_TO_GUEST_WORK
select KVM_MMU_LOCKLESS_AGING
help
- Support hosting paravirtualized guest machines using the SIE
+ Support hosting paravirtualized s390 guest machines using the SIE
virtualization capability on the mainframe. This should work
on any 64bit machine.
@@ -45,11 +34,9 @@ config KVM
config KVM_S390_UCONTROL
bool "Userspace controlled virtual machines"
- depends on KVM
+ depends on KVM_S390
help
Allow CAP_SYS_ADMIN users to create KVM virtual machines that are
controlled by userspace.
If unsure, say N.
-
-endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/s390/Makefile
similarity index 53%
copy from arch/s390/kvm/Makefile
copy to arch/s390/kvm/s390/Makefile
index dac9d53b23d8..51aee874b36f 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/s390/Makefile
@@ -3,13 +3,15 @@
#
# Copyright IBM Corp. 2008
+KVM := ../../../../virt/kvm
include $(srctree)/virt/kvm/Makefile.kvm
+include $(srctree)/arch/s390/kvm/gmap/Makefile
-ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
+ccflags-y := -I$(src) -I$(srctree)/arch/s390/kvm/gmap
-kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
+kvm-y += s390.o intercept.o interrupt.o priv.o sigp.o
kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
-kvm-y += dat.o gmap.o faultin.o
kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
-obj-$(CONFIG_KVM) += kvm.o
+
+obj-$(CONFIG_KVM_S390) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/s390/diag.c
similarity index 99%
rename from arch/s390/kvm/diag.c
rename to arch/s390/kvm/s390/diag.c
index d89d1c381522..700d9b7b68bc 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/s390/diag.c
@@ -12,7 +12,7 @@
#include <linux/kvm_host.h>
#include <asm/gmap_helpers.h>
#include <asm/virtio-ccw.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/s390/gaccess.c
similarity index 99%
rename from arch/s390/kvm/gaccess.c
rename to arch/s390/kvm/s390/gaccess.c
index a9da9390867d..2314ece35eab 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/s390/gaccess.c
@@ -17,7 +17,7 @@
#include <asm/access-regs.h>
#include <asm/fault.h>
#include <asm/dat-bits.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "dat.h"
#include "gmap.h"
#include "gaccess.h"
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/s390/gaccess.h
similarity index 99%
rename from arch/s390/kvm/gaccess.h
rename to arch/s390/kvm/s390/gaccess.h
index b5385cec60f4..ef922b3b4990 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/s390/gaccess.h
@@ -14,7 +14,7 @@
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
-#include "kvm-s390.h"
+#include "s390.h"
/**
* kvm_s390_real_to_abs - convert guest real address to guest absolute address
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/s390/guestdbg.c
similarity index 99%
rename from arch/s390/kvm/guestdbg.c
rename to arch/s390/kvm/s390/guestdbg.c
index 69835e1d4f20..c880ebbdef2f 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/s390/guestdbg.c
@@ -8,7 +8,7 @@
*/
#include <linux/kvm_host.h>
#include <linux/errno.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "gaccess.h"
/*
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/s390/intercept.c
similarity index 99%
rename from arch/s390/kvm/intercept.c
rename to arch/s390/kvm/s390/intercept.c
index 39aff324203e..9b897328e4b9 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/s390/intercept.c
@@ -17,7 +17,7 @@
#include <asm/sysinfo.h>
#include <asm/uv.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/s390/interrupt.c
similarity index 99%
rename from arch/s390/kvm/interrupt.c
rename to arch/s390/kvm/s390/interrupt.c
index 7cb8ce833b62..04a4db24fc75 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/s390/interrupt.c
@@ -29,7 +29,7 @@
#include <asm/nmi.h>
#include <asm/airq.h>
#include <asm/tpi.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#include "pci.h"
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/s390/pci.c
similarity index 99%
rename from arch/s390/kvm/pci.c
rename to arch/s390/kvm/s390/pci.c
index 86d93e8dddae..83180897bf37 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/s390/pci.c
@@ -14,7 +14,7 @@
#include <asm/pci_io.h>
#include <asm/sclp.h>
#include "pci.h"
-#include "kvm-s390.h"
+#include "s390.h"
struct zpci_aift *aift;
diff --git a/arch/s390/kvm/pci.h b/arch/s390/kvm/s390/pci.h
similarity index 100%
rename from arch/s390/kvm/pci.h
rename to arch/s390/kvm/s390/pci.h
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/s390/priv.c
similarity index 99%
rename from arch/s390/kvm/priv.c
rename to arch/s390/kvm/s390/priv.c
index a3250ad83a8e..123f9e56da96 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/s390/priv.c
@@ -26,7 +26,7 @@
#include <asm/ap.h>
#include <asm/gmap_helpers.h>
#include "gaccess.h"
-#include "kvm-s390.h"
+#include "s390.h"
#include "trace.h"
#include "gmap.h"
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/s390/pv.c
similarity index 99%
rename from arch/s390/kvm/pv.c
rename to arch/s390/kvm/s390/pv.c
index c2dafd812a3b..1bddc9aeb1a9 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/s390/pv.c
@@ -17,7 +17,7 @@
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "dat.h"
#include "gaccess.h"
#include "gmap.h"
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/s390/s390.c
similarity index 99%
rename from arch/s390/kvm/kvm-s390.c
rename to arch/s390/kvm/s390/s390.c
index b2c01fa7b852..6b5b366fb073 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/s390/s390.c
@@ -50,7 +50,7 @@
#include <asm/fpu.h>
#include <asm/ap.h>
#include <asm/uv.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "gaccess.h"
#include "gmap.h"
#include "faultin.h"
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/s390/s390.h
similarity index 99%
rename from arch/s390/kvm/kvm-s390.h
rename to arch/s390/kvm/s390/s390.h
index bf1d7798c1af..ad1aeec40cae 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/s390/s390.h
@@ -448,7 +448,7 @@ void kvm_s390_vsie_destroy(struct kvm *kvm);
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
-/* implemented in kvm-s390.c */
+/* implemented in s390.c */
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/s390/sigp.c
similarity index 99%
rename from arch/s390/kvm/sigp.c
rename to arch/s390/kvm/s390/sigp.c
index 55c34cb35428..131b3371ef4f 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/s390/sigp.c
@@ -14,7 +14,7 @@
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
-#include "kvm-s390.h"
+#include "s390.h"
#include "trace.h"
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/s390/trace-s390.h
similarity index 100%
rename from arch/s390/kvm/trace-s390.h
rename to arch/s390/kvm/s390/trace-s390.h
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/s390/trace.h
similarity index 100%
rename from arch/s390/kvm/trace.h
rename to arch/s390/kvm/s390/trace.h
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/s390/vsie.c
similarity index 99%
rename from arch/s390/kvm/vsie.c
rename to arch/s390/kvm/s390/vsie.c
index 0330829b4046..09feabe1d095 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/s390/vsie.c
@@ -20,7 +20,7 @@
#include <asm/nmi.h>
#include <asm/dis.h>
#include <asm/facility.h>
-#include "kvm-s390.h"
+#include "s390.h"
#include "gaccess.h"
#include "gmap.h"
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 13f903993ed0..ff2aff71e207 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1529,7 +1529,7 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
-#ifndef CONFIG_S390
+#ifndef CONFIG_KVM_S390
void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 15/27] KVM: S390: Refactor gmap
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (13 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 14/27] KVM: s390: Move s390 kvm code into a subdirectory Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 16/27] KVM: Make device name configurable Steffen Eiden
` (12 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Refactor gmap code such that a second s390 (host) KVM implementation can
use the gmap code as well. Move relevant definitions into the shared
kvm_host.h. Move mmu code and traces from s390 to gmap.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/include/asm/kvm_host.h | 9 ++
arch/s390/include/asm/kvm_host_s390.h | 11 +-
arch/s390/kvm/gmap/Makefile | 2 +-
arch/s390/kvm/gmap/faultin.c | 11 +-
arch/s390/kvm/gmap/gmap.c | 11 +-
arch/s390/kvm/gmap/gmap.h | 11 ++
arch/s390/kvm/gmap/mmu.c | 154 ++++++++++++++++++++++++++
arch/s390/kvm/gmap/trace-gmap.h | 59 ++++++++++
arch/s390/kvm/s390/s390.c | 116 +------------------
arch/s390/kvm/s390/s390.h | 16 +++
arch/s390/kvm/s390/trace.h | 14 ---
11 files changed, 272 insertions(+), 142 deletions(-)
create mode 100644 arch/s390/kvm/gmap/mmu.c
create mode 100644 arch/s390/kvm/gmap/trace-gmap.h
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6ff643ac0d15..1c20168a3ef5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -5,4 +5,13 @@
#include <asm/kvm_host_s390.h>
+#define PGM_PROTECTION 0x04
+#define PGM_ADDRESSING 0x05
+#define PGM_SEGMENT_TRANSLATION 0x10
+#define PGM_PAGE_TRANSLATION 0x11
+#define PGM_ASCE_TYPE 0x38
+#define PGM_REGION_FIRST_TRANS 0x39
+#define PGM_REGION_SECOND_TRANS 0x3a
+#define PGM_REGION_THIRD_TRANS 0x3b
+
#endif
diff --git a/arch/s390/include/asm/kvm_host_s390.h b/arch/s390/include/asm/kvm_host_s390.h
index 2d62a8ff8008..c528d7600bed 100644
--- a/arch/s390/include/asm/kvm_host_s390.h
+++ b/arch/s390/include/asm/kvm_host_s390.h
@@ -153,8 +153,7 @@ struct kvm_vcpu_stat {
#define PGM_OPERATION 0x01
#define PGM_PRIVILEGED_OP 0x02
#define PGM_EXECUTE 0x03
-#define PGM_PROTECTION 0x04
-#define PGM_ADDRESSING 0x05
+/* 0x04 & 0x05 defined in kvm_host.h */
#define PGM_SPECIFICATION 0x06
#define PGM_DATA 0x07
#define PGM_FIXED_POINT_OVERFLOW 0x08
@@ -165,8 +164,7 @@ struct kvm_vcpu_stat {
#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d
#define PGM_HFP_SIGNIFICANCE 0x0e
#define PGM_HFP_DIVIDE 0x0f
-#define PGM_SEGMENT_TRANSLATION 0x10
-#define PGM_PAGE_TRANSLATION 0x11
+/* 0x10 & 0x11 defined in kvm_host.h */
#define PGM_TRANSLATION_SPEC 0x12
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
@@ -196,10 +194,7 @@ struct kvm_vcpu_stat {
#define PGM_STACK_SPECIFICATION 0x32
#define PGM_STACK_TYPE 0x33
#define PGM_STACK_OPERATION 0x34
-#define PGM_ASCE_TYPE 0x38
-#define PGM_REGION_FIRST_TRANS 0x39
-#define PGM_REGION_SECOND_TRANS 0x3a
-#define PGM_REGION_THIRD_TRANS 0x3b
+/* 0x38 - 0x3b defined in kvm_host.h */
#define PGM_SECURE_STORAGE_ACCESS 0x3d
#define PGM_NON_SECURE_STORAGE_ACCESS 0x3e
#define PGM_SECURE_STORAGE_VIOLATION 0x3f
diff --git a/arch/s390/kvm/gmap/Makefile b/arch/s390/kvm/gmap/Makefile
index 21967ed88877..140914c5c14f 100644
--- a/arch/s390/kvm/gmap/Makefile
+++ b/arch/s390/kvm/gmap/Makefile
@@ -2,4 +2,4 @@
GMAP ?= ../gmap
-kvm-y += $(GMAP)/dat.o $(GMAP)/gmap.o $(GMAP)/faultin.o
+kvm-y += $(GMAP)/dat.o $(GMAP)/gmap.o $(GMAP)/faultin.o $(GMAP)/mmu.o
diff --git a/arch/s390/kvm/gmap/faultin.c b/arch/s390/kvm/gmap/faultin.c
index e37cd18200f5..26b7d4cb1e86 100644
--- a/arch/s390/kvm/gmap/faultin.c
+++ b/arch/s390/kvm/gmap/faultin.c
@@ -9,10 +9,15 @@
#include <linux/kvm_host.h>
#include "gmap.h"
-#include "trace.h"
#include "faultin.h"
-
-bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu);
+#ifdef KVM_S390_ARM64
+#include "arm.h"
+#else
+#include "s390.h"
+#endif
+
+#define CREATE_TRACE_POINTS
+#include "trace-gmap.h"
/*
* kvm_s390_faultin_gfn() - handle a dat fault.
diff --git a/arch/s390/kvm/gmap/gmap.c b/arch/s390/kvm/gmap/gmap.c
index 1312d7882824..8c2cc65e7a85 100644
--- a/arch/s390/kvm/gmap/gmap.c
+++ b/arch/s390/kvm/gmap/gmap.c
@@ -21,14 +21,13 @@
#include "dat.h"
#include "gmap.h"
+#ifdef KVM_S390_ARM64
+#include "arm.h"
+#else
#include "s390.h"
+#endif
#include "faultin.h"
-static inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.sie_block->prog0c & PROG_IN_SIE;
-}
-
static int gmap_limit_to_type(gfn_t limit)
{
if (!limit)
@@ -253,6 +252,7 @@ int s390_replace_asce(struct gmap *gmap)
bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint)
{
+#ifndef KVM_S390_ARM64
struct kvm *kvm = gmap->kvm;
struct kvm_vcpu *vcpu;
gfn_t prefix_gfn;
@@ -271,6 +271,7 @@ bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint)
kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}
}
+#endif /* ifdef KVM_S390_ARM64 */
return true;
}
diff --git a/arch/s390/kvm/gmap/gmap.h b/arch/s390/kvm/gmap/gmap.h
index e2b3bd457782..c70f0c357b5c 100644
--- a/arch/s390/kvm/gmap/gmap.h
+++ b/arch/s390/kvm/gmap/gmap.h
@@ -241,4 +241,15 @@ static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int ed
return sg->guest_asce.val == asce.val && sg->edat_level == edat_level;
}
+int gmap_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int gmap_prepare_memory_region(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+void gmap_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+bool gmap_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+
#endif /* ARCH_KVM_GMAP_GMAP_H */
diff --git a/arch/s390/kvm/gmap/mmu.c b/arch/s390/kvm/gmap/mmu.c
new file mode 100644
index 000000000000..2d81466e49cb
--- /dev/null
+++ b/arch/s390/kvm/gmap/mmu.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+#ifdef KVM_S390_ARM64
+#include "arm.h"
+#else
+#include "s390.h"
+#endif
+#include "gmap.h"
+#include "dat.h"
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int gmap_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ int r;
+ unsigned long n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty;
+
+ if (kvm_is_ucontrol(kvm))
+ return -EINVAL;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = -EINVAL;
+ if (log->slot >= KVM_USER_MEM_SLOTS)
+ goto out;
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
+ if (r)
+ goto out;
+
+ /* Clear the dirty log */
+ if (is_dirty) {
+ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int gmap_prepare_memory_region(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ gpa_t size;
+
+ if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
+ return -EINVAL;
+
+ /* When we are protected, we should not change the memory slots */
+ if (kvm_s390_pv_get_handle(kvm))
+ return -EINVAL;
+
+ if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
+ /*
+ * A few sanity checks. We can have memory slots which have to be
+ * located/ended at a segment boundary (1MB). The memory in userland is
+ * ok to be fragmented into various different vmas. It is okay to mmap()
+ * and munmap() stuff in this slot after doing this call at any time
+ */
+
+ if (new->userspace_addr & 0xffffful)
+ return -EINVAL;
+
+ size = new->npages * PAGE_SIZE;
+ if (size & 0xffffful)
+ return -EINVAL;
+
+ if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
+ return -EINVAL;
+ }
+
+ if (!kvm_s390_is_migration_mode(kvm))
+ return 0;
+
+ /*
+ * Turn off migration mode when:
+ * - userspace creates a new memslot with dirty logging off,
+ * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
+ * dirty logging is turned off.
+ * Migration mode expects dirty page logging being enabled to store
+ * its dirty bitmap.
+ */
+ if (change != KVM_MR_DELETE &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ WARN(kvm_s390_vm_stop_migration(kvm),
+ "Failed to stop migration mode");
+
+ return 0;
+}
+
+void gmap_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ struct kvm_s390_mmu_cache *mc = NULL;
+ int rc = 0;
+
+ if (change == KVM_MR_FLAGS_ONLY)
+ return;
+
+ mc = kvm_s390_new_mmu_cache();
+ if (!mc) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ scoped_guard(write_lock, &kvm->mmu_lock) {
+ switch (change) {
+ case KVM_MR_DELETE:
+ rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
+ break;
+ case KVM_MR_MOVE:
+ rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
+ if (rc)
+ break;
+ fallthrough;
+ case KVM_MR_CREATE:
+ rc = dat_create_slot(mc, kvm->arch.gmap->asce, new->base_gfn, new->npages);
+ break;
+ case KVM_MR_FLAGS_ONLY:
+ break;
+ default:
+ WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+ }
+ }
+out:
+ if (rc)
+ pr_warn("failed to commit memory region\n");
+ kvm_s390_free_mmu_cache(mc);
+}
+
+/**
+ * gmap_test_age_gfn() - test young
+ * @kvm: the kvm instance
+ * @range: the range of guest addresses whose young status needs to be checked
+ *
+ * Context: called by KVM common code without holding the kvm mmu lock
+ * Return: true if any page in the given range is young, otherwise false.
+ */
+bool gmap_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ scoped_guard(read_lock, &kvm->mmu_lock)
+ return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
+}
diff --git a/arch/s390/kvm/gmap/trace-gmap.h b/arch/s390/kvm/gmap/trace-gmap.h
new file mode 100644
index 000000000000..25f8a527fdde
--- /dev/null
+++ b/arch/s390/kvm/gmap/trace-gmap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(GMAP_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define GMAP_TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH ../gmap
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-gmap
+
+#ifdef KVM_S390_ARM64
+#define __KVM_FIELDS \
+ __field(unsigned long, pstate) \
+ __field(unsigned long, pc)
+#define __KVM_ASSIGN ({\
+ __entry->pstate = vcpu->arch.sae_block.pstate; \
+ __entry->pc = vcpu->arch.sae_block.pc; \
+ })
+#define __KVM_PRINT \
+ __entry->pstate, \
+ __entry->pc
+#else
+#define __KVM_FIELDS \
+ __field(unsigned long, pswmask) \
+ __field(unsigned long, pswaddr)
+#define __KVM_ASSIGN ({\
+ __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
+ __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
+ })
+#define __KVM_PRINT \
+ __entry->pswmask,\
+ __entry->pswaddr
+#endif
+
+TRACE_EVENT(kvm_s390_major_guest_pfault,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+
+ TP_STRUCT__entry(
+ __field(int, id)
+ __KVM_FIELDS
+ ),
+
+ TP_fast_assign(
+ __entry->id = vcpu->vcpu_id;
+ __KVM_ASSIGN
+ ),
+ TP_printk("%02d[%016lx-%016lx]: major fault, maybe applicable for pfault",
+ __entry->id,
+ __KVM_PRINT
+ )
+ );
+
+#endif /* GMAP_TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/s390/kvm/s390/s390.c b/arch/s390/kvm/s390/s390.c
index 6b5b366fb073..497abe3a83f4 100644
--- a/arch/s390/kvm/s390/s390.c
+++ b/arch/s390/kvm/s390/s390.c
@@ -734,33 +734,7 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r;
- unsigned long n;
- struct kvm_memory_slot *memslot;
- int is_dirty;
-
- if (kvm_is_ucontrol(kvm))
- return -EINVAL;
-
- mutex_lock(&kvm->slots_lock);
-
- r = -EINVAL;
- if (log->slot >= KVM_USER_MEM_SLOTS)
- goto out;
-
- r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
- if (r)
- goto out;
-
- /* Clear the dirty log */
- if (is_dirty) {
- n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
- }
- r = 0;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
+ return gmap_get_dirty_log(kvm, log);
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
@@ -1195,7 +1169,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
* Must be called with kvm->slots_lock to avoid races with ourselves and
* kvm_s390_vm_start_migration.
*/
-static int kvm_s390_vm_stop_migration(struct kvm *kvm)
+int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
/* migration mode already disabled */
if (!kvm->arch.migration_mode)
@@ -5636,51 +5610,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- gpa_t size;
-
- if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
- return -EINVAL;
-
- /* When we are protected, we should not change the memory slots */
- if (kvm_s390_pv_get_handle(kvm))
- return -EINVAL;
-
- if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
- /*
- * A few sanity checks. We can have memory slots which have to be
- * located/ended at a segment boundary (1MB). The memory in userland is
- * ok to be fragmented into various different vmas. It is okay to mmap()
- * and munmap() stuff in this slot after doing this call at any time
- */
-
- if (new->userspace_addr & 0xffffful)
- return -EINVAL;
-
- size = new->npages * PAGE_SIZE;
- if (size & 0xffffful)
- return -EINVAL;
-
- if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
- return -EINVAL;
- }
-
- if (!kvm->arch.migration_mode)
- return 0;
-
- /*
- * Turn off migration mode when:
- * - userspace creates a new memslot with dirty logging off,
- * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
- * dirty logging is turned off.
- * Migration mode expects dirty page logging being enabled to store
- * its dirty bitmap.
- */
- if (change != KVM_MR_DELETE &&
- !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
- WARN(kvm_s390_vm_stop_migration(kvm),
- "Failed to stop migration mode");
-
- return 0;
+ return gmap_prepare_memory_region(kvm, old, new, change);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -5688,42 +5618,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- struct kvm_s390_mmu_cache *mc = NULL;
- int rc = 0;
-
- if (change == KVM_MR_FLAGS_ONLY)
- return;
-
- mc = kvm_s390_new_mmu_cache();
- if (!mc) {
- rc = -ENOMEM;
- goto out;
- }
-
- scoped_guard(write_lock, &kvm->mmu_lock) {
- switch (change) {
- case KVM_MR_DELETE:
- rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
- break;
- case KVM_MR_MOVE:
- rc = dat_delete_slot(mc, kvm->arch.gmap->asce, old->base_gfn, old->npages);
- if (rc)
- break;
- fallthrough;
- case KVM_MR_CREATE:
- rc = dat_create_slot(mc, kvm->arch.gmap->asce, new->base_gfn, new->npages);
- break;
- case KVM_MR_FLAGS_ONLY:
- break;
- default:
- WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
- }
- }
-out:
- if (rc)
- pr_warn("failed to commit memory region\n");
- kvm_s390_free_mmu_cache(mc);
- return;
+ gmap_commit_memory_region(kvm, old, new, change);
}
/**
@@ -5736,8 +5631,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- scoped_guard(read_lock, &kvm->mmu_lock)
- return dat_test_age_gfn(kvm->arch.gmap->asce, range->start, range->end);
+ return gmap_test_age_gfn(kvm, range);
}
/**
diff --git a/arch/s390/kvm/s390/s390.h b/arch/s390/kvm/s390/s390.h
index ad1aeec40cae..3acb01690bf6 100644
--- a/arch/s390/kvm/s390/s390.h
+++ b/arch/s390/kvm/s390/s390.h
@@ -32,6 +32,11 @@ union kvm_s390_quad {
unsigned char one;
};
+static inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sie_block->prog0c & PROG_IN_SIE;
+}
+
static inline void kvm_s390_fpu_store(struct kvm_run *run)
{
fpu_stfpc(&run->s.regs.fpc);
@@ -588,6 +593,11 @@ static inline bool kvm_s390_cur_gmap_fault_is_write(void)
return test_facility(75) && (current->thread.gmap_teid.fsi == TEID_FSI_STORE);
}
+static __always_inline int kvm_s390_is_migration_mode(struct kvm *kvm)
+{
+ return kvm->arch.migration_mode;
+}
+
/**
* kvm_s390_vcpu_crypto_reset_all
*
@@ -618,4 +628,10 @@ void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);
*/
extern unsigned int diag9c_forwarding_hz;
+/*
+ * Must be called with kvm->slots_lock to avoid races with ourselves and
+ * kvm_s390_vm_start_migration.
+ */
+int kvm_s390_vm_stop_migration(struct kvm *kvm);
+
#endif
diff --git a/arch/s390/kvm/s390/trace.h b/arch/s390/kvm/s390/trace.h
index aa419eb6a0c8..97774fe09a85 100644
--- a/arch/s390/kvm/s390/trace.h
+++ b/arch/s390/kvm/s390/trace.h
@@ -45,20 +45,6 @@ TRACE_EVENT(kvm_s390_skey_related_inst,
VCPU_TP_PRINTK("%s", "storage key related instruction")
);
-TRACE_EVENT(kvm_s390_major_guest_pfault,
- TP_PROTO(VCPU_PROTO_COMMON),
- TP_ARGS(VCPU_ARGS_COMMON),
-
- TP_STRUCT__entry(
- VCPU_FIELD_COMMON
- ),
-
- TP_fast_assign(
- VCPU_ASSIGN_COMMON
- ),
- VCPU_TP_PRINTK("%s", "major fault, maybe applicable for pfault")
- );
-
TRACE_EVENT(kvm_s390_pfault_init,
TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 16/27] KVM: Make device name configurable
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (14 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 15/27] KVM: S390: Refactor gmap Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 17/27] KVM: Remove KVM_MMIO as config option Steffen Eiden
` (11 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Allow KVM implementations to choose alternative device names. This is
especially useful for architectures providing multiple KVM
implementations simultaneously.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
include/linux/kvm_host.h | 4 +++-
virt/kvm/kvm_main.c | 21 ++++++++++++++-------
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ff2aff71e207..d5d9757e40ca 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1062,7 +1062,9 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
+int kvm_init(unsigned int vcpu_size, unsigned int vcpu_align, struct module *module);
+int kvm_init_with_dev(unsigned int vcpu_size, unsigned int vcpu_align,
+ struct module *module, const char *dev_name, int minor);
void kvm_exit(void);
bool file_is_kvm(struct file *file);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 642f9e9638cc..d05e2c1e6fb0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5547,9 +5547,7 @@ static struct file_operations kvm_chardev_ops = {
};
static struct miscdevice kvm_dev = {
- KVM_MINOR,
- "kvm",
- &kvm_chardev_ops,
+ .fops = &kvm_chardev_ops,
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
@@ -6321,13 +6319,13 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
kfree(env);
}
-static void kvm_init_debug(void)
+static void kvm_init_debug(const char *dev_name)
{
const struct file_operations *fops;
const struct kvm_stats_desc *pdesc;
int i;
- kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
+ kvm_debugfs_dir = debugfs_create_dir(dev_name, NULL);
for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
pdesc = &kvm_vm_stats_desc[i];
@@ -6463,11 +6461,20 @@ void kvm_unregister_perf_callbacks(void)
}
#endif
-int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
+int kvm_init(unsigned int vcpu_size, unsigned int vcpu_align, struct module *module)
+{
+ return kvm_init_with_dev(vcpu_size, vcpu_align, module, "kvm", KVM_MINOR);
+}
+
+int kvm_init_with_dev(unsigned int vcpu_size, unsigned int vcpu_align,
+ struct module *module, const char *dev_name, int minor)
{
int r;
int cpu;
+ kvm_dev.name = dev_name;
+ kvm_dev.minor = minor;
+
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
@@ -6505,7 +6512,7 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
- kvm_init_debug();
+ kvm_init_debug(kvm_dev.name);
r = kvm_vfio_ops_init();
if (WARN_ON_ONCE(r))
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 17/27] KVM: Remove KVM_MMIO as config option
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (15 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 16/27] KVM: Make device name configurable Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 18/27] KVM: s390: Prepare kvm-s390 for a second kvm module Steffen Eiden
` (10 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Defining KVM_MMIO is not flexible enough for multi-KVM systems with
different Kconfig options regarding KVM_MMIO. Therefore, remove KVM_MMIO
from the config space and use the macro HAVE_KVM_MMIO instead.
Suggested-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/arm64/include/asm/kvm_host.h | 2 ++
arch/arm64/kvm/Kconfig | 1 -
arch/loongarch/include/asm/kvm_host.h | 2 ++
arch/loongarch/kvm/Kconfig | 1 -
arch/mips/include/asm/kvm_host.h | 2 ++
arch/mips/kvm/Kconfig | 1 -
arch/powerpc/include/asm/kvm_host.h | 7 +++++++
arch/powerpc/kvm/Kconfig | 4 ----
arch/riscv/include/asm/kvm_host.h | 2 ++
arch/riscv/kvm/Kconfig | 1 -
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/Kconfig | 1 -
include/linux/kvm_host.h | 2 +-
virt/kvm/Kconfig | 3 ---
virt/kvm/Makefile.kvm | 3 +--
virt/kvm/coalesced_mmio.c | 3 +++
virt/kvm/coalesced_mmio.h | 2 +-
virt/kvm/kvm_main.c | 8 ++++----
18 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e3a2ac3979ac..39630e235a36 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -33,6 +33,8 @@
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define HAVE_KVM_MMIO
+
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 7d1f22fd490b..4761b5e7e75b 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -22,7 +22,6 @@ menuconfig KVM
select KVM_COMMON
select KVM_GENERIC_HARDWARE_ENABLING
select HAVE_KVM_CPU_RELAX_INTERCEPT
- select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select VIRT_XFER_TO_GUEST_WORK
select KVM_VFIO
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 19eb5e5c3984..392e14302453 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -26,6 +26,8 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+#define HAVE_KVM_MMIO
+
/* Loongarch KVM register ids */
#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index 8e5213609975..a244f2aea7ff 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -28,7 +28,6 @@ config KVM
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
- select KVM_MMIO
select VIRT_XFER_TO_GUEST_WORK
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index c14b10821817..e3cb7c8c9461 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -26,6 +26,8 @@
#include <kvm/iodev.h>
+#define HAVE_KVM_MMIO
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b1b9a1d67758..c65596fdf54f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
select EXPORT_UASM
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
- select KVM_MMIO
select KVM_GENERIC_HARDWARE_ENABLING
select HAVE_KVM_READONLY_MEM
help
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2d139c807577..92d14438e31b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -29,6 +29,13 @@
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+#if defined(CONFIG_KVM_BOOK3S_32_HANDLER) || \
+ defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) || \
+ defined(CONFIG_KVM_E500V2) || \
+ defined(CONFIG_KVM_E500MC)
+#define HAVE_KVM_MMIO
+#endif
+
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 9a0d1c1aca6c..d97a4a51ec55 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -29,7 +29,6 @@ config KVM_BOOK3S_HANDLER
config KVM_BOOK3S_32_HANDLER
bool
select KVM_BOOK3S_HANDLER
- select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
@@ -37,7 +36,6 @@ config KVM_BOOK3S_64_HANDLER
config KVM_BOOK3S_PR_POSSIBLE
bool
- select KVM_MMIO
config KVM_BOOK3S_HV_POSSIBLE
bool
@@ -200,7 +198,6 @@ config KVM_E500V2
depends on PPC_E500 && !PPC_E500MC
depends on !CONTEXT_TRACKING_USER
select KVM
- select KVM_MMIO
help
Support running unmodified E500 guest kernels in virtual machines on
E500v2 host processors.
@@ -215,7 +212,6 @@ config KVM_E500MC
depends on PPC_E500MC
depends on !CONTEXT_TRACKING_USER
select KVM
- select KVM_MMIO
select KVM_BOOKE_HV
help
Support running unmodified E500MC/E5500/E6500 guest kernels in
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 24585304c02b..a405cd30c6fa 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -25,6 +25,8 @@
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
+#define HAVE_KVM_MMIO
+
#define KVM_MAX_VCPUS 1024
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index ec2cee0a39e0..3aea8d4939d2 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -28,7 +28,6 @@ config KVM
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
- select KVM_MMIO
select VIRT_XFER_TO_GUEST_WORK
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6e4e3ef9b8c7..4cdaf60c9217 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -45,6 +45,8 @@
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+#define HAVE_KVM_MMIO
+
/*
* CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
* KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 801bf9e520db..da5d9fa8a11c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -32,7 +32,6 @@ config KVM_X86
select VHOST_TASK
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
- select KVM_MMIO
select SCHED_INFO
select PERF_EVENTS
select GUEST_PERF_EVENTS
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d5d9757e40ca..a4abf16b5879 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -833,7 +833,7 @@ struct kvm {
struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
struct list_head coalesced_zones;
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 794976b88c6f..c4a983e41765 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -40,9 +40,6 @@ config NEED_KVM_DIRTY_RING_WITH_BITMAP
bool
depends on HAVE_KVM_DIRTY_RING
-config KVM_MMIO
- bool
-
config KVM_ASYNC_PF
bool
diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm
index d047d4cf58c9..3c27c18b49a0 100644
--- a/virt/kvm/Makefile.kvm
+++ b/virt/kvm/Makefile.kvm
@@ -5,9 +5,8 @@
KVM ?= ../../../virt/kvm
-kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
+kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 6b1d90161099..070eba3c6534 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -12,6 +12,8 @@
#include <kvm/iodev.h>
#include <linux/kvm_host.h>
+
+#ifdef HAVE_KVM_MMIO
#include <linux/slab.h>
#include <linux/kvm.h>
@@ -188,3 +190,4 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
*/
return 0;
}
+#endif /* ifdef HAVE_KVM_MMIO */
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 36f84264ed25..a8430d41945b 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -11,7 +11,7 @@
*
*/
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
#include <linux/list.h>
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d05e2c1e6fb0..baf7ea9d7a58 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4040,7 +4040,7 @@ static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
#endif
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
@@ -4866,7 +4866,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_HALT_POLL:
return 1;
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
return KVM_COALESCED_MMIO_PAGE_OFFSET;
case KVM_CAP_COALESCED_PIO:
@@ -5207,7 +5207,7 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
#endif
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
@@ -5529,7 +5529,7 @@ static long kvm_dev_ioctl(struct file *filp,
#ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */
#endif
-#ifdef CONFIG_KVM_MMIO
+#ifdef HAVE_KVM_MMIO
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 18/27] KVM: s390: Prepare kvm-s390 for a second kvm module
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (16 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 17/27] KVM: Remove KVM_MMIO as config option Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 19/27] s390: Introduce Start Arm Execution instruction Steffen Eiden
` (9 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
The second KVM module will have a different Kconfig set.
When both modules are compiled the Kconfig sets get merged and
the native s390 KVM needs to implement functionality required by
the respective config options. Ensure that s390-KVM will still compile.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/s390/s390.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/arch/s390/kvm/s390/s390.c b/arch/s390/kvm/s390/s390.c
index 497abe3a83f4..8a99d6e9f7e7 100644
--- a/arch/s390/kvm/s390/s390.c
+++ b/arch/s390/kvm/s390/s390.c
@@ -5665,6 +5665,14 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
}
+#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+/* Make s390 compile if arm64-on-s390 is selected */
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+
static inline unsigned long nonhyp_mask(int i)
{
unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 19/27] s390: Introduce Start Arm Execution instruction
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (17 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 18/27] KVM: s390: Prepare kvm-s390 for a second kvm module Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 20/27] KVM: s390: arm64: Introduce host definitions Steffen Eiden
` (8 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
The Start Arm Execution (SAE) instruction is the centerpiece for
executing arm64 (KVM) guests on s390. Its purpose is, similar to SIE, to
enable accelerated execution of arm64 virtual machines. SAE expects the
physical address of a control block as the only argument.
The host is responsible for saving and restoring
- GPRs 0-13
- access register 0-15
- breaking event register (BEAR)
- vector/floating point registers
between SAE executions to guarantee host consistency.
GPRs and BEAR are saved and restored in the asm functions. The other
registers are handled within C code. Access registers are handled in a
later patch and SVEs will be handled when they are introduced in a
future series. Most arm64 registers are handled by a satellite block
called save_area. Some registers, frequently used by hypervisors, are
placed into the SAE control block itself.
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/include/asm/asm-prototypes.h | 1 +
arch/s390/include/asm/kvm_host_arm64_types.h | 128 +++++++++++++++++++
arch/s390/include/asm/sae.h | 39 ++++++
arch/s390/include/asm/stacktrace.h | 5 +
arch/s390/kernel/asm-offsets.c | 1 +
arch/s390/kernel/entry.S | 24 ++++
arch/s390/tools/opcodes.txt | 3 +
7 files changed, 201 insertions(+)
create mode 100644 arch/s390/include/asm/kvm_host_arm64_types.h
create mode 100644 arch/s390/include/asm/sae.h
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
index 7bd1801cf241..2bf4b52f4d2d 100644
--- a/arch/s390/include/asm/asm-prototypes.h
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -6,6 +6,7 @@
#include <asm/bug.h>
#include <asm/fpu.h>
#include <asm/nospec-branch.h>
+#include <asm/sae.h>
#include <asm-generic/asm-prototypes.h>
__int128_t __ashlti3(__int128_t a, int b);
diff --git a/arch/s390/include/asm/kvm_host_arm64_types.h b/arch/s390/include/asm/kvm_host_arm64_types.h
new file mode 100644
index 000000000000..9cbc9a88f515
--- /dev/null
+++ b/arch/s390/include/asm/kvm_host_arm64_types.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_KVM_HOST_ARM64_TYPES_H
+#define ASM_KVM_HOST_ARM64_TYPES_H
+
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/compiler_attributes.h>
+#include <asm/page.h>
+#include <asm/fault.h>
+
+struct kvm_sae_block {
+ u64 _0000[16]; /* 0x0000 */
+#define SAE_ICPTR_SPURIOUS 0x00
+#define SAE_ICPTR_VALIDITY 0x01
+#define SAE_ICPTR_HOST_ACCESS_EXCEPTION 0x02
+#define SAE_ICPTR_SYNCHRONOUS_EXCEPTION 0x03
+#define SAE_ICPTR_TIMER 0x04
+#define SAE_ICPTR_PE_INTERCOMM 0x05
+#define SAE_ICPTR_GUEST_ADDRESS_SIZE 0x06
+#define SAE_ICPTR_STOP 0x07
+#define SAE_ICPTR_SYSTEM_REGISTER 0x08
+#define SAE_ICPTR_PMU 0x09
+#define SAE_ICPTR_MAINTENANCE 0x0a
+ u8 icptr; /* 0x0080 */
+ u8 _0081[7]; /* 0x0081 */
+ u64 scad; /* 0x0088 */
+ u64 _0090[16]; /* 0x0090 */
+ u32 cntp_ctl; /* 0x0110 */
+ u32 cntv_ctl; /* 0x0114 */
+ u8 irq_ctl; /* 0x0118 */
+ u8 _0119[7]; /* 0x0119 */
+ struct {
+ u64 ich_hcr_el2; /* 0x0120 */
+ u64 ich_vmcr_el2; /* 0x0128 */
+ u64 ich_ap0r0_el2; /* 0x0130 */
+ u64 ich_ap1r0_el2; /* 0x0138 */
+ u64 _0140[2]; /* 0x0140 */
+ u64 ich_lrn_el2[4]; /* 0x0150 */
+ u64 _0170[4]; /* 0x0170 */
+ } ic_regs;
+ u64 _0190[13]; /* 0x0190 */
+ u32 wip; /* 0x01f8 */
+ u32 _01fc; /* 0x01fc */
+#define SAE_SD_FORMAT_0 0x00
+ u8 sdf; /* 0x0200 */
+ u8 _0201[7]; /* 0x0201 */
+ u64 mso; /* 0x0208 */
+ u64 msl; /* 0x0210 */
+ u64 hbasce; /* 0x0218 */
+ u64 _0220; /* 0x0220 */
+ u64 gpto; /* 0x0228 */
+ u64 ic; /* 0x0230 */
+ u64 ec; /* 0x0238 */
+ u64 save_area; /* 0x0240 */
+ u64 _0248[7]; /* 0x0248 */
+ u8 _0280[6]; /* 0x0280 */
+ u16 lrcpua; /* 0x0286 */
+ u64 pstate; /* 0x0288 */
+ u64 pc; /* 0x0290 */
+ u64 sp_el0; /* 0x0298 */
+ u64 sp_el1; /* 0x02a0 */
+ u64 _02a8; /* 0x02a8 */
+ u64 fpcr; /* 0x02b0 */
+ u64 fpsr; /* 0x02b8 */
+ u16 sve_pregs[16]; /* 0x02c0 */
+ u16 sve_ffr; /* 0x02e0 */
+ u8 _02e2[6]; /* 0x02e2 */
+ u64 _02e8[3]; /* 0x02e8 */
+
+ u64 gpr[31]; /* 0x0300 */
+ u64 _03f8; /* 0x03f8 */
+
+ union {
+ u64 icptd[8]; /* 0x0400 */
+ /* validity-interception reason; icptr 0x01 */
+#define SAE_VIR_UNKNOWN 0x00
+#define SAE_VIR_UNSUPP_FORMAT 0x01
+#define SAE_VIR_MSO_BOUNDS 0x02
+#define SAE_VIR_MSLA 0x03
+#define SAE_VIR_MGPAS 0x04
+#define SAE_VIR_INVAL_SYSREG 0x05
+#define SAE_VIR_HOST_CONTROL 0x06
+#define SAE_VIR_SCA 0x07
+#define SAE_VIR_MSO_ALIGN 0x08
+#define SAE_VIR_HLC 0x09
+#define SEA_VIR_IRPTC 0x0a
+ u16 vir; /* 0x0400 */
+ /* host access interception details; icptr 0x02 */
+ struct {
+ u64 esr_elz; /* 0x0400 */
+ u8 _0408[6]; /* 0x0408 */
+ u16 pic; /* 0x040e */
+ union teid teid; /* 0x0410 */
+ gva_t far_elz; /* 0x0418 */
+ gva_t vaddr; /* 0x0420 */
+ u64 suppl; /* 0x0428 */
+ u8 gltl; /* 0x0430 */
+ u8 _0431[7]; /* 0x0431 */
+ u64 _0438; /* 0x0438 */
+ } hai;
+ /* exception-interception details; icptr 0x03 */
+ struct {
+ gva_t esr_elz; /* 0x0400 */
+ u64 _0408[2]; /* 0x0408 */
+ u64 far_elz; /* 0x0418 */
+ } trap;
+ /* timer-interception reason; icptr 0x04 */
+#define SAE_IR_TIMER_ID_VIRT BIT(6)
+#define SAE_IR_TIMER_ID_PHYS BIT(7)
+ u8 tir; /* 0x0400 */
+ };
+ u64 _0440[376]; /* 0x0440 */
+} __packed __aligned(PAGE_SIZE);
+static_assert(sizeof(struct kvm_sae_block) == PAGE_SIZE);
+
+struct kvm_sae_save_area {
+#define SAE_SAVE_AREA_FORMAT_0 0x00
+ u8 saf; /* 0x0000 */
+ u8 _0001[5]; /* 0x0001 */
+#define SAE_SAS_VALID BIT_ULL(0)
+ u16 sas; /* 0x0006 */
+ u64 sdo; /* 0x0008 */
+ u64 _0010[2]; /* 0x0010 */
+ u64 regs[507]; /* 0x0020 */
+} __packed __aligned(PAGE_SIZE);
+static_assert(sizeof(struct kvm_sae_save_area) == PAGE_SIZE);
+
+#endif /* ASM_KVM_HOST_ARM64_TYPES_H */
diff --git a/arch/s390/include/asm/sae.h b/arch/s390/include/asm/sae.h
new file mode 100644
index 000000000000..d7be5ebb25d5
--- /dev/null
+++ b/arch/s390/include/asm/sae.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_S390_SAE_H
+#define __ASM_S390_SAE_H
+
+/* defined in arch/s390/kernel/entry.S */
+int __sae64a(phys_addr_t sae_block_phys);
+
+/**
+ * sae64a() - Start Arm Execution
+ */
+static inline void sae64a(struct kvm_sae_block *sae_block)
+{
+ __sae64a(virt_to_phys(sae_block));
+}
+
+/**
+ * stiasrm() - STore and Invalidate Arm System Register Multiple
+ */
+static __always_inline void stiasrm(struct kvm_sae_save_area *save_area)
+{
+ asm volatile(".insn rre,0xb9a70000,%[r1],0\n"
+ : "=m"(*save_area)
+ : [r1] "d"(save_area));
+}
+
+/**
+ * lasrm() - Load Arm System Register Multiple
+ *
+ */
+static __always_inline void lasrm(struct kvm_sae_save_area *save_area)
+{
+ asm volatile(".insn rre,0xb9a60000,%[r1],0\n"
+ :
+ : "m" (*save_area),
+ [r1] "d" (save_area)
+ );
+}
+
+#endif /* __ASM_S390_SAE_H */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index ac3606c3babe..2d332d7c8145 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -59,6 +59,7 @@ static inline bool on_stack(struct stack_info *info,
struct stack_frame {
union {
unsigned long empty[9];
+ /* SIE stack frame */
struct {
unsigned long sie_control_block;
unsigned long sie_savearea;
@@ -68,6 +69,10 @@ struct stack_frame {
unsigned long sie_guest_asce;
unsigned long sie_irq;
};
+ /* SAE stack frame */
+ struct {
+ unsigned long sae_bear;
+ };
};
unsigned long gprs[10];
unsigned long back_chain;
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 8619adf91cdb..8ef992734bf8 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -68,6 +68,7 @@ int main(void)
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
OFFSET(__SF_SIE_GUEST_ASCE, stack_frame, sie_guest_asce);
OFFSET(__SF_SIE_IRQ, stack_frame, sie_irq);
+ OFFSET(__SF_SAE_BEAR, stack_frame, sae_bear);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index ac8d75a209fa..fc5f1dd77e6c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -244,6 +244,30 @@ EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
+#if IS_ENABLED(CONFIG_KVM_ARM64)
+/*
+ * __sae64a calling convention:
+ * %r2 pointer to sae control block physical address
+ */
+SYM_FUNC_START(__sae64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # store kernel registers
+ STBEAR __SF_SAE_BEAR(%r15) # save breaking event address register
+ .insn rre,0xb9a50000,%r2,0 # Start Arm Execution
+# Let the next instruction be NOP to avoid triggering a machine check
+# and handling it in a guest as result of the instruction execution.
+ nopr 7
+ LBEAR __SF_SAE_BEAR(%r15) # restore breaking event address register
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ BR_EX %r14
+SYM_FUNC_END(__sae64a)
+EXPORT_SYMBOL(__sae64a)
+#endif
+
/*
* SVC interrupt handler routine. System calls are synchronous events and
* are entered with interrupts disabled.
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index def2659f6602..0e4773c94af0 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -594,6 +594,9 @@ b9a0 clp RRF_U0RR
b9a1 tpei RRE_RR
b9a2 ptf RRE_R0
b9a4 uvc RRF_URR
+b9a5 sae RRE_R0
+b9a6 lasrm RRE_R0
+b9a7 stiasrm RRE_R0
b9aa lptea RRF_RURR2
b9ab essa RRF_U0RR
b9ac irbm RRE_RR
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 20/27] KVM: s390: arm64: Introduce host definitions
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (18 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 19/27] s390: Introduce Start Arm Execution instruction Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 21/27] s390/hwcaps: Report SAE support as hwcap Steffen Eiden
` (7 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Add all basic definitions the arm on s390 KVM host requires.
Including, but not limited to:
- struct kvm*arch definitions
- various functions (to be implemented in the following patches)
- various defines required to run arm64 guests
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/include/asm/kvm.h | 6 +
arch/s390/include/asm/kvm_host.h | 9 ++
arch/s390/include/asm/kvm_host_arm64.h | 199 +++++++++++++++++++++++++
3 files changed, 214 insertions(+)
create mode 100644 arch/s390/include/asm/kvm.h
create mode 100644 arch/s390/include/asm/kvm_host_arm64.h
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
new file mode 100644
index 000000000000..d9e727d2378c
--- /dev/null
+++ b/arch/s390/include/asm/kvm.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef KVM_S390_ARM64
+#include <uapi/arch/arm64/asm/kvm.h>
+#else
+#include <uapi/asm/kvm.h>
+#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1c20168a3ef5..0330d3e503d8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -3,7 +3,16 @@
#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H
+#ifdef KVM_S390_ARM64
+#include <asm/kvm_host_arm64.h>
+#else
#include <asm/kvm_host_s390.h>
+#endif
+
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
#define PGM_PROTECTION 0x04
#define PGM_ADDRESSING 0x05
diff --git a/arch/s390/include/asm/kvm_host_arm64.h b/arch/s390/include/asm/kvm_host_arm64.h
new file mode 100644
index 000000000000..5a694c835dbb
--- /dev/null
+++ b/arch/s390/include/asm/kvm_host_arm64.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_KVM_HOST_ARM64_H
+#define ASM_KVM_HOST_ARM64_H
+
+#include <linux/bug.h>
+
+#include <asm/kvm_host_arm64_types.h>
+#include "asm/debug.h"
+
+#include <kvm/arm_vgic.h>
+
+#define vcpu_gp_regs(v) ((v)->arch.sae_block.gpr)
+
+#define HAVE_KVM_MMIO
+
+#include <kvm/arm64/kvm_host.h>
+#include <asm/sae.h>
+
+#define KVM_HAVE_MMU_RWLOCK
+#define KVM_MAX_VCPUS 1
+
+#define KVM_VCPU_VALID_FEATURES 0
+
+#define KVM_HALT_POLL_NS_DEFAULT 50000
+
+#define __ctxt_sys_reg(ctx, reg) NULL
+struct kvm_cpu_context {
+ /*
+ * These are just for 32 bit, which we don't have, making them RES0.
+ * They are exposed to user space.
+ * Arm KVM seemingly does not enforce RES0.
+ */
+ u64 spsr_abt;
+ u64 spsr_und;
+ u64 spsr_irq;
+ u64 spsr_fiq;
+
+ __vector128 __aligned(16) vregs[32];
+};
+
+struct kvm_vcpu_arch {
+ struct kvm_sae_block sae_block;
+ struct kvm_sae_save_area save_area;
+ struct kvm_cpu_context ctxt;
+
+ u32 host_acrs[NUM_ACRS];
+
+ /* Hypervisor Configuration Register */
+ u64 hcr_elz;
+
+ /* Configuration flags, set once and for all before the vcpu can run */
+ u8 cflags;
+
+ /* Input flags to the hypervisor code, potentially cleared after use */
+ u8 iflags;
+
+ /* State flags for kernel bookkeeping, unused by the hypervisor code */
+ u8 sflags;
+
+ /*
+ * Don't run the guest (internal implementation need).
+ *
+ * Contrary to the flags above, this is set/cleared outside of
+ * a vcpu context, and thus cannot be mixed with the flags
+ * themselves (or the flag accesses need to be made atomic).
+ */
+ bool pause;
+
+ /* vcpu power state */
+ struct kvm_mp_state mp_state;
+ /* lock for mp_state & reset_state.reset */
+ spinlock_t mp_state_lock;
+
+ /* vcpu reset state */
+ struct vcpu_reset_state reset_state;
+
+ /* GMAP */
+ struct gmap *gmap;
+ struct kvm_s390_mmu_cache *mc;
+
+ void *debugfs_state_data;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ /* ARM64 stats */
+ u64 hvc_exit_stat;
+ u64 wfe_exit_stat;
+ u64 wfi_exit_stat;
+ u64 mmio_exit_user;
+ u64 mmio_exit_kernel;
+ u64 signal_exits;
+ u64 exits;
+ /* GMAP stats */
+ u64 pfault_sync;
+};
+
+#define kvm_vcpu_get_sp_el1(__vcpu) (&((__vcpu)->arch.sae_block.sp_el1))
+#define kvm_vcpu_get_vreg(__vcpu, _off) (&(__vcpu)->arch.ctxt.vregs[_off])
+#define kvm_vcpu_get_vregs(__vcpu) (&(__vcpu)->arch.ctxt.vregs)
+#define kvm_vcpu_get_fpsr(__vcpu) (&(__vcpu)->arch.sae_block.fpsr)
+#define kvm_vcpu_get_fpcr(__vcpu) (&(__vcpu)->arch.sae_block.fpcr)
+
+#define __vcpu_flags_preempt_disable() preempt_disable()
+#define __vcpu_flags_preempt_enable() preempt_enable()
+
+#define _vcpu_get_flag(v, flagset, ...) \
+ __vcpu_get_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_set_flag(v, flagset, ...) \
+ __vcpu_set_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_clear_flag(v, flagset, ...) \
+ __vcpu_clear_flag(&(v)->arch.flagset, __VA_ARGS__)
+#define _vcpu_test_and_clear_flag(v, flagset, ...) \
+ __vcpu_test_and_clear_flag(&(v)->arch.flagset, __VA_ARGS__)
+
+#define kvm_has_mte(_kvm) false
+#define vcpu_has_sve(vcpu) false
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+ struct gmap *gmap;
+ u64 guest_phys_size;
+
+ /* VM-wide vCPU feature set */
+ unsigned long flags;
+
+ /* Protects VM-scoped configuration data */
+ struct mutex config_lock;
+
+ debug_info_t *dbf;
+
+ DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
+
+ unsigned long mem_limit;
+};
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+};
+
+#define kvm_vm_is_protected(_kvm) false
+
+#define KVM_HVA_ERR_BAD -1UL
+#define KVM_HVA_ERR_RO_BAD -2UL
+
+#define kvm_phys_size(__kvm) ((__kvm)->arch.guest_phys_size)
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return IS_ERR_VALUE(addr);
+}
+
+u32 get_kvm_ipa_limit(void);
+
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
+
+/* arm64 guests do not use async-pf. Defined because Kbuild requires it as s390 kvm turns it on. */
+#define ASYNC_PF_PER_VCPU 0
+struct kvm_arch_async_pf {
+ unsigned long pfault_token;
+};
+
+#define __unsupp_async_call(fn) WARN_ONCE(true, "async not supported on kvm-arm64 %s", fn)
+
+static inline bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
+{
+ __unsupp_async_call(__func__);
+ return false;
+};
+
+static inline void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work)
+{
+ __unsupp_async_call(__func__);
+};
+
+static inline bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work)
+{
+ __unsupp_async_call(__func__);
+ return false;
+};
+
+static inline void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work)
+{
+ __unsupp_async_call(__func__);
+};
+
+static inline void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
+{
+ __unsupp_async_call(__func__);
+};
+
+#endif /* ASM_KVM_HOST_ARM64_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 21/27] s390/hwcaps: Report SAE support as hwcap
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (19 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 20/27] KVM: s390: arm64: Introduce host definitions Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 22/27] KVM: s390: Add basic arm64 kvm module Steffen Eiden
` (6 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
From: Hendrik Brueckner <brueckner@linux.ibm.com>
Report SAE support as hwcap (and /proc/cpuinfo)
Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/include/asm/elf.h | 2 ++
arch/s390/include/asm/sclp.h | 5 ++++-
arch/s390/kernel/processor.c | 3 +++
drivers/s390/char/sclp_early.c | 1 +
4 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index bb63fa4d20bb..ad3108ecfb07 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -123,6 +123,7 @@ enum {
HWCAP_NR_NNPA = 20,
HWCAP_NR_PCI_MIO = 21,
HWCAP_NR_SIE = 22,
+ HWCAP_NR_SAE = 23,
HWCAP_NR_MAX
};
@@ -150,6 +151,7 @@ enum {
#define HWCAP_NNPA BIT(HWCAP_NR_NNPA)
#define HWCAP_PCI_MIO BIT(HWCAP_NR_PCI_MIO)
#define HWCAP_SIE BIT(HWCAP_NR_SIE)
+#define HWCAP_SAE BIT(HWCAP_NR_SAE)
/*
* These are used to set parameters in the core dumps.
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 0f184dbdbe5e..18e46654227a 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -52,7 +52,9 @@ struct sclp_core_entry {
u8 siif : 1;
u8 sigpif : 1;
u8 : 3;
- u8 reserved2[3];
+ u8 aef: 1;
+ u8 : 7;
+ u8 reserved2[2];
u8 : 2;
u8 ib : 1;
u8 cei : 1;
@@ -104,6 +106,7 @@ struct sclp_info {
unsigned char has_aisii : 1;
unsigned char has_aeni : 1;
unsigned char has_aisi : 1;
+ unsigned char has_aef : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index e33a3eccda56..6da55a158027 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -150,6 +150,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
[HWCAP_NR_NNPA] = "nnpa",
[HWCAP_NR_PCI_MIO] = "pcimio",
[HWCAP_NR_SIE] = "sie",
+ [HWCAP_NR_SAE] = "sae",
};
int i, cpu;
@@ -254,6 +255,8 @@ static int __init setup_hwcaps(void)
/* virtualization support */
if (sclp.has_sief2)
elf_hwcap |= HWCAP_SIE;
+ if (sclp.has_aef)
+ elf_hwcap |= HWCAP_SAE;
return 0;
}
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 6bf501ad8ff0..26a76f09b19a 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -94,6 +94,7 @@ static void __init sclp_early_facilities_detect(void)
sclp.has_ib = cpue->ib;
sclp.has_cei = cpue->cei;
sclp.has_skey = cpue->skey;
+ sclp.has_aef = cpue->aef;
break;
}
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 22/27] KVM: s390: Add basic arm64 kvm module
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (20 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 21/27] s390/hwcaps: Report SAE support as hwcap Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 23/27] KVM: s390: arm64: Implement required functions Steffen Eiden
` (5 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Add basic code for the new arm64 on s390 KVM implementation.
Add kernel module boilerplate code and trivial functions.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/arm64/arm.c | 182 ++++++++++++++++++++++++++++++++++++
arch/s390/kvm/arm64/arm.h | 7 ++
arch/s390/kvm/arm64/guest.c | 95 +++++++++++++++++++
arch/s390/kvm/arm64/guest.h | 10 ++
4 files changed, 294 insertions(+)
create mode 100644 arch/s390/kvm/arm64/arm.c
create mode 100644 arch/s390/kvm/arm64/arm.h
create mode 100644 arch/s390/kvm/arm64/guest.c
create mode 100644 arch/s390/kvm/arm64/guest.h
diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
new file mode 100644
index 000000000000..8f94eb8fe288
--- /dev/null
+++ b/arch/s390/kvm/arm64/arm.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KMSG_COMPONENT "kvm-s390-arm64"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/miscdevice.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+#include "arm.h"
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+{
+ int ret;
+
+ switch (ext) {
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ case KVM_CAP_MAX_VCPU_ID:
+ ret = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ ret = get_kvm_ipa_limit();
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static u64 kvm_max_guest_address(void)
+{
+ u64 max_addr;
+
+ if (sclp.hamax == U64_MAX)
+ max_addr = TASK_SIZE_MAX;
+ else
+ max_addr = min_t(u64, TASK_SIZE_MAX, sclp.hamax);
+ return ALIGN_DOWN(max_addr + 1, 1 << 30) - 1;
+}
+
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+u32 get_kvm_ipa_limit(void)
+{
+ return fls64(kvm_max_guest_address() + 1) - 1;
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+ return 0;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ *mp_state = READ_ONCE(vcpu->arch.mp_state);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+ return 0;
+}
+
+unsigned long system_supported_vcpu_features(void)
+{
+ return KVM_VCPU_VALID_FEATURES;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ bool line_status)
+{
+ return 0;
+}
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset,
+ unsigned long mask)
+{
+}
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+ return false;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+{
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return -EINVAL;
+}
+
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+__weak bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif
+
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static __init int kvm_s390_arm64_init(void)
+{
+ if (!sclp.has_aef)
+ return -ENXIO;
+
+ return kvm_init_with_dev(sizeof(struct kvm_vcpu), 0, THIS_MODULE,
+ KVM_DEV_NAME, MISC_DYNAMIC_MINOR);
+}
+
+static __exit void kvm_s390_arm64_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvm_s390_arm64_init);
+module_exit(kvm_s390_arm64_exit);
diff --git a/arch/s390/kvm/arm64/arm.h b/arch/s390/kvm/arm64/arm.h
new file mode 100644
index 000000000000..a3db254462c0
--- /dev/null
+++ b/arch/s390/kvm/arm64/arm.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_S390_KVM_ARM64_H
+#define ARCH_S390_KVM_ARM64_H
+
+#define KVM_DEV_NAME "kvm-arm64"
+
+#endif /* ARCH_S390_KVM_ARM64_H */
diff --git a/arch/s390/kvm/arm64/guest.c b/arch/s390/kvm/arm64/guest.c
new file mode 100644
index 000000000000..00886755accf
--- /dev/null
+++ b/arch/s390/kvm/arm64/guest.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include "guest.h"
+
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ /* ARM64 stats */
+ STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_user),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, exits),
+ /* GMAP stats */
+ STATS_DESC_COUNTER(VCPU, pfault_sync),
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
+};
+
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ return copy_core_reg_indices(vcpu, uindices);
+}
+
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+{
+ return num_core_regs(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
diff --git a/arch/s390/kvm/arm64/guest.h b/arch/s390/kvm/arm64/guest.h
new file mode 100644
index 000000000000..db635d513c2c
--- /dev/null
+++ b/arch/s390/kvm/arm64/guest.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef KVM_ARM_GUEST_H
+#define KVM_ARM_GUEST_H
+
+#include <linux/kvm_host.h>
+#include <kvm/arm64/guest.h>
+
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+
+#endif /* KVM_ARM_GUEST_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 23/27] KVM: s390: arm64: Implement required functions
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (21 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 22/27] KVM: s390: Add basic arm64 kvm module Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 24/27] KVM: s390: arm64: Implement vm/vcpu create destroy Steffen Eiden
` (4 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Implement the mostly trivial functions that the shared arm64 (kvm)
headers oblige s390 to implement.
Implement a very basic smccc handler that (non-compliantly) is just able
to stop a vcpu.
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/include/asm/kvm_emulate.h | 135 ++++++++++++++++++++++++++++
arch/s390/include/asm/kvm_mmu.h | 12 +++
arch/s390/include/asm/kvm_nested.h | 13 +++
arch/s390/kvm/arm64/handle_exit.c | 50 +++++++++++
arch/s390/kvm/arm64/inject_fault.c | 15 ++++
5 files changed, 225 insertions(+)
create mode 100644 arch/s390/include/asm/kvm_emulate.h
create mode 100644 arch/s390/include/asm/kvm_mmu.h
create mode 100644 arch/s390/include/asm/kvm_nested.h
create mode 100644 arch/s390/kvm/arm64/handle_exit.c
create mode 100644 arch/s390/kvm/arm64/inject_fault.c
diff --git a/arch/s390/include/asm/kvm_emulate.h b/arch/s390/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..bf019005e137
--- /dev/null
+++ b/arch/s390/include/asm/kvm_emulate.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Emulation functionality for arm64 guests.
+ */
+
+#ifndef __S390_ARM64_KVM_EMULATE_H__
+#define __S390_ARM64_KVM_EMULATE_H__
+
+#include <asm/fault.h>
+#include <asm/ptrace.h>
+#include <linux/kvm_host.h>
+
+#include <kvm/arm64/kvm_arm.h>
+#include <kvm/arm64/kvm_emulate.h>
+
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.pc;
+}
+
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.pstate;
+}
+
+static __always_inline unsigned long *vcpu_sp_el0(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.sp_el0;
+}
+
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.esr_elz;
+}
+
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.far_elz;
+}
+
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.teid.addr * PAGE_SIZE;
+}
+
+static inline u16 kvm_vcpu_fault_pic(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.pic & PGM_INT_CODE_MASK;
+}
+
+/* Should be unreachable, arm64 on s390 does not claim KVM_CAP_ARM_NISV_TO_USER*/
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR);
+}
+
+static __always_inline
+bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_fault_pic(vcpu) == PGM_PROTECTION;
+}
+
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_elz = HCR_E2H | HCR_RW | HCR_PTW;
+ /* traps */
+ vcpu->arch.hcr_elz |= HCR_TSC | HCR_TID1 | HCR_TID2 | HCR_TID3 |
+ HCR_TID4 | HCR_TID5 | HCR_TIDCP;
+}
+
+static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+{
+ WARN(true, "not implemented, just feat RAS");
+
+ return 0L;
+}
+
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+ WARN(true, "not implemented, just feat RAS");
+}
+
+static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.hcr_elz;
+}
+
+static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline int kvm_vcpu_abt_gltl(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.gltl;
+}
+
+static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+{
+ u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+
+ return mode != PSR_MODE_EL0t;
+}
+
+#endif /* __S390_ARM64_KVM_EMULATE_H__ */
diff --git a/arch/s390/include/asm/kvm_mmu.h b/arch/s390/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..ac354fd5bac9
--- /dev/null
+++ b/arch/s390/include/asm/kvm_mmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KVM MMU for arm64 guests.
+ */
+#ifndef __S390_ARM64_KVM_MMU_H__
+#define __S390_ARM64_KVM_MMU_H__
+
+#include <linux/kvm_host.h>
+
+#include <kvm/arm64/kvm_mmu.h>
+
+#endif /* __S390_ARM64_KVM_MMU_H__ */
diff --git a/arch/s390/include/asm/kvm_nested.h b/arch/s390/include/asm/kvm_nested.h
new file mode 100644
index 000000000000..7158932e718b
--- /dev/null
+++ b/arch/s390/include/asm/kvm_nested.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Nested KVM for arm64 guests. (Not supported by s390)
+ */
+#ifndef ASM_KVM_NESTED_H
+#define ASM_KVM_NESTED_H
+
+static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+#endif /* ASM_KVM_NESTED_H */
diff --git a/arch/s390/kvm/arm64/handle_exit.c b/arch/s390/kvm/arm64/handle_exit.c
new file mode 100644
index 000000000000..89933a604876
--- /dev/null
+++ b/arch/s390/kvm/arm64/handle_exit.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kvm_host.h>
+
+#include <asm/esr.h>
+#include <asm/kvm_emulate.h>
+
+#include <kvm/arm64/handle_exit.h>
+
+#define PSCI_0_2_FN_SYSTEM_OFF 0x84000008
+#define PSCI_RET_NOT_SUPPORTED -1
+#define PSCI_RET_INTERNAL_FAILURE -6
+/*
+ * Temporary smc/hvc handler. Non-compliant implementation (features missing).
+ * Implements only system off so that test programs are able to end their execution
+ */
+static int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
+{
+ u32 func_id = vcpu_get_reg(vcpu, 0);
+ u64 val = PSCI_RET_NOT_SUPPORTED;
+ int ret = 1;
+
+ if (func_id == PSCI_0_2_FN_SYSTEM_OFF) {
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+ memset(&vcpu->run->system_event, 0,
+ sizeof(vcpu->run->system_event));
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = 0;
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ val = PSCI_RET_INTERNAL_FAILURE;
+ ret = 0;
+ }
+ vcpu_set_reg(vcpu, 0, val);
+
+ return ret;
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.hvc_exit_stat++;
+ return kvm_smccc_call_handler(vcpu);
+}
+
+exit_handle_fn arm_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
+ [ESR_ELx_EC_HVC64] = handle_hvc,
+};
diff --git a/arch/s390/kvm/arm64/inject_fault.c b/arch/s390/kvm/arm64/inject_fault.c
new file mode 100644
index 000000000000..d4058c3f226e
--- /dev/null
+++ b/arch/s390/kvm/arm64/inject_fault.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/kvm_emulate.h>
+
+/**
+ * kvm_inject_undefined - inject an undefined instruction into the guest
+ * @vcpu: The vCPU in which to inject the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+ /* Stub until s390 supports arm64 sysregs. TODO: sysregs */
+}
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 24/27] KVM: s390: arm64: Implement vm/vcpu create destroy.
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (22 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 23/27] KVM: s390: arm64: Implement required functions Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 25/27] KVM: s390: arm64: Implement vCPU IOCTLs Steffen Eiden
` (3 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Implement init and destroy IOCTLs for vcpu and vm.
Implement arch vm IOCTL. Use s390 gmap.
Co-developed-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/arm64/arm.c | 160 ++++++++++++++++++++++++++++++++++++++
arch/s390/kvm/arm64/arm.h | 54 +++++++++++++
2 files changed, 214 insertions(+)
diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
index 8f94eb8fe288..962d23f4e469 100644
--- a/arch/s390/kvm/arm64/arm.c
+++ b/arch/s390/kvm/arm64/arm.c
@@ -9,6 +9,8 @@
#include <linux/kvm_host.h>
#include "arm.h"
+#include "reset.h"
+#include "gmap.h"
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
@@ -41,6 +43,61 @@ static u64 kvm_max_guest_address(void)
return ALIGN_DOWN(max_addr + 1, 1 << 30) - 1;
}
+static int kvm_gmap_init(struct kvm *kvm)
+{
+ struct crst_table *table;
+
+ kvm->arch.gmap = gmap_new(kvm, kvm->arch.guest_phys_size);
+
+ if (!kvm->arch.gmap)
+ return -ENOMEM;
+
+ /* arm64 (on s390) do not have pfault */
+ clear_bit(GMAP_FLAG_PFAULT_ENABLED, &kvm->arch.gmap->flags);
+ set_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &kvm->arch.gmap->flags);
+
+ table = dereference_asce(kvm->arch.gmap->asce);
+ crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
+
+ return 0;
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ char debug_name[32];
+ int ret;
+
+ if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+ return -EINVAL;
+
+ ret = kvm_vm_type_ipa_size_shift(type);
+ if (ret < 0)
+ return ret;
+ kvm->arch.guest_phys_size = 1UL << ret;
+
+ mutex_init(&kvm->arch.config_lock);
+ bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
+
+ snprintf(debug_name, sizeof(debug_name), "kvm-arm64-%u", current->pid);
+ kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
+ if (!kvm->arch.dbf)
+ return -ENOMEM;
+ debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
+
+ ret = kvm_gmap_init(kvm);
+ if (ret)
+ goto out_err;
+ kvm->arch.mem_limit = kvm_max_guest_address();
+
+ VM_EVENT(kvm, 3, "vm created with type %lu", type);
+ return 0;
+
+out_err:
+ debug_unregister(kvm->arch.dbf);
+
+ return ret;
+}
+
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -52,6 +109,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvm_destroy_vcpus(kvm);
+ debug_unregister(kvm->arch.dbf);
+ kvm->arch.gmap = gmap_put(kvm->arch.gmap);
+}
+
u32 get_kvm_ipa_limit(void)
{
return fls64(kvm_max_guest_address() + 1) - 1;
@@ -62,10 +126,39 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
return 0;
}
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+
+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
+ /* Force users to call KVM_ARM_VCPU_INIT */
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
+
+ vcpu->arch.mc = kvm_s390_new_mmu_cache();
+ if (!vcpu->arch.mc)
+ return -ENOMEM;
+
+ sae_block->hbasce = vcpu->kvm->arch.gmap->asce.val;
+ sae_block->mso = 0L;
+ sae_block->msl = kvm_max_guest_address();
+
+ VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sae block at 0x%p, satellite at 0x%p",
+ vcpu->vcpu_id, vcpu, &vcpu->arch.sae_block, &vcpu->arch.save_area);
+ return 0;
+}
+
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_s390_free_mmu_cache(vcpu->arch.mc);
+
+ VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+}
+
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}
@@ -103,6 +196,52 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return 0;
}
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ return gmap_get_dirty_log(kvm, log);
+}
+
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ scoped_guard(read_lock, &kvm->mmu_lock)
+ return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
+}
+
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ gfn_t last_gfn = memslot->base_gfn + memslot->npages;
+
+ scoped_guard(read_lock, &kvm->mmu_lock)
+ gmap_sync_dirty_log(kvm->arch.gmap, memslot->base_gfn, last_gfn);
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ return gmap_prepare_memory_region(kvm, old, new, change);
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ gmap_commit_memory_region(kvm, old, new, change);
+}
+
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
+}
+
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return gmap_test_age_gfn(kvm, range);
+}
+
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
@@ -110,6 +249,27 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
{
}
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
+ switch (ioctl) {
+ case KVM_ARM_PREFERRED_TARGET: {
+ struct kvm_vcpu_init init = {
+ .target = KVM_ARM_TARGET_GENERIC_V8,
+ };
+
+ if (copy_to_user(argp, &init, sizeof(init)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return false;
diff --git a/arch/s390/kvm/arm64/arm.h b/arch/s390/kvm/arm64/arm.h
index a3db254462c0..9571e6e1851a 100644
--- a/arch/s390/kvm/arm64/arm.h
+++ b/arch/s390/kvm/arm64/arm.h
@@ -4,4 +4,58 @@
#define KVM_DEV_NAME "kvm-arm64"
+#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, KVM_DEV_NAME ": " d_string "\n", d_args); \
+} while (0)
+
+#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...) \
+ do { \
+ debug_sprintf_event( \
+ (d_vcpu)->kvm->arch.dbf, d_loglevel, \
+ "KVM_DEV_NAME %02d[%016llx-%016llx]: " d_string "\n", \
+ (d_vcpu)->vcpu_id, (d_vcpu)->arch.sae_block.pstate, \
+ (d_vcpu)->arch.sae_block.pc, d_args); \
+ } while (0)
+
+static __always_inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static __always_inline int kvm_is_ucontrol(struct kvm *kvm)
+{
+ return 0;
+}
+
+static __always_inline int __kvm_s390_pv_destroy_page(struct page *page)
+{
+ return 0;
+}
+
+static __always_inline void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, gpa_t start, gpa_t end)
+{
+}
+
+static __always_inline int kvm_s390_pv_get_handle(struct kvm *kvm)
+{
+ return 0;
+}
+
+static __always_inline int kvm_s390_is_migration_mode(struct kvm *kvm)
+{
+ return false;
+}
+
+static __always_inline bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* should never be called */
+static __always_inline int kvm_s390_vm_stop_migration(struct kvm *kvm)
+{
+ return -EINVAL;
+}
+
#endif /* ARCH_S390_KVM_ARM64_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 25/27] KVM: s390: arm64: Implement vCPU IOCTLs
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (23 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 24/27] KVM: s390: arm64: Implement vm/vcpu create destroy Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 26/27] KVM: s390: arm64: Implement basic page fault handler Steffen Eiden
` (2 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Implement all vCPU IOCTLs.
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/arm64/arm.c | 361 ++++++++++++++++++++++++++++++++++++
arch/s390/kvm/arm64/guest.c | 71 ++++++-
arch/s390/kvm/arm64/guest.h | 5 +
arch/s390/kvm/arm64/reset.c | 42 +++++
arch/s390/kvm/arm64/reset.h | 11 ++
5 files changed, 488 insertions(+), 2 deletions(-)
create mode 100644 arch/s390/kvm/arm64/reset.c
create mode 100644 arch/s390/kvm/arm64/reset.h
diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
index 962d23f4e469..71562a0c438c 100644
--- a/arch/s390/kvm/arm64/arm.c
+++ b/arch/s390/kvm/arm64/arm.c
@@ -8,7 +8,15 @@
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
+#include <asm/access-regs.h>
+#include <asm/kvm_emulate.h>
+#include <asm/sae.h>
+
+#include <kvm/arm64/handle_exit.h>
+#include "kvm/arm64/kvm_emulate.h"
+
#include "arm.h"
+#include "guest.h"
#include "reset.h"
#include "gmap.h"
@@ -167,6 +175,22 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ save_access_regs(&vcpu->arch.host_acrs[0]);
+ vcpu->cpu = cpu;
+
+ lasrm(&vcpu->arch.save_area);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ stiasrm(&vcpu->arch.save_area);
+
+ vcpu->cpu = -1;
+ restore_access_regs(&vcpu->arch.host_acrs[0]);
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
@@ -190,12 +214,349 @@ unsigned long system_supported_vcpu_features(void)
return KVM_VCPU_VALID_FEATURES;
}
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+ return vcpu_mode_priv(vcpu);
+}
+
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
+
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ return -EPERM;
+
+ if (likely(READ_ONCE(vcpu->pid)))
+ return 0;
+
+ return 0;
+}
+
+/**
+ * check_vcpu_requests - check and handle pending vCPU requests
+ * @vcpu: the VCPU pointer
+ *
+ * Return: 1 if we should enter the guest
+ * 0 if we should exit to userspace
+ * < 0 if we should exit to userspace, where the return value indicates
+ * an error
+ */
+static int check_vcpu_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_request_pending(vcpu)) {
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+ /*
+ * Clear IRQ_PENDING requests that were made to guarantee
+ * that a VCPU sees new virtual interrupts.
+ */
+ kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+ }
+
+ return 1;
+}
+
+static int kvm_vcpu_initialize(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ unsigned long features = init->features[0];
+ struct kvm *kvm = vcpu->kvm;
+ int ret = -EINVAL;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
+ kvm_vcpu_init_changed(vcpu, init))
+ goto out_unlock;
+
+ bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
+
+ kvm_reset_vcpu(vcpu);
+
+ set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
+ vcpu_set_flag(vcpu, VCPU_INITIALIZED);
+
+ if (kvm_vcpu_init_changed(vcpu, init))
+ goto out_unlock;
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&kvm->arch.config_lock);
+ return ret;
+}
+
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init)
+{
+ int ret;
+
+ if (init->target != KVM_ARM_TARGET_GENERIC_V8)
+ return -EINVAL;
+
+ ret = kvm_vcpu_init_check_features(vcpu, init);
+ if (ret)
+ return ret;
+
+ if (!kvm_vcpu_initialized(vcpu))
+ return kvm_vcpu_initialize(vcpu, init);
+
+ kvm_reset_vcpu(vcpu);
+
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_init *init)
+{
+ struct kvm_sae_save_area *save_area = &vcpu->arch.save_area;
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+ bool power_off = false;
+ int ret;
+
+ sae_block->save_area = virt_to_phys(save_area);
+ save_area->sdo = virt_to_phys(sae_block);
+
+ if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
+ init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
+ power_off = true;
+ }
+
+ vcpu_load(vcpu);
+
+ ret = kvm_vcpu_set_target(vcpu, init);
+ if (ret)
+ goto out_put;
+
+ vcpu_reset_hcr(vcpu);
+
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ ret = 0;
+out_put:
+ vcpu_put(vcpu);
+ return ret;
+}
+
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
return 0;
}
+static void adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_get_flag(vcpu, INCREMENT_PC))
+ kvm_skip_instr(vcpu);
+}
+
+static void arm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+
+ adjust_pc(vcpu);
+
+ local_irq_disable();
+ guest_enter_irqoff();
+ local_irq_enable();
+
+ sae_block->icptr = 0;
+
+ sae64a(sae_block);
+
+ local_irq_disable();
+ guest_exit_irqoff();
+ local_irq_enable();
+}
+
+/** kvm_arch_vcpu_ioctl_run() - run arm64 vCPU
+ *
+ * Execute arm64 guest instructions using SAE.
+ *
+ * Returns:
+ * 1 enter the guest (should not be observed by userspace)
+ * 0 exit to userspace
+ * < 0 exit to userspace, where the return value indicates an error
+ *
+ *
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *kvm_run = vcpu->run;
+ u8 icptr;
+ int ret;
+
+ if (kvm_run->exit_reason == KVM_EXIT_MMIO) {
+ ret = kvm_handle_mmio_return(vcpu);
+ if (ret <= 0)
+ return ret;
+ }
+
+ vcpu_load(vcpu);
+
+ if (!vcpu->wants_to_run) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ kvm_sigset_activate(vcpu);
+
+ might_fault();
+
+ ret = 1;
+ do {
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ret = -EINTR;
+ continue;
+ }
+
+ if (need_resched())
+ schedule();
+
+ if (ret > 0)
+ ret = check_vcpu_requests(vcpu);
+
+ if (kvm_request_pending(vcpu))
+ continue;
+
+ vcpu->arch.sae_block.icptr = 0;
+
+ arm_vcpu_run(vcpu);
+
+ icptr = vcpu->arch.sae_block.icptr;
+ switch (icptr) {
+ case SAE_ICPTR_SPURIOUS:
+ break;
+ case SAE_ICPTR_VALIDITY:
+ WARN_ONCE(true, "SAE: validity intercept. vir: 0x%04x",
+ vcpu->arch.sae_block.vir);
+ ret = -EINVAL;
+ break;
+ case SAE_ICPTR_SYNCHRONOUS_EXCEPTION:
+ ret = handle_trap_exceptions(vcpu);
+ break;
+ default:
+ WARN_ONCE(true, "SAE: unknown interception reason 0x%02x", icptr);
+ ret = -EINVAL;
+ }
+ } while (ret > 0);
+
+ kvm_sigset_deactivate(vcpu);
+out:
+ if (unlikely(vcpu_get_flag(vcpu, INCREMENT_PC)))
+ adjust_pc(vcpu);
+
+ vcpu_put(vcpu);
+
+ return ret;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct kvm_device_attr attr;
+ int ret;
+
+ switch (ioctl) {
+ case KVM_ARM_VCPU_INIT: {
+ struct kvm_vcpu_init init;
+
+ ret = -EFAULT;
+ if (copy_from_user(&init, argp, sizeof(init)))
+ break;
+
+ ret = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ break;
+ }
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+
+ ret = -ENOEXEC;
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ break;
+
+ ret = -EFAULT;
+ if (copy_from_user(®, argp, sizeof(reg)))
+ break;
+
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_reset_vcpu(vcpu);
+
+ if (ioctl == KVM_SET_ONE_REG)
+ ret = kvm_arm_set_reg(vcpu, ®);
+ else
+ ret = kvm_arm_get_reg(vcpu, ®);
+ break;
+ }
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned int n;
+
+ ret = -ENOEXEC;
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ break;
+ ret = -EPERM;
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ break;
+ ret = -EFAULT;
+ if (copy_from_user(®_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_arm_num_regs(vcpu);
+ if (copy_to_user(user_list, ®_list, sizeof(reg_list)))
+ break;
+ ret = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ ret = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
+ case KVM_ARM_VCPU_FINALIZE: {
+ int what;
+
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
+
+ if (get_user(what, (const int __user *)argp))
+ return -EFAULT;
+
+ ret = kvm_arm_vcpu_finalize(vcpu, what);
+ break;
+ }
+ case KVM_SET_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_set_attr(vcpu, &attr);
+ break;
+ }
+ case KVM_GET_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_get_attr(vcpu, &attr);
+ break;
+ }
+ case KVM_HAS_DEVICE_ATTR: {
+ ret = -EFAULT;
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ break;
+ ret = kvm_arm_vcpu_has_attr(vcpu, &attr);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
diff --git a/arch/s390/kvm/arm64/guest.c b/arch/s390/kvm/arm64/guest.c
index 00886755accf..893d48037292 100644
--- a/arch/s390/kvm/arm64/guest.c
+++ b/arch/s390/kvm/arm64/guest.c
@@ -4,7 +4,7 @@
#include "guest.h"
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
@@ -17,7 +17,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
/* ARM64 stats */
STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
@@ -50,6 +50,73 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
return num_core_regs(vcpu);
}
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return get_core_reg(vcpu, reg);
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ /* We currently use nothing arch-specific in upper 32 bits */
+ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
+ return -EINVAL;
+
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return set_core_reg(vcpu, reg);
+ default:
+ return -EINVAL;
+ }
+}
+
+int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
diff --git a/arch/s390/kvm/arm64/guest.h b/arch/s390/kvm/arm64/guest.h
index db635d513c2c..847489fb81be 100644
--- a/arch/s390/kvm/arm64/guest.h
+++ b/arch/s390/kvm/arm64/guest.h
@@ -6,5 +6,10 @@
#include <kvm/arm64/guest.h>
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
#endif /* KVM_ARM_GUEST_H */
diff --git a/arch/s390/kvm/arm64/reset.c b/arch/s390/kvm/arm64/reset.c
new file mode 100644
index 000000000000..432c844ee858
--- /dev/null
+++ b/arch/s390/kvm/arm64/reset.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm64/reset.h>
+
+#include "reset.h"
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_reset_state reset_state;
+
+ spin_lock(&vcpu->arch.mp_state_lock);
+ reset_state = vcpu->arch.reset_state;
+ vcpu->arch.reset_state.reset = false;
+ spin_unlock(&vcpu->arch.mp_state_lock);
+
+ /*
+ * disable preemption around the vcpu reset as we might otherwise race with
+ * preempt notifiers which call stiasrm/lasrm from put/load
+ */
+ preempt_disable();
+
+ kvm_reset_vcpu_core_regs(vcpu);
+
+ if (reset_state.reset) {
+ *vcpu_pc(vcpu) = reset_state.pc;
+ vcpu_set_reg(vcpu, 0, reset_state.r0);
+ }
+
+ preempt_enable();
+}
+
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ return 0;
+}
diff --git a/arch/s390/kvm/arm64/reset.h b/arch/s390/kvm/arm64/reset.h
new file mode 100644
index 000000000000..a5c5304e47bc
--- /dev/null
+++ b/arch/s390/kvm/arm64/reset.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef KVM_ARM_RESET_H
+#define KVM_ARM_RESET_H
+
+#include <linux/kvm_host.h>
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
+
+#endif /* KVM_ARM_RESET_H */
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 26/27] KVM: s390: arm64: Implement basic page fault handler
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (24 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 25/27] KVM: s390: arm64: Implement vCPU IOCTLs Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 4:21 ` [PATCH v1 27/27] KVM: s390: arm64: Enable KVM_ARM64 config and Kbuild Steffen Eiden
2026-04-02 8:53 ` [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM David Hildenbrand (Arm)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Add host functionality to page in guest memory. If the guest does
something unexpected or illegal, exit to userspace, which very likely
has to stop guest execution. This behaviour will be changed to guest
error injections once all sysregs are accessible for the host.
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/kvm/arm64/arm.c | 1 +
arch/s390/kvm/arm64/handle_exit.c | 2 +
arch/s390/kvm/arm64/mmu.c | 153 ++++++++++++++++++++++++++++++
3 files changed, 156 insertions(+)
create mode 100644 arch/s390/kvm/arm64/mmu.c
diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
index 71562a0c438c..5bd6914b484d 100644
--- a/arch/s390/kvm/arm64/arm.c
+++ b/arch/s390/kvm/arm64/arm.c
@@ -435,6 +435,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->arch.sae_block.vir);
ret = -EINVAL;
break;
+ case SAE_ICPTR_HOST_ACCESS_EXCEPTION:
case SAE_ICPTR_SYNCHRONOUS_EXCEPTION:
ret = handle_trap_exceptions(vcpu);
break;
diff --git a/arch/s390/kvm/arm64/handle_exit.c b/arch/s390/kvm/arm64/handle_exit.c
index 89933a604876..debe8aa12c7c 100644
--- a/arch/s390/kvm/arm64/handle_exit.c
+++ b/arch/s390/kvm/arm64/handle_exit.c
@@ -46,5 +46,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
+ [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_HVC64] = handle_hvc,
};
diff --git a/arch/s390/kvm/arm64/mmu.c b/arch/s390/kvm/arm64/mmu.c
new file mode 100644
index 000000000000..6499d82a5d5c
--- /dev/null
+++ b/arch/s390/kvm/arm64/mmu.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+
+#include "faultin.h"
+
+static inline bool kvm_s390_cur_gmap_fault_is_write(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.pic == PGM_PROTECTION ||
+ vcpu->arch.sae_block.hai.teid.fsi == TEID_FSI_STORE;
+}
+
+/*
+ * user_mem_abort() - handle a dat fault for the gmap of a vcpu
+ *
+ * Return: 0 on success, < 0 in case of error.
+ * Context: The mm lock must not be held before calling. May sleep.
+ */
+static int user_mem_abort(struct kvm_vcpu *vcpu, gpa_t fault_ipa,
+ struct kvm_memory_slot *slot, hva_t hva)
+{
+ struct guest_fault f = { };
+ int ret;
+
+ if (kvm_s390_cur_gmap_fault_is_write(vcpu))
+ f.write_attempt = FOLL_WRITE;
+ f.gfn = gpa_to_gfn(fault_ipa);
+
+ ret = kvm_s390_faultin_gfn(vcpu, NULL, &f);
+ if (ret <= 0)
+ return ret;
+ if (ret == PGM_ADDRESSING)
+ /*
+ * Without the relevant sysregs we cannot do anything for now.
+ * Go back to userspace with an error. TODO sysreg handling
+ */
+ return -ENOEXEC;
+ KVM_BUG_ON(ret, vcpu->kvm);
+ return -EINVAL;
+}
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
+{
+ struct kvm_memory_slot *memslot;
+ bool translation = false;
+ phys_addr_t fault_ipa;
+ unsigned long esr;
+ unsigned long hva;
+ bool write_fault;
+ bool writable;
+ bool is_iabt;
+ int ret;
+ gfn_t gfn;
+ int idx;
+
+ esr = kvm_vcpu_get_esr(vcpu);
+ fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+ is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+
+ switch (kvm_vcpu_fault_pic(vcpu)) {
+ /* expected cases: */
+ case PGM_ASCE_TYPE:
+ case PGM_REGION_FIRST_TRANS:
+ case PGM_REGION_SECOND_TRANS:
+ case PGM_REGION_THIRD_TRANS:
+ case PGM_SEGMENT_TRANSLATION:
+ case PGM_PAGE_TRANSLATION:
+ translation = true;
+ break;
+ case PGM_PROTECTION:
+ break;
+ /* unexpected cases: */
+ case 0:
+ KVM_BUG(1, vcpu->kvm, "On MMU fault path but no fault occurred");
+ return -EFAULT;
+ default:
+ KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
+ vcpu->arch.sae_block.hai.pic, vcpu->arch.sae_block.hai.teid.val);
+ send_sig(SIGSEGV, current, 0);
+ return -EFAULT;
+ }
+
+ if (translation) {
+ /*
+ * For both cases:
+ * Without the relevant sysregs we cannot do anything for now.
+ * Go back to userspace with an error. TODO sysreg handling
+ */
+ if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit()))
+ return -ENOEXEC;
+
+ if (fault_ipa >= kvm_phys_size(vcpu->kvm))
+ return -ENOEXEC;
+ }
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ gfn = fault_ipa >> PAGE_SHIFT;
+
+ memslot = gfn_to_memslot(vcpu->kvm, gfn);
+ hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
+ write_fault = kvm_is_write_fault(vcpu);
+ if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+ ret = -ENOEXEC;
+ /*
+ * The guest has put either its instructions or its page-tables
+ * somewhere it shouldn't have. Userspace won't be able to do
+ * anything about this (there's no syndrome for a start).
+ *
+ * Without the relevant sysregs we cannot do anything for now.
+ * Go back to userspace with an error. TODO sysreg handling
+ */
+ if (is_iabt)
+ goto out_unlock;
+
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ /*
+ * Without the relevant sysregs we cannot do anything for now.
+ * Go back to userspace with an error. TODO sysreg handling
+ */
+ goto out_unlock;
+ }
+
+ /*
+ * Check for a cache maintenance operation. Assume the guest is
+ * cautious and skip instruction
+ */
+ if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
+ kvm_incr_pc(vcpu);
+ ret = 1;
+ goto out_unlock;
+ }
+
+ /*
+ * The IPA is reported as [MAX:12], so we need to
+ * complement it with the bottom 12 bits from the
+ * faulting VA. This is always 12 bits, irrespective
+ * of the page size.
+ */
+ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
+ ret = io_mem_abort(vcpu, fault_ipa);
+ goto out_unlock;
+ }
+
+ ret = user_mem_abort(vcpu, fault_ipa, memslot, hva);
+ if (!ret)
+ ret = 1;
+out_unlock:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ return ret;
+}
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v1 27/27] KVM: s390: arm64: Enable KVM_ARM64 config and Kbuild
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (25 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 26/27] KVM: s390: arm64: Implement basic page fault handler Steffen Eiden
@ 2026-04-02 4:21 ` Steffen Eiden
2026-04-02 8:53 ` [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM David Hildenbrand (Arm)
27 siblings, 0 replies; 33+ messages in thread
From: Steffen Eiden @ 2026-04-02 4:21 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, David Hildenbrand,
Gautam Gala, Hendrik Brueckner, Janosch Frank, Joey Gouly,
Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Add all Kbuild/Makefile configurations to build a second KVM module on
s390 implementing the arm64-KVM API. To prevent symbol conflicts with
kvm-s390 all internal symbols in kvm-arm64 are mangled if compiled as
built-in. The new module is named kvm-arm64.
As the build in this case does not go through the normal build process,
the module parameter handling would be messed up. By forcing
KBUILD_MODNAME to kvm-arm64, all parameters end up at the same location,
instead of under the object/basename of the object file in which each
parameter is introduced.
Co-developed-by: Andreas Grapentin <gra@linux.ibm.com>
Signed-off-by: Andreas Grapentin <gra@linux.ibm.com>
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Co-developed-by: Gautam Gala <ggala@linux.ibm.com>
Signed-off-by: Gautam Gala <ggala@linux.ibm.com>
Signed-off-by: Steffen Eiden <seiden@linux.ibm.com>
---
arch/s390/configs/defconfig | 1 +
arch/s390/kvm/Kconfig | 1 +
arch/s390/kvm/Makefile | 1 +
arch/s390/kvm/arm64/Kconfig | 23 ++++++++
arch/s390/kvm/arm64/Makefile | 107 +++++++++++++++++++++++++++++++++++
arch/s390/tools/Makefile | 2 +
6 files changed, 135 insertions(+)
create mode 100644 arch/s390/kvm/arm64/Kconfig
create mode 100644 arch/s390/kvm/arm64/Makefile
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index bbbb4d0df9dd..e10e9e1ad94d 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -58,6 +58,7 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KVM_S390=m
+CONFIG_KVM_ARM64=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
CONFIG_S390_MODULES_SANITY_TEST=m
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index f8d4a9a38dae..bbae58aa8bc4 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -20,5 +20,6 @@ config KVM
tristate
source "arch/s390/kvm/s390/Kconfig"
+source "arch/s390/kvm/arm64/Kconfig"
endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index c43d7dffca13..38bdd7c9b42d 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -4,3 +4,4 @@
# Copyright IBM Corp. 2008
obj-$(CONFIG_KVM_S390) += s390/
+obj-$(CONFIG_KVM_ARM64) += arm64/
diff --git a/arch/s390/kvm/arm64/Kconfig b/arch/s390/kvm/arm64/Kconfig
new file mode 100644
index 000000000000..6794bb0436e9
--- /dev/null
+++ b/arch/s390/kvm/arm64/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+source "virt/kvm/Kconfig"
+
+config KVM_ARM64
+ def_tristate y
+ prompt "Kernel-based Virtual Machine (KVM) support for arm64 guests"
+ select KVM
+ select KVM_VFIO
+ select IRQ_BYPASS_MANAGER
+ select SCHED_INFO
+ select XARRAY_MULTI
+ select KVM_COMMON
+ select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_MSI
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQ_ROUTING
+ select HAVE_KVM_VCPU_RUN_PID_CHANGE
+ select GUEST_PERF_EVENTS if PERF_EVENTS
+ help
+ Support hosting virtualized arm64 guest machines on s390 host machines.
+
+ If unsure, say N.
diff --git a/arch/s390/kvm/arm64/Makefile b/arch/s390/kvm/arm64/Makefile
new file mode 100644
index 000000000000..9fa3f209c320
--- /dev/null
+++ b/arch/s390/kvm/arm64/Makefile
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KVM := ../../../../virt/kvm
+include $(srctree)/virt/kvm/Makefile.kvm
+include $(srctree)/virt/kvm/arm64/Makefile.kvm
+include $(srctree)/arch/s390/kvm/gmap/Makefile
+
+ccflags-y += -I $(src) -I$(srctree)/arch/s390/kvm/gmap -DKVM_S390_ARM64
+
+kvm-arm64-obj := \
+ arm.o \
+ guest.o \
+ handle_exit.o \
+ inject_fault.o \
+ reset.o \
+ mmu.o \
+
+kvm-arm64-obj += $(patsubst %.o,%-arm64.o,$(shared-arm64-obj))
+kvm-arm64-obj += $(patsubst %.o,%-arm64.o,$(kvm-y))
+
+obj-$(CONFIG_KVM_ARM64) += kvm-arm64.o
+
+
+$(obj)/%-arm64.o: $(src)/%.c FORCE
+ @mkdir -p $(dir $@)
+ $(call if_changed_rule,cc_o_c)
+
+ifeq ($(CONFIG_KVM_ARM64),m)
+
+kvm-arm64-y = $(kvm-arm64-obj)
+
+else ifeq ($(CONFIG_KVM_ARM64),y)
+
+KVM_ARM64_GEN_DIR :=$(objtree)/arch/${SRCARCH}/include/generated/asm
+KVM_ARM64_MODNAME_H := $(KVM_ARM64_GEN_DIR)/kvm_arm64_modname.h
+ccflags-y += -include $(KVM_ARM64_MODNAME_H)
+
+targets += $(notdir $(KVM_ARM64_MODNAME_H))
+
+quiet_cmd_kvm_arm64_modname_h = GEN $@
+ cmd_kvm_arm64_modname_h = { \
+ echo '/* Automatically generated; do not edit. */'; \
+ echo '\#ifndef _KVM_ARM64_MODNAME_H'; \
+ echo '\#define _KVM_ARM64_MODNAME_H'; \
+ echo '\#undef KBUILD_MODNAME'; \
+ echo '\#define KBUILD_MODNAME "kvm_arm64"'; \
+ echo '\#endif /* _KVM_ARM64_MODNAME_H */'; \
+ } > $@
+
+$(addprefix $(obj)/,$(kvm-arm64-obj)): $(KVM_ARM64_MODNAME_H)
+
+$(KVM_ARM64_MODNAME_H): FORCE
+ @mkdir -p $(KVM_ARM64_GEN_DIR)
+ $(call cmd,kvm_arm64_modname_h)
+
+prereq-o-cmd = $(foreach o, $(filter %.o, $^), $(dir $(o)).$(notdir $(o)).cmd)
+cmd_gen_symversions_o = \
+ grep --no-filename "^\#SYMVER" $(prereq-o-cmd) >> $(dot-target).cmd || true
+
+define rule_ld_o_o
+ $(call cmd_and_savecmd,ld)
+ $(call cmd,gen_symversions_o)
+endef
+
+LDFLAGS_kvm-unnamespaced.o := -r
+$(obj)/kvm-unnamespaced.o: $(addprefix $(obj)/,$(kvm-arm64-obj)) FORCE
+ $(call if_changed_rule,ld_o_o)
+
+# Make list of symbols to localize.
+# Collect normal/exported symbols. Use dict as set for deduplication.
+quiet_cmd_nm_filter = NMFLTR $@
+ cmd_nm_filter = \
+$(NM) -jU $< | awk ' \
+{ if (match($$0, /^__export_symbol_(.*)$$/, exp_sym)) { \
+ exp_syms[exp_sym[1]] = 1; \
+ } else { \
+ normal_syms[$$0] = 1; \
+ } \
+} \
+END { \
+ for (sym in normal_syms) { \
+ if (!(sym in exp_syms)) { \
+ print sym; \
+ } \
+ } \
+}' > $@
+
+$(obj)/kvm_symbol_list: $(obj)/kvm-unnamespaced.o FORCE
+ $(call if_changed,nm_filter)
+
+define rule_oc_o_o
+ $(call cmd_and_savecmd,objcopy)
+ $(call cmd,gen_objtooldep)
+ $(call cmd,gen_symversions_o)
+endef
+
+OBJCOPYFLAGS_kvm-namespaced.o := -O default --localize-symbols=$(obj)/kvm_symbol_list
+$(obj)/kvm-namespaced.o: $(obj)/kvm-unnamespaced.o $(obj)/kvm_symbol_list FORCE
+ $(call if_changed_rule,oc_o_o)
+
+kvm-arm64-y = kvm-namespaced.o
+
+endif
+
+obj-$(CONFIG_KVM_ARM64) += kvm-arm64.o
+
+LINUXINCLUDE := -I$(srctree)/include/arch/arm64/ $(LINUXINCLUDE)
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index f2862364fb42..921261dcde28 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -6,6 +6,8 @@
kapi := arch/$(ARCH)/include/generated/asm
kapi-hdrs-y := $(kapi)/facility-defs.h $(kapi)/dis-defs.h
+include $(srctree)/arch/arm64/tools/Makefile.sysreg
+
PHONY += kapi
kapi: $(kapi-hdrs-y)
--
2.51.0
^ permalink raw reply related [flat|nested] 33+ messages in thread* Re: [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM
2026-04-02 4:20 [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM Steffen Eiden
` (26 preceding siblings ...)
2026-04-02 4:21 ` [PATCH v1 27/27] KVM: s390: arm64: Enable KVM_ARM64 config and Kbuild Steffen Eiden
@ 2026-04-02 8:53 ` David Hildenbrand (Arm)
2026-04-02 10:07 ` Christian Borntraeger
27 siblings, 1 reply; 33+ messages in thread
From: David Hildenbrand (Arm) @ 2026-04-02 8:53 UTC (permalink / raw)
To: Steffen Eiden, kvm, kvmarm, linux-arm-kernel, linux-kernel,
linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Christian Borntraeger, Claudio Imbrenda, Gautam Gala,
Hendrik Brueckner, Janosch Frank, Joey Gouly, Marc Zyngier,
Nina Schoetterl-Glausch, Oliver Upton, Paolo Bonzini,
Suzuki K Poulose, Ulrich Weigand, Will Deacon, Zenghui Yu
>
> KVM on s390:
> The SAE (Start Arm Execution) instruction is introduced as the
> s390 mechanism for running Arm64 guests, and a new kvm-arm64 module is
> built up incrementally.
>
> Upcoming patch series will introduce system-register handling, interrupt
> support, hypercalls, and additional features such as PMU.
Pretty cool stuff.
What's the rough timeline for the other work?
Regarding I/O, I guess it is primarily VIRTIO (VIRTIO_PCI) for these VMs
only?
--
Cheers,
David
^ permalink raw reply [flat|nested] 33+ messages in thread* Re: [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM
2026-04-02 8:53 ` [PATCH v1 00/27] KVM: s390: Introduce arm64 KVM David Hildenbrand (Arm)
@ 2026-04-02 10:07 ` Christian Borntraeger
0 siblings, 0 replies; 33+ messages in thread
From: Christian Borntraeger @ 2026-04-02 10:07 UTC (permalink / raw)
To: David Hildenbrand (Arm), Steffen Eiden, kvm, kvmarm,
linux-arm-kernel, linux-kernel, linux-s390
Cc: Andreas Grapentin, Arnd Bergmann, Catalin Marinas,
Claudio Imbrenda, Gautam Gala, Hendrik Brueckner, Janosch Frank,
Joey Gouly, Marc Zyngier, Nina Schoetterl-Glausch, Oliver Upton,
Paolo Bonzini, Suzuki K Poulose, Ulrich Weigand, Will Deacon,
Zenghui Yu
Am 02.04.26 um 10:53 schrieb David Hildenbrand (Arm):
>>
>> KVM on s390:
>> The SAE (Start Arm Execution) instruction is introduced as the
>> s390 mechanism for running Arm64 guests, and a new kvm-arm64 module is
>> built up incrementally.
>>
>> Upcoming patch series will introduce system-register handling, interrupt
>> support, hypercalls, and additional features such as PMU.
>
> Pretty cool stuff.
>
> What's the rough timeline for the other work?
Over the next months. The idea was to split this into consumable chunks and start
with those things where a lot of people have to agree (code movement, code sharing
and shared maintainership). This will certainly evolve depending on patch feedback
and merge progress.
>
> Regarding I/O, I guess it is primarily VIRTIO (VIRTIO_PCI) for these VMs
> only?
yes, virtio-pci.
^ permalink raw reply [flat|nested] 33+ messages in thread