public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Sascha Bischoff <Sascha.Bischoff@arm.com>
To: "linux-arm-kernel@lists.infradead.org"
	<linux-arm-kernel@lists.infradead.org>,
	"kvmarm@lists.linux.dev" <kvmarm@lists.linux.dev>,
	"kvm@vger.kernel.org" <kvm@vger.kernel.org>
Cc: nd <nd@arm.com>, "maz@kernel.org" <maz@kernel.org>,
	"oliver.upton@linux.dev" <oliver.upton@linux.dev>,
	Joey Gouly <Joey.Gouly@arm.com>,
	Suzuki Poulose <Suzuki.Poulose@arm.com>,
	"yuzenghui@huawei.com" <yuzenghui@huawei.com>,
	"peter.maydell@linaro.org" <peter.maydell@linaro.org>,
	"lpieralisi@kernel.org" <lpieralisi@kernel.org>,
	Timothy Hayes <Timothy.Hayes@arm.com>
Subject: [PATCH 37/43] KVM: arm64: gic-v5: Handle userspace accesses to IRS MMIO region
Date: Mon, 27 Apr 2026 16:18:40 +0000	[thread overview]
Message-ID: <20260427160547.3129448-38-sascha.bischoff@arm.com> (raw)
In-Reply-To: <20260427160547.3129448-1-sascha.bischoff@arm.com>

As part of saving and restoring the state of a GICv5-based system,
userspace is required to save/restore the IRS MMIO registers. These
include important information, such as the guest IST configuration,
which KVM needs in order to present consistent state to the guest.

Provide accessors to read and write the IRS MMIO state. This is
modelled on what is already done for the GICv3 ITS as the idea is
broadly the same.

Where possible, the existing access mechanisms are reused, but for some
registers the access is handled differently as they have wider
effects. For example, some writes need to be sanitised to make sure
that the hardware is capable (e.g. the IST capabilities presented to
the guest). Similar restrictions apply to the SPI config, where we
block userspace from setting anything that doesn't match what has
already been set.

Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
---
 arch/arm64/kvm/vgic/vgic-irs-v5.c     | 494 +++++++++++++++++++-------
 arch/arm64/kvm/vgic/vgic-kvm-device.c |  24 ++
 arch/arm64/kvm/vgic/vgic.h            |   6 +
 3 files changed, 401 insertions(+), 123 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic-irs-v5.c b/arch/arm64/kvm/vgic/vgic-irs-v5.c
index 3a2539bf99d0d..761878fc956de 100644
--- a/arch/arm64/kvm/vgic/vgic-irs-v5.c
+++ b/arch/arm64/kvm/vgic/vgic-irs-v5.c
@@ -26,15 +26,58 @@ static struct vgic_v5_irs *vgic_v5_get_irs(struct kvm_vcpu *vcpu)
 	return vcpu->kvm->arch.vgic.vgic_v5_irs_data;
 }
 
+static int vgic_v5_read_irs_pe_cr0(struct kvm_vcpu *vcpu, u64 *val)
+{
+	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
+	struct gicv5_cmd_info cmd_info;
+	struct kvm_vcpu *target_vcpu;
+	int rc;
+
+	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, irs->pe_selr.iaffid);
+	if (!target_vcpu)
+		return -EINVAL;
+
+	/*
+	 * Read the corresponding IRS_VPE_CR0. We do so via the doorbell for the
+	 * specific vcpu we have in the PE_SELR.
+	 */
+	cmd_info.cmd_type = VPE_CR0_READ;
+	rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(target_vcpu), &cmd_info);
+	if (rc)
+		return rc;
+
+	*val = cmd_info.data;
+	return 0;
+}
+
+static int vgic_v5_write_irs_pe_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
+	struct gicv5_cmd_info cmd_info;
+	struct kvm_vcpu *target_vcpu;
+	int rc;
+
+	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, irs->pe_selr.iaffid);
+	if (!target_vcpu)
+		return -EINVAL;
+
+	/*
+	 * Write the corresponding IRS_VPE_CR0. We do so via the doorbell for
+	 * the specific vcpu we have in the PE_SELR.
+	 */
+	cmd_info.cmd_type = VPE_CR0_WRITE;
+	cmd_info.data = val;
+	rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(target_vcpu), &cmd_info);
+
+	return rc;
+}
+
 static unsigned long vgic_v5_mmio_read_irs_misc(struct kvm_vcpu *vcpu,
 						gpa_t addr, unsigned int len)
 {
 	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
 	const size_t offset = addr & (SZ_64K - 1);
-	struct gicv5_cmd_info cmd_info;
-	struct kvm_vcpu *target_vcpu;
 	u64 value = 0;
-	int rc;
 
 	switch (offset) {
 	case GICV5_IRS_IDR0:
@@ -127,32 +170,8 @@ static unsigned long vgic_v5_mmio_read_irs_misc(struct kvm_vcpu *vcpu,
 		}
 		break;
 	case GICV5_IRS_PE_CR0:
-		/*
-		 * Make sure that we are doing something reasonable first.
-		 * Remember, the IAFFID is the same as the VPE_ID
-		 */
-		target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, irs->pe_selr.iaffid);
-		if (!target_vcpu) {
-			kvm_err("Guest programmed invalid IAFFID (0x%x) into the IRS_PE_SELR\n",
-				irs->pe_selr.iaffid);
-			break;
-		}
-
-		mutex_lock(&vcpu->kvm->arch.config_lock);
-
-		/*
-		 * Read the corresponding IRS_VPE_CR0. We do so via the doorbell
-		 * for the specific vcpu we have in the PE_SELR.
-		 */
-		cmd_info.cmd_type = VPE_CR0_READ;
-		rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(target_vcpu), &cmd_info);
-		if (rc)
-			kvm_err("Could not read VPE_CR0 in IRS: %d\n", rc);
-		else
-			value = cmd_info.data;
-
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
-
+		scoped_guard(mutex, &vcpu->kvm->arch.config_lock)
+			WARN_ON_ONCE(vgic_v5_read_irs_pe_cr0(vcpu, &value));
 		break;
 	default:
 		return 0;
@@ -167,9 +186,6 @@ static void vgic_v5_mmio_write_irs_misc(struct kvm_vcpu *vcpu, gpa_t addr,
 	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
 	struct vgic_dist *vgic = vgic_v5_get_vgic(vcpu);
 	const size_t offset = addr & (SZ_64K - 1);
-	struct gicv5_cmd_info cmd_info;
-	struct kvm_vcpu *target_vcpu;
-	int rc;
 
 	switch (offset) {
 	case GICV5_IRS_CR0:
@@ -216,27 +232,8 @@ static void vgic_v5_mmio_write_irs_misc(struct kvm_vcpu *vcpu, gpa_t addr,
 		irs->pe_selr.iaffid = FIELD_GET(GICV5_IRS_PE_SELR_IAFFID, val);
 		return;
 	case GICV5_IRS_PE_CR0:
-		/*
-		 * Make sure that we are doing something reasonable first.
-		 * Remember, the IAFFID is the same as the VPE_ID.
-		 */
-		target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, irs->pe_selr.iaffid);
-		if (!target_vcpu)
-			return;
-
-		mutex_lock(&vcpu->kvm->arch.config_lock);
-
-		/*
-		 * Write the corresponding IRS_VPE_CR0. We do so via the
-		 * doorbell for the specific vcpu we have in the PE_SELR.
-		 */
-		cmd_info.cmd_type = VPE_CR0_WRITE;
-		cmd_info.data = val;
-		rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(target_vcpu), &cmd_info);
-		if (rc)
-			kvm_err("Could not update VPE_CR0 in IRS: %d\n", rc);
-
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		scoped_guard(mutex, &vcpu->kvm->arch.config_lock)
+			WARN_ON_ONCE(vgic_v5_write_irs_pe_cr0(vcpu, val));
 		return;
 	default:
 		return;
@@ -411,13 +408,56 @@ static unsigned long vgic_v5_mmio_read_irs_ist(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+static int vgic_v5_write_irs_ist_baser(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
+	struct gicv5_cmd_info cmd_info;
+	bool valid = FIELD_GET(GICV5_IRS_IST_BASER_VALID, val);
+	int rc;
+
+	/* Valid -> Invalid */
+	if (irs->ist_baser.valid && !valid) {
+		/* Make the LPI IST invalid and then ... */
+		cmd_info.cmd_type = LPI_VIST_MAKE_INVALID;
+		rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(vcpu), &cmd_info);
+		if (rc)
+			return rc;
+
+		/*
+		 * ... free the host IST if we successfully marked the
+		 * IST as invalid. Frankly, if we failed to mark the
+		 * guest's IST as invalid, we're cooked because it means
+		 * that the IRS may still be using the memory that we
+		 * want to free. Hence, we leave it allocated and skip
+		 * the clearing of the valid bit in the baser.
+		 */
+		rc = vgic_v5_lpi_ist_free(vcpu->kvm);
+		if (rc)
+			return rc;
+	} else if (!irs->ist_baser.valid && valid) { /* Invalid -> Valid */
+		if (!vgic_v5_ist_cfgr_valid(irs)) {
+			kvm_err("Guest programmed invalid IRS_IST_CFGR\n");
+			return -EINVAL;
+		}
+
+		rc = vgic_v5_lpi_ist_alloc(vcpu->kvm, irs->ist_cfgr.lpi_id_bits);
+		if (rc)
+			return rc;
+	}
+
+	/* Now that we've handled the edges, update the valid bit and addr */
+	irs->ist_baser.valid = FIELD_GET(GICV5_IRS_IST_BASER_VALID, val);
+	irs->ist_baser.addr = FIELD_GET(GICV5_IRS_IST_BASER_ADDR_MASK, val)
+		<< GICV5_IRS_IST_BASER_ADDR_SHIFT;
+
+	return 0;
+}
+
 static void vgic_v5_mmio_write_irs_ist(struct kvm_vcpu *vcpu, gpa_t addr,
 				       unsigned int len, unsigned long val)
 {
 	struct vgic_v5_irs *irs = vgic_v5_get_irs(vcpu);
 	const size_t offset = addr & (SZ_64K - 1);
-	struct gicv5_cmd_info cmd_info;
-	int rc;
 
 	switch (offset) {
 	case GICV5_IRS_IST_CFGR:
@@ -427,77 +467,191 @@ static void vgic_v5_mmio_write_irs_ist(struct kvm_vcpu *vcpu, gpa_t addr,
 		irs->ist_cfgr.structure = FIELD_GET(GICV5_IRS_IST_CFGR_STRUCTURE, val);
 		return;
 	case GICV5_IRS_IST_BASER: {
-		bool valid = FIELD_GET(GICV5_IRS_IST_BASER_VALID, val);
-
 		guard(mutex)(&vcpu->kvm->arch.config_lock);
+		if (WARN_ON_ONCE(vgic_v5_write_irs_ist_baser(vcpu, val)))
+			break;
+		return;
+	}
+	default:
+		return;
+	}
+}
 
-		/* Valid -> Invalid */
-		if (irs->ist_baser.valid && !valid) {
-			/* Make the LPI IST invalid and then ... */
-			cmd_info.cmd_type = LPI_VIST_MAKE_INVALID;
-			rc = irq_set_vcpu_affinity(vgic_v5_vpe_db(vcpu), &cmd_info);
-			if (WARN_ON_ONCE(rc))
-				break;
+static unsigned long vgic_v5_mmio_uaccess_read_irs(struct kvm_vcpu *vcpu,
+						   gpa_t addr, unsigned int len)
+{
+	const size_t offset = addr & (SZ_64K - 1);
+	u64 val = 0;
 
-			/*
-			 * ... free the host IST if we successfully marked the
-			 * IST as invalid. Frankly, if we failed to make the
-			 * guest's IST as invalid, we're cooked because it means
-			 * that the IRS may still be using the memory that we
-			 * want to free. Hence, we leave it allocated and skip
-			 * the clearing of valid bit in the baser.
-			 */
-			rc = vgic_v5_lpi_ist_free(vcpu->kvm);
-			if (WARN_ON_ONCE(rc))
-				break;
-		} else if (!irs->ist_baser.valid && valid) { /* Invalid -> Valid */
-			if (!vgic_v5_ist_cfgr_valid(irs)) {
-				kvm_err("Guest programmed invalid IRS_IST_CFGR\n");
-				break;
-			}
-
-			rc = vgic_v5_lpi_ist_alloc(vcpu->kvm,
-						   irs->ist_cfgr.lpi_id_bits);
-			if (WARN_ON_ONCE(rc))
-				break;
-		}
+	switch (offset) {
+	case GICV5_IRS_PE_CR0:
+		/* Read without trying to take the config_lock again */
+		if (vgic_v5_read_irs_pe_cr0(vcpu, &val))
+			return 0;
+		return val;
+	default:
+		return 0;
+	}
+}
 
-		/* Now that we've handled the edges, update the valid bit and addr */
-		irs->ist_baser.valid = FIELD_GET(GICV5_IRS_IST_BASER_VALID, val);
-		irs->ist_baser.addr = FIELD_GET(GICV5_IRS_IST_BASER_ADDR_MASK, val)
-			<< GICV5_IRS_IST_BASER_ADDR_SHIFT;
+static int vgic_v5_mmio_uaccess_write_irs(struct kvm_vcpu *vcpu, gpa_t addr,
+					  unsigned int len, unsigned long val)
+{
+	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
+	struct vgic_v5_irs *irs_data = vgic->vgic_v5_irs_data;
+	size_t offset = addr & (SZ_64K - 1);
 
-		return;
-	}
+	/*
+	 * The following registers are ONLY settable via uaccesses. The guest
+	 * cannot write them!
+	 */
+
+	switch (offset) {
+	case GICV5_IRS_IDR0:
+		if (FIELD_GET(GICV5_IRS_IDR0_DOM, val) !=
+		    GICV5_IRS_IDR0_DOMAIN_NON_SECURE)
+			return -EINVAL;
+
+		if (FIELD_GET(GICV5_IRS_IDR0_VIRT, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_ONEOFN, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_VIRT1OFN, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_SETLPI, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_MEC, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_MPAM, val) ||
+		    FIELD_GET(GICV5_IRS_IDR0_SWE, val))
+			return -EINVAL;
+
+		irs_data->idr0.domain = FIELD_GET(GICV5_IRS_IDR0_DOM, val);
+		irs_data->idr0.pa_range = FIELD_GET(GICV5_IRS_IDR0_PA_RANGE, val);
+		irs_data->idr0.virt = FIELD_GET(GICV5_IRS_IDR0_VIRT, val);
+		irs_data->idr0.one_of_n = FIELD_GET(GICV5_IRS_IDR0_ONEOFN, val);
+		irs_data->idr0.virt_one_of_n = FIELD_GET(GICV5_IRS_IDR0_VIRT1OFN, val);
+		irs_data->idr0.setlpi = FIELD_GET(GICV5_IRS_IDR0_SETLPI, val);
+		irs_data->idr0.mec = FIELD_GET(GICV5_IRS_IDR0_MEC, val);
+		irs_data->idr0.mpam = FIELD_GET(GICV5_IRS_IDR0_MPAM, val);
+		irs_data->idr0.swe = FIELD_GET(GICV5_IRS_IDR0_SWE, val);
+		irs_data->idr0.irs_id = FIELD_GET(GICV5_IRS_IDR0_IRSID, val);
+		break;
+	case GICV5_IRS_IDR1:
+		/* Ignore writes to PE_CNT as this is populated from num vcpus */
+
+		/*
+		 * The number of IAFFID bits supported. If userspace tries to
+		 * set something more than what we support, reject the write.
+		 */
+		if (FIELD_GET(GICV5_IRS_IDR1_IAFFID_BITS, val) > vgic_v5_vmte_vpe_id_bits(vcpu))
+			return -EINVAL;
+
+		if (FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, val) > 0b100)
+			return -EINVAL;
+
+		irs_data->idr1.priority_bits = FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, val);
+		break;
+	case GICV5_IRS_IDR2:
+		/* We always support LPIs */
+		if (!FIELD_GET(GICV5_IRS_IDR2_LPI, val))
+			return -EINVAL;
+
+		/* We only support LPIs with linear, non-metadata guest ISTs */
+		if (FIELD_GET(GICV5_IRS_IDR2_IST_LEVELS, val))
+			return -EINVAL;
+
+		if (FIELD_GET(GICV5_IRS_IDR2_ISTMD, val) ||
+		    FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, val))
+			return -EINVAL;
+
+		/* We can't present more bits than we have support for in HW */
+		if (FIELD_GET(GICV5_IRS_IDR2_ID_BITS, val) > vgic_v5_host_caps()->ist_id_bits)
+			return -EINVAL;
+
+		/* Min LPI ID bits must be greater than or equal to the HW */
+		if (FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, val) <
+		    vgic_v5_host_caps()->min_lpi_id_bits)
+			return -EINVAL;
+
+		if (FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, val) >
+		    FIELD_GET(GICV5_IRS_IDR2_ID_BITS, val))
+			return -EINVAL;
+
+		irs_data->idr2.istmd_sz = FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, val);
+		irs_data->idr2.istmd = FIELD_GET(GICV5_IRS_IDR2_ISTMD, val);
+		irs_data->idr2.ist_l2sz = FIELD_GET(GICV5_IRS_IDR2_IST_L2SZ, val);
+		irs_data->idr2.ist_levels = FIELD_GET(GICV5_IRS_IDR2_IST_LEVELS, val);
+		irs_data->idr2.min_lpi_id_bits = FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, val);
+		irs_data->idr2.id_bits = FIELD_GET(GICV5_IRS_IDR2_ID_BITS, val);
+		break;
+	case GICV5_IRS_IDR5:
+		if (FIELD_GET(GICV5_IRS_IDR5_SPI_RANGE, val) != irs_data->idr5.spi_range)
+			return -EINVAL;
+		break;
+	case GICV5_IRS_IDR6:
+		if (FIELD_GET(GICV5_IRS_IDR6_SPI_IRS_RANGE, val) != irs_data->idr6.spi_irs_range)
+			return -EINVAL;
+		break;
+	case GICV5_IRS_IDR7:
+		if (FIELD_GET(GICV5_IRS_IDR7_SPI_BASE, val) != irs_data->idr7.spi_base)
+			return -EINVAL;
+		break;
+	case GICV5_IRS_IST_BASER:
+		return vgic_v5_write_irs_ist_baser(vcpu, val);
+	case GICV5_IRS_SPI_CFGR:
+		fallthrough;
+	case GICV5_IRS_PE_SELR:
+		fallthrough;
+	case GICV5_IRS_IIDR:
+		fallthrough;
+	case GICV5_IRS_AIDR:
+		break;
+	case GICV5_IRS_PE_CR0:
+		/* Write without trying to take the config_lock again */
+		return vgic_v5_write_irs_pe_cr0(vcpu, val);
 	default:
-		return;
+		return -EINVAL;
 	}
+
+	return 0;
 }
 
 static const struct vgic_register_region vgic_v5_irs_registers[] = {
 	/*
 	 * This is the IRS_CONFIG_FRAME.
 	 */
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR0, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR1, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR2, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR0, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR1, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR2, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR3, vgic_mmio_read_raz,
 				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR4, vgic_mmio_read_raz,
 				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR5, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR6, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IDR7, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IIDR, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_AIDR, vgic_v5_mmio_read_irs_misc,
-				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR5, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR6, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IDR7, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_IIDR, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_AIDR, vgic_v5_mmio_read_irs_misc,
+					  vgic_mmio_write_wi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_CR0, vgic_v5_mmio_read_irs_misc,
 				  vgic_v5_mmio_write_irs_misc, 4,
 				  VGIC_ACCESS_32bit),
@@ -516,30 +670,34 @@ static const struct vgic_register_region vgic_v5_irs_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_SPI_SELR, vgic_v5_mmio_read_irs_spi,
 				  vgic_v5_mmio_write_irs_spi, 4,
 				  VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(
-		GICV5_IRS_SPI_DOMAINR, vgic_v5_mmio_read_irs_spi,
-		vgic_v5_mmio_write_irs_spi, 4, VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_SPI_DOMAINR, vgic_v5_mmio_read_irs_spi,
+				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_SPI_RESAMPLER, vgic_mmio_read_raz,
 				  vgic_mmio_write_wi, 4,
 				  VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_SPI_CFGR, vgic_v5_mmio_read_irs_spi,
-				  vgic_v5_mmio_write_irs_spi, 4,
-				  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(GICV5_IRS_SPI_CFGR,
+					  vgic_v5_mmio_read_irs_spi,
+					  vgic_v5_mmio_write_irs_spi, NULL,
+					  vgic_v5_mmio_uaccess_write_irs, 4,
+					  VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_SPI_STATUSR,
 				  vgic_v5_mmio_read_irs_spi, vgic_mmio_write_wi,
 				  4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_PE_SELR, vgic_v5_mmio_read_irs_misc,
-				  vgic_v5_mmio_write_irs_misc, 4,
-				  VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(
+		GICV5_IRS_PE_SELR, vgic_v5_mmio_read_irs_misc,
+		vgic_v5_mmio_write_irs_misc, NULL,
+		vgic_v5_mmio_uaccess_write_irs, 4, VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_PE_STATUSR,
 				  vgic_v5_mmio_read_irs_misc,
 				  vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_PE_CR0, vgic_v5_mmio_read_irs_misc,
-				  vgic_v5_mmio_write_irs_misc, 4,
-				  VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(
+	REGISTER_DESC_WITH_LENGTH_UACCESS(
+		GICV5_IRS_PE_CR0, vgic_v5_mmio_read_irs_misc,
+		vgic_v5_mmio_write_irs_misc, vgic_v5_mmio_uaccess_read_irs,
+		vgic_v5_mmio_uaccess_write_irs, 4, VGIC_ACCESS_32bit),
+	REGISTER_DESC_WITH_LENGTH_UACCESS(
 		GICV5_IRS_IST_BASER, vgic_v5_mmio_read_irs_ist,
-		vgic_v5_mmio_write_irs_ist, 8, VGIC_ACCESS_64bit),
+		vgic_v5_mmio_write_irs_ist, NULL,
+		vgic_v5_mmio_uaccess_write_irs, 8, VGIC_ACCESS_64bit),
 	REGISTER_DESC_WITH_LENGTH(GICV5_IRS_IST_CFGR, vgic_v5_mmio_read_irs_ist,
 				  vgic_v5_mmio_write_irs_ist, 4,
 				  VGIC_ACCESS_32bit),
@@ -825,3 +983,93 @@ int kvm_vgic_v5_irs_init(struct kvm *kvm, unsigned int nr_spis)
 
 	return 0;
 }
+
+int vgic_v5_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	const struct vgic_register_region *region;
+	struct vgic_reg_attr reg_attr;
+	struct kvm_vcpu *vcpu;
+	gpa_t addr, offset;
+	int ret, align;
+
+	ret = vgic_v5_parse_attr(dev, attr, &reg_attr);
+	if (ret)
+		return ret;
+
+	vcpu = reg_attr.vcpu;
+	addr = reg_attr.addr;
+
+	if (attr->group == KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS)
+		return vgic_v5_has_cpu_sysregs_attr(vcpu, attr);
+
+	offset = attr->attr;
+
+	if (IS_VGIC_ADDR_UNDEF(dev->kvm->arch.vgic.vgic_v5_irs_data->vgic_v5_irs_base))
+		return -ENXIO;
+
+	region = vgic_find_mmio_region(vgic_v5_irs_registers,
+				       ARRAY_SIZE(vgic_v5_irs_registers),
+				       offset);
+	if (!region)
+		return -ENXIO;
+
+	align = region->access_flags & VGIC_ACCESS_64bit ? 0x7 : 0x3;
+	if (offset & align)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Access the IRS MMIO Regs. Relevant locks have been taken by the calling code.
+ */
+int vgic_v5_irs_attr_regs_access(struct kvm_device *dev,
+				 struct kvm_device_attr *attr,
+				 u64 *reg, bool is_write)
+{
+	const struct vgic_register_region *region;
+	gpa_t addr, offset;
+	unsigned int len;
+	int align, ret = 0;
+
+	offset = attr->attr;
+
+	if (IS_VGIC_ADDR_UNDEF(dev->kvm->arch.vgic.vgic_v5_irs_data->vgic_v5_irs_base))
+		return -ENXIO;
+
+	region = vgic_find_mmio_region(vgic_v5_irs_registers,
+				       ARRAY_SIZE(vgic_v5_irs_registers),
+				       offset);
+	if (!region)
+		return -ENXIO;
+
+	/*
+	 * Although the spec supports upper/lower 32-bit accesses to
+	 * 64-bit IRS registers, the userspace ABI requires 64-bit
+	 * accesses to all 64-bit wide registers. We therefore only
+	 * support 32-bit accesses to 32-bit-wide registers.
+	 */
+	align = region->access_flags & VGIC_ACCESS_64bit ? 0x7 : 0x3;
+	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
+
+	if (offset & align)
+		return -EINVAL;
+
+	addr = dev->kvm->arch.vgic.vgic_v5_irs_data->vgic_v5_irs_base + offset;
+
+	if (is_write) {
+		if (region->uaccess_write)
+			ret = region->uaccess_write(kvm_get_vcpu(dev->kvm, 0),
+						    addr, len, *reg);
+		else
+			region->write(kvm_get_vcpu(dev->kvm, 0), addr, len, *reg);
+	} else {
+		if (region->uaccess_read)
+			*reg = region->uaccess_read(kvm_get_vcpu(dev->kvm, 0),
+						    addr, len);
+		else
+			*reg = region->read(kvm_get_vcpu(dev->kvm, 0), addr, len);
+	}
+
+	return ret;
+}
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index fd3b9cfbc2660..944923121e080 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -764,6 +764,30 @@ static int vgic_v5_get_userspace_ppis(struct kvm_device *dev,
 	return ret;
 }
 
+int vgic_v5_parse_attr(struct kvm_device *dev,
+			struct kvm_device_attr *attr,
+			struct vgic_reg_attr *reg_attr)
+{
+	unsigned long vgic_mpidr, mpidr_reg;
+
+	if (attr->group == KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS) {
+		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
+			KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
+
+		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
+		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
+	} else {
+		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
+	}
+
+	if (!reg_attr->vcpu)
+		return -EINVAL;
+
+	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	return 0;
+}
+
 static int vgic_v5_set_attr(struct kvm_device *dev,
 			    struct kvm_device_attr *attr)
 {
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 35eb048b5a0f7..736b538b40461 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -252,6 +252,8 @@ struct ap_list_summary {
 #define irqs_active_outside_lrs(s)		\
 	((s)->nr_act &&	irqs_outside_lrs(s))
 
+int vgic_v5_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
+		       struct vgic_reg_attr *reg_attr);
 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
 		       struct vgic_reg_attr *reg_attr);
 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
@@ -389,6 +391,10 @@ int vgic_v5_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *
 const struct sys_reg_desc *vgic_v5_get_sysreg_table(unsigned int *sz);
 int vgic_v5_irs_save_ists(struct kvm *kvm, struct kvm_device_attr *attr);
 int vgic_v5_irs_restore_ists(struct kvm *kvm, struct kvm_device_attr *attr);
+int vgic_v5_irs_attr_regs_access(struct kvm_device *dev,
+				 struct kvm_device_attr *attr,
+				 u64 *reg, bool is_write);
+int vgic_v5_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 
 #define for_each_visible_v5_ppi(__i, __k)		\
 	for_each_set_bit(__i, (__k)->arch.vgic.gicv5_vm.vgic_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS)
-- 
2.34.1

  parent reply	other threads:[~2026-04-27 16:19 UTC|newest]

Thread overview: 66+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-27 16:06 [PATCH 00/43] KVM: arm64: Add GICv5 IRS support Sascha Bischoff
2026-04-27 16:06 ` [PATCH 01/43] arm64/sysreg: Add GICv5 GIC VDPEND and VDRCFG encodings Sascha Bischoff
2026-04-27 16:06 ` [PATCH 02/43] arm64/sysreg: Update ICC_CR0_EL1 with LINK and LINK_IDLE fields Sascha Bischoff
2026-04-27 16:07 ` [PATCH 03/43] KVM: arm64: gic-v5: Add resident/non-resident hyp calls Sascha Bischoff
2026-04-28 14:28   ` Marc Zyngier
2026-05-01 16:40     ` Sascha Bischoff
2026-04-27 16:07 ` [PATCH 04/43] irqchip/gic-v5: Provide IRS config frame attrs to KVM Sascha Bischoff
2026-04-28 14:56   ` Marc Zyngier
2026-05-01 16:46     ` Sascha Bischoff
2026-04-27 16:07 ` [PATCH 05/43] KVM: arm64: gic-v5: Extract host IRS caps from IRS config frame Sascha Bischoff
2026-04-28 15:20   ` Marc Zyngier
2026-05-01 16:44     ` Sascha Bischoff
2026-04-27 16:08 ` [PATCH 06/43] KVM: arm64: gic-v5: Add VPE doorbell domain Sascha Bischoff
2026-04-28 16:40   ` Marc Zyngier
2026-05-01 16:54     ` Sascha Bischoff
2026-04-27 16:08 ` [PATCH 07/43] KVM: arm64: gic-v5: Create & manage VM and VPE tables Sascha Bischoff
2026-04-28 14:54   ` Vladimir Murzin
2026-05-01 16:42     ` Sascha Bischoff
2026-04-28 15:55   ` Joey Gouly
2026-04-29 10:25   ` Marc Zyngier
2026-04-27 16:08 ` [PATCH 08/43] KVM: arm64: gic-v5: Introduce guest IST alloc and management Sascha Bischoff
2026-04-29 14:29   ` Marc Zyngier
2026-04-27 16:09 ` [PATCH 09/43] KVM: arm64: gic-v5: Implement VMT/vIST IRS MMIO Ops Sascha Bischoff
2026-04-29 12:50   ` Joey Gouly
2026-04-29 16:04   ` Marc Zyngier
2026-04-27 16:09 ` [PATCH 10/43] KVM: arm64: gic-v5: Implement VPE " Sascha Bischoff
2026-04-30  8:46   ` Marc Zyngier
2026-04-27 16:09 ` [PATCH 11/43] KVM: arm64: gic-v5: Make VPEs valid in vgic_v5_reset() Sascha Bischoff
2026-04-30  9:37   ` Marc Zyngier
2026-04-27 16:10 ` [PATCH 12/43] KVM: arm64: gic-v5: Clear db_fired flag before making VPE non-resident Sascha Bischoff
2026-04-27 16:10 ` [PATCH 13/43] KVM: arm64: gic-v5: Make VPEs (non-)resident in vgic_load/put Sascha Bischoff
2026-04-30 10:26   ` Marc Zyngier
2026-04-27 16:10 ` [PATCH 14/43] KVM: arm64: gic-v5: Request VPE doorbells when going non-resident Sascha Bischoff
2026-04-30 10:37   ` Marc Zyngier
2026-04-27 16:11 ` [PATCH 15/43] KVM: arm64: gic-v5: Handle doorbells in kvm_vgic_vcpu_pending_irq() Sascha Bischoff
2026-04-27 16:11 ` [PATCH 16/43] KVM: arm64: gic-v5: Initialise and teardown VMTEs & doorbells Sascha Bischoff
2026-04-30 12:23   ` Marc Zyngier
2026-04-27 16:11 ` [PATCH 17/43] KVM: arm64: gic-v5: Enable VPE DBs on VPE reset and disable on teardown Sascha Bischoff
2026-04-27 16:12 ` [PATCH 18/43] KVM: arm64: gic-v5: Define remaining IRS MMIO registers Sascha Bischoff
2026-04-27 16:12 ` [PATCH 19/43] KVM: arm64: gic-v5: Introduce struct vgic_v5_irs and IRS base address Sascha Bischoff
2026-04-27 16:12 ` [PATCH 20/43] KVM: arm64: gic-v5: Add IRS IODEV to iodev_types and generic MMIO handlers Sascha Bischoff
2026-04-27 16:13 ` [PATCH 21/43] KVM: arm64: gic-v5: Add KVM_VGIC_V5_ADDR_TYPE_IRS to UAPI Sascha Bischoff
2026-04-27 16:13 ` [PATCH 22/43] KVM: arm64: gic-v5: Add GICv5 IRS IODEV and MMIO emulation Sascha Bischoff
2026-04-27 16:13 ` [PATCH 23/43] KVM: arm64: gic-v5: Set IRICHPPIDIS based on IRS enable state Sascha Bischoff
2026-04-27 16:14 ` [PATCH 24/43] KVM: arm64: gic-v5: Call IRS init/teardown from vgic_v5 init/teardown Sascha Bischoff
2026-04-27 16:14 ` [PATCH 25/43] KVM: arm64: gic-v5: Register the IRS IODEV Sascha Bischoff
2026-04-27 16:14 ` [PATCH 26/43] Documentation: KVM: Extend VGICv5 docs for KVM_VGIC_V5_ADDR_TYPE_IRS Sascha Bischoff
2026-04-27 16:15 ` [PATCH 27/43] KVM: arm64: selftests: Update vGICv5 selftest to set IRS address Sascha Bischoff
2026-04-27 16:15 ` [PATCH 28/43] KVM: arm64: gic-v5: Introduce SPI AP list Sascha Bischoff
2026-04-27 16:15 ` [PATCH 29/43] KVM: arm64: gic-v5: Add GIC VDPEND and GIC VDRCFG hyp calls Sascha Bischoff
2026-04-27 16:16 ` [PATCH 30/43] KVM: arm64: gic-v5: Track SPI state for in-flight SPIs Sascha Bischoff
2026-04-27 16:16 ` [PATCH 31/43] KVM: arm64: gic: Introduce set_pending_state() to irq_op Sascha Bischoff
2026-04-27 16:16 ` [PATCH 32/43] KVM: arm64: gic-v5: Support SPI injection Sascha Bischoff
2026-04-27 16:17 ` [PATCH 33/43] KVM: arm64: gic-v5: Add GICv5 SPI injection to irqfd Sascha Bischoff
2026-04-27 16:17 ` [PATCH 34/43] KVM: arm64: gic-v5: Mask per-vcpu PPI state in vgic_v5_finalize_ppi_state() Sascha Bischoff
2026-04-27 16:17 ` [PATCH 35/43] KVM: arm64: gic-v5: Add GICv5 EL1 sysreg userspace set/get interface Sascha Bischoff
2026-04-27 16:18 ` [PATCH 36/43] KVM: arm64: gic-v5: Implement save/restore mechanisms for ISTs Sascha Bischoff
2026-05-01 18:54   ` Vladimir Murzin
2026-04-27 16:18 ` Sascha Bischoff [this message]
2026-04-27 16:19 ` [PATCH 38/43] KVM: arm64: gic-v5: Add VGIC_GRP_IRS_REGS/VGIC_GRP_IST to UAPI Sascha Bischoff
2026-04-27 16:19 ` [PATCH 39/43] KVM: arm64: gic-v5: Plumb in has/set/get_attr for sysregs & IRS MMIO regs Sascha Bischoff
2026-04-27 16:19 ` [PATCH 40/43] Documentation: KVM: Document KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS for VGICv5 Sascha Bischoff
2026-04-27 16:20 ` [PATCH 41/43] Documentation: KVM: Add KVM_DEV_ARM_VGIC_GRP_IRS_REGS to VGICv5 docs Sascha Bischoff
2026-04-27 16:20 ` [PATCH 42/43] Documentation: KVM: Add docs for KVM_DEV_ARM_VGIC_GRP_IST Sascha Bischoff
2026-04-27 16:20 ` [PATCH 43/43] Documentation: KVM: Add the VGICv5 IRS save/restore sequences Sascha Bischoff
2026-04-30  8:57   ` Peter Maydell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260427160547.3129448-38-sascha.bischoff@arm.com \
    --to=sascha.bischoff@arm.com \
    --cc=Joey.Gouly@arm.com \
    --cc=Suzuki.Poulose@arm.com \
    --cc=Timothy.Hayes@arm.com \
    --cc=kvm@vger.kernel.org \
    --cc=kvmarm@lists.linux.dev \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=lpieralisi@kernel.org \
    --cc=maz@kernel.org \
    --cc=nd@arm.com \
    --cc=oliver.upton@linux.dev \
    --cc=peter.maydell@linaro.org \
    --cc=yuzenghui@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox