* [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 02/19] arm64: initial support for GICv3 Marc Zyngier
` (17 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
A few GICv2 low-level functions are actually very useful to GICv3,
and it makes some sense to share them across the two drivers.
They end up in their own file, with an additional parameter used
to perform an optional synchronization (unused on GICv2).
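To make the calling convention concrete, here is a toy user-space sketch of
the sync_access pattern the shared helpers rely on: the GICv2 caller passes
NULL, while the GICv3 caller passes a routine that waits for the
(re)distributor to acknowledge the writes (gic_dist_wait_for_rwp later in
this series). The snippet below is purely illustrative and is not kernel code.

/* Toy model of the optional sync_access callback (illustration only). */
#include <stdio.h>

static void config_irqs(int nr_irqs, void (*sync_access)(void))
{
	int i;

	/* Stand-in for the GIC_DIST_ENABLE_CLEAR writes in gic_dist_config(). */
	for (i = 32; i < nr_irqs; i += 32)
		printf("write ICENABLER[%d] = 0xffffffff\n", i / 32);

	if (sync_access)	/* GICv2 passes NULL and skips this */
		sync_access();
}

/* Stand-in for gic_dist_wait_for_rwp(): poll GICD_CTLR.RWP until it clears. */
static void wait_for_rwp(void)
{
	printf("poll GICD_CTLR.RWP until it reads 0\n");
}

int main(void)
{
	config_irqs(96, NULL);		/* GICv2-style caller */
	config_irqs(96, wait_for_rwp);	/* GICv3-style caller */
	return 0;
}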
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
drivers/irqchip/Makefile | 2 +-
drivers/irqchip/irq-gic-common.c | 107 +++++++++++++++++++++++++++++++++++++++
drivers/irqchip/irq-gic-common.h | 29 +++++++++++
drivers/irqchip/irq-gic.c | 59 ++-------------------
4 files changed, 141 insertions(+), 56 deletions(-)
create mode 100644 drivers/irqchip/irq-gic-common.c
create mode 100644 drivers/irqchip/irq-gic-common.h
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 5194afb..22e616c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
-obj-$(CONFIG_ARM_GIC) += irq-gic.o
+obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644
index 0000000..48e596e
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void))
+{
+ u32 enablemask = 1 << (irq % 32);
+ u32 enableoff = (irq / 32) * 4;
+ u32 confmask = 0x2 << ((irq % 16) * 2);
+ u32 confoff = (irq / 16) * 4;
+ bool enabled = false;
+ u32 val;
+
+ val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~confmask;
+ else if (type == IRQ_TYPE_EDGE_RISING)
+ val |= confmask;
+
+ /*
+ * As recommended by the spec, disable the interrupt before changing
+ * the configuration
+ */
+ if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+ if (sync_access)
+ sync_access();
+ enabled = true;
+ }
+
+ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+ if (enabled) {
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+ if (sync_access)
+ sync_access();
+ }
+}
+
+void __init gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void))
+{
+ unsigned int i;
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < gic_irqs; i += 16)
+ writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
+
+ /*
+ * Set priority on all global interrupts.
+ */
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+ /*
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as they are enabled by redistributor registers.
+ */
+ for (i = 32; i < gic_irqs; i += 32)
+ writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
+
+ if (sync_access)
+ sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+{
+ int i;
+
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
+ writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+ if (sync_access)
+ sync_access();
+}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644
index 0000000..b41f024
--- /dev/null
+++ b/drivers/irqchip/irq-gic-common.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+ void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 341c601..09ab3a3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -46,6 +46,7 @@
#include <asm/exception.h>
#include <asm/smp_plat.h>
+#include "irq-gic-common.h"
#include "irqchip.h"
union gic_base {
@@ -188,12 +189,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
- u32 enablemask = 1 << (gicirq % 32);
- u32 enableoff = (gicirq / 32) * 4;
- u32 confmask = 0x2 << ((gicirq % 16) * 2);
- u32 confoff = (gicirq / 16) * 4;
- bool enabled = false;
- u32 val;
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
@@ -207,25 +202,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
- if (type == IRQ_TYPE_LEVEL_HIGH)
- val &= ~confmask;
- else if (type == IRQ_TYPE_EDGE_RISING)
- val |= confmask;
-
- /*
- * As recommended by the spec, disable the interrupt before changing
- * the configuration
- */
- if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
- enabled = true;
- }
-
- writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-
- if (enabled)
- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+ gic_configure_irq(gicirq, type, base, NULL);
raw_spin_unlock(&irq_controller_lock);
@@ -383,12 +360,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
writel_relaxed(0, base + GIC_DIST_CTRL);
/*
- * Set all global interrupts to be level triggered, active low.
- */
- for (i = 32; i < gic_irqs; i += 16)
- writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
-
- /*
* Set all global interrupts to this CPU only.
*/
cpumask = gic_get_cpumask(gic);
@@ -397,18 +368,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
- /*
- * Set priority on all global interrupts.
- */
- for (i = 32; i < gic_irqs; i += 4)
- writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
-
- /*
- * Disable all interrupts. Leave the PPI and SGIs alone
- * as these enables are banked registers.
- */
- for (i = 32; i < gic_irqs; i += 32)
- writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+ gic_dist_config(base, gic_irqs, NULL);
writel_relaxed(1, base + GIC_DIST_CTRL);
}
@@ -435,18 +395,7 @@ static void gic_cpu_init(struct gic_chip_data *gic)
if (i != cpu)
gic_cpu_map[i] &= ~cpu_mask;
- /*
- * Deal with the banked PPI and SGI interrupts - disable all
- * PPI interrupts, ensure all SGI interrupts are enabled.
- */
- writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
- writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
- /*
- * Set priority on PPI and SGI interrupts
- */
- for (i = 0; i < 32; i += 4)
- writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+ gic_cpu_config(dist_base, NULL);
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
writel_relaxed(1, base + GIC_CPU_CTRL);
--
1.8.3.4
* [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file
2014-04-16 13:39 ` [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file Marc Zyngier
@ 2014-05-09 14:05 ` Christoffer Dall
2014-05-12 16:29 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:05 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:33PM +0100, Marc Zyngier wrote:
> A few GICv2 low-level function are actually very useful to GICv3,
> and it makes some sense to share them across the two drivers.
> They end-up in their own file, with an additional parameter used
> to ensure an optional synchronization (unused on GICv2).
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> drivers/irqchip/Makefile | 2 +-
> drivers/irqchip/irq-gic-common.c | 107 +++++++++++++++++++++++++++++++++++++++
> drivers/irqchip/irq-gic-common.h | 29 +++++++++++
> drivers/irqchip/irq-gic.c | 59 ++-------------------
> 4 files changed, 141 insertions(+), 56 deletions(-)
> create mode 100644 drivers/irqchip/irq-gic-common.c
> create mode 100644 drivers/irqchip/irq-gic-common.h
>
> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
> index 5194afb..22e616c 100644
> --- a/drivers/irqchip/Makefile
> +++ b/drivers/irqchip/Makefile
> @@ -13,7 +13,7 @@ obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
> obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
> obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
> obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
> -obj-$(CONFIG_ARM_GIC) += irq-gic.o
> +obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
> obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
> obj-$(CONFIG_ARM_VIC) += irq-vic.o
> obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
> diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
> new file mode 100644
> index 0000000..48e596e
> --- /dev/null
> +++ b/drivers/irqchip/irq-gic-common.c
> @@ -0,0 +1,107 @@
> +/*
> + * Copyright (C) 2002 ARM Limited, All Rights Reserved.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/irq.h>
> +#include <linux/irqchip/arm-gic.h>
> +
> +#include "irq-gic-common.h"
> +
> +void gic_configure_irq(unsigned int irq, unsigned int type,
> + void __iomem *base, void (*sync_access)(void))
use tabs instead of spaces
> +{
> + u32 enablemask = 1 << (irq % 32);
> + u32 enableoff = (irq / 32) * 4;
> + u32 confmask = 0x2 << ((irq % 16) * 2);
> + u32 confoff = (irq / 16) * 4;
> + bool enabled = false;
> + u32 val;
> +
> + val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
> + if (type == IRQ_TYPE_LEVEL_HIGH)
> + val &= ~confmask;
> + else if (type == IRQ_TYPE_EDGE_RISING)
> + val |= confmask;
> +
> + /*
> + * As recommended by the spec, disable the interrupt before changing
> + * the configuration
> + */
> + if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
> + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
> + if (sync_access)
> + sync_access();
> + enabled = true;
> + }
> +
> + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
Super nit: The other functions are very nicely commented but this one
actually doesn't explain which aspect of configuration the
"gic_configure_irq" performs; the fact that the config register deals
with level/edge trigger high/low may not be obvious to everyone.
Not worth a re-spin just for this though.
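For readers following along, the encoding gic_configure_irq() manipulates can
be read off the mask arithmetic above: GICD_ICFGR packs two configuration bits
per interrupt (sixteen interrupts per 32-bit register), and bit[1] of each
field selects edge (1) versus level (0) triggering. A small stand-alone
illustration of that layout, not part of the patch:

/* Toy demonstration of the GICD_ICFGR field layout (illustration only). */
#include <stdio.h>

int main(void)
{
	unsigned int irq = 42;					/* arbitrary SPI number */
	unsigned int confmask = 0x2u << ((irq % 16) * 2);	/* bit[1] of the 2-bit field */
	unsigned int confoff  = (irq / 16) * 4;			/* byte offset of GICD_ICFGRn */

	/* Setting the masked bit makes the IRQ edge-triggered; clearing it, level. */
	printf("IRQ %u: ICFGR offset 0x%x, trigger-mode mask 0x%08x\n",
	       irq, confoff, confmask);
	return 0;
}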
> +
> + if (enabled) {
> + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
> + if (sync_access)
> + sync_access();
> + }
> +}
> +
> +void __init gic_dist_config(void __iomem *base, int gic_irqs,
> + void (*sync_access)(void))
> +{
> + unsigned int i;
> +
> + /*
> + * Set all global interrupts to be level triggered, active low.
> + */
> + for (i = 32; i < gic_irqs; i += 16)
> + writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
> +
> + /*
> + * Set priority on all global interrupts.
> + */
> + for (i = 32; i < gic_irqs; i += 4)
> + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
> +
> + /*
> + * Disable all interrupts. Leave the PPI and SGIs alone
> + * as they are enabled by redistributor registers.
> + */
> + for (i = 32; i < gic_irqs; i += 32)
> + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
> +
> + if (sync_access)
> + sync_access();
> +}
> +
> +void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
> +{
> + int i;
> +
> + /*
> + * Deal with the banked PPI and SGI interrupts - disable all
> + * PPI interrupts, ensure all SGI interrupts are enabled.
> + */
> + writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
> + writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
> +
> + /*
> + * Set priority on PPI and SGI interrupts
> + */
> + for (i = 0; i < 32; i += 4)
> + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
> +
> + if (sync_access)
> + sync_access();
> +}
> diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
> new file mode 100644
> index 0000000..b41f024
> --- /dev/null
> +++ b/drivers/irqchip/irq-gic-common.h
> @@ -0,0 +1,29 @@
> +/*
> + * Copyright (C) 2002 ARM Limited, All Rights Reserved.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef _IRQ_GIC_COMMON_H
> +#define _IRQ_GIC_COMMON_H
> +
> +#include <linux/of.h>
> +#include <linux/irqdomain.h>
> +
> +void gic_configure_irq(unsigned int irq, unsigned int type,
> + void __iomem *base, void (*sync_access)(void));
> +void gic_dist_config(void __iomem *base, int gic_irqs,
> + void (*sync_access)(void));
> +void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
> +
> +#endif /* _IRQ_GIC_COMMON_H */
> diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
> index 341c601..09ab3a3 100644
> --- a/drivers/irqchip/irq-gic.c
> +++ b/drivers/irqchip/irq-gic.c
> @@ -46,6 +46,7 @@
> #include <asm/exception.h>
> #include <asm/smp_plat.h>
>
> +#include "irq-gic-common.h"
> #include "irqchip.h"
>
> union gic_base {
> @@ -188,12 +189,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
> {
> void __iomem *base = gic_dist_base(d);
> unsigned int gicirq = gic_irq(d);
> - u32 enablemask = 1 << (gicirq % 32);
> - u32 enableoff = (gicirq / 32) * 4;
> - u32 confmask = 0x2 << ((gicirq % 16) * 2);
> - u32 confoff = (gicirq / 16) * 4;
> - bool enabled = false;
> - u32 val;
>
> /* Interrupt configuration for SGIs can't be changed */
> if (gicirq < 16)
> @@ -207,25 +202,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
> if (gic_arch_extn.irq_set_type)
> gic_arch_extn.irq_set_type(d, type);
>
> - val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
> - if (type == IRQ_TYPE_LEVEL_HIGH)
> - val &= ~confmask;
> - else if (type == IRQ_TYPE_EDGE_RISING)
> - val |= confmask;
> -
> - /*
> - * As recommended by the spec, disable the interrupt before changing
> - * the configuration
> - */
> - if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
> - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
> - enabled = true;
> - }
> -
> - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
> -
> - if (enabled)
> - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
> + gic_configure_irq(gicirq, type, base, NULL);
>
> raw_spin_unlock(&irq_controller_lock);
>
> @@ -383,12 +360,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
> writel_relaxed(0, base + GIC_DIST_CTRL);
>
> /*
> - * Set all global interrupts to be level triggered, active low.
> - */
> - for (i = 32; i < gic_irqs; i += 16)
> - writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
> -
> - /*
> * Set all global interrupts to this CPU only.
> */
> cpumask = gic_get_cpumask(gic);
> @@ -397,18 +368,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
> for (i = 32; i < gic_irqs; i += 4)
> writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
>
> - /*
> - * Set priority on all global interrupts.
> - */
> - for (i = 32; i < gic_irqs; i += 4)
> - writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
> -
> - /*
> - * Disable all interrupts. Leave the PPI and SGIs alone
> - * as these enables are banked registers.
> - */
> - for (i = 32; i < gic_irqs; i += 32)
> - writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
> + gic_dist_config(base, gic_irqs, NULL);
>
> writel_relaxed(1, base + GIC_DIST_CTRL);
> }
> @@ -435,18 +395,7 @@ static void gic_cpu_init(struct gic_chip_data *gic)
> if (i != cpu)
> gic_cpu_map[i] &= ~cpu_mask;
>
> - /*
> - * Deal with the banked PPI and SGI interrupts - disable all
> - * PPI interrupts, ensure all SGI interrupts are enabled.
> - */
> - writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
> - writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
> -
> - /*
> - * Set priority on PPI and SGI interrupts
> - */
> - for (i = 0; i < 32; i += 4)
> - writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
> + gic_cpu_config(dist_base, NULL);
>
> writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
> writel_relaxed(1, base + GIC_CPU_CTRL);
> --
> 1.8.3.4
>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
* [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file
2014-05-09 14:05 ` Christoffer Dall
@ 2014-05-12 16:29 ` Marc Zyngier
0 siblings, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 16:29 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:05:12 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:33PM +0100, Marc Zyngier wrote:
>> A few GICv2 low-level function are actually very useful to GICv3,
>> and it makes some sense to share them across the two drivers.
>> They end-up in their own file, with an additional parameter used
>> to ensure an optional synchronization (unused on GICv2).
>>
>> Cc: Thomas Gleixner <tglx@linutronix.de>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> drivers/irqchip/Makefile | 2 +-
>> drivers/irqchip/irq-gic-common.c | 107 +++++++++++++++++++++++++++++++++++++++
>> drivers/irqchip/irq-gic-common.h | 29 +++++++++++
>> drivers/irqchip/irq-gic.c | 59 ++-------------------
>> 4 files changed, 141 insertions(+), 56 deletions(-)
>> create mode 100644 drivers/irqchip/irq-gic-common.c
>> create mode 100644 drivers/irqchip/irq-gic-common.h
>>
>> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
>> index 5194afb..22e616c 100644
>> --- a/drivers/irqchip/Makefile
>> +++ b/drivers/irqchip/Makefile
>> @@ -13,7 +13,7 @@ obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
>> obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
>> obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
>> obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
>> -obj-$(CONFIG_ARM_GIC) += irq-gic.o
>> +obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
>> obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
>> obj-$(CONFIG_ARM_VIC) += irq-vic.o
>> obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
>> diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
>> new file mode 100644
>> index 0000000..48e596e
>> --- /dev/null
>> +++ b/drivers/irqchip/irq-gic-common.c
>> @@ -0,0 +1,107 @@
>> +/*
>> + * Copyright (C) 2002 ARM Limited, All Rights Reserved.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/irq.h>
>> +#include <linux/irqchip/arm-gic.h>
>> +
>> +#include "irq-gic-common.h"
>> +
>> +void gic_configure_irq(unsigned int irq, unsigned int type,
>> + void __iomem *base, void (*sync_access)(void))
>
> use tabs instead of spaces
Sure.
>> +{
>> + u32 enablemask = 1 << (irq % 32);
>> + u32 enableoff = (irq / 32) * 4;
>> + u32 confmask = 0x2 << ((irq % 16) * 2);
>> + u32 confoff = (irq / 16) * 4;
>> + bool enabled = false;
>> + u32 val;
>> +
>> + val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
>> + if (type == IRQ_TYPE_LEVEL_HIGH)
>> + val &= ~confmask;
>> + else if (type == IRQ_TYPE_EDGE_RISING)
>> + val |= confmask;
>> +
>> + /*
>> + * As recommended by the spec, disable the interrupt before changing
>> + * the configuration
>> + */
>> + if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
>> + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
>> + if (sync_access)
>> + sync_access();
>> + enabled = true;
>> + }
>> +
>> + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
>
> Super nit: The other functions are very nicely commented but this one
> actually doesn't explain which aspect of configuration the
> "gic_configure_irq" performs; the fact that the config register deals
> with level/edge trigger high/low may not be obvious to everyone.
>
> Not worth a re-spin just for this though.
Bah. I'll add a comment.
>> +
>> + if (enabled) {
>> + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
>> + if (sync_access)
>> + sync_access();
>> + }
>> +}
>> +
>> +void __init gic_dist_config(void __iomem *base, int gic_irqs,
>> + void (*sync_access)(void))
>> +{
>> + unsigned int i;
>> +
>> + /*
>> + * Set all global interrupts to be level triggered, active low.
>> + */
>> + for (i = 32; i < gic_irqs; i += 16)
>> + writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
>> +
>> + /*
>> + * Set priority on all global interrupts.
>> + */
>> + for (i = 32; i < gic_irqs; i += 4)
>> + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
>> +
>> + /*
>> + * Disable all interrupts. Leave the PPI and SGIs alone
>> + * as they are enabled by redistributor registers.
>> + */
>> + for (i = 32; i < gic_irqs; i += 32)
>> + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
>> +
>> + if (sync_access)
>> + sync_access();
>> +}
>> +
>> +void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
>> +{
>> + int i;
>> +
>> + /*
>> + * Deal with the banked PPI and SGI interrupts - disable all
>> + * PPI interrupts, ensure all SGI interrupts are enabled.
>> + */
>> + writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
>> + writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
>> +
>> + /*
>> + * Set priority on PPI and SGI interrupts
>> + */
>> + for (i = 0; i < 32; i += 4)
>> + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
>> +
>> + if (sync_access)
>> + sync_access();
>> +}
>> diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
>> new file mode 100644
>> index 0000000..b41f024
>> --- /dev/null
>> +++ b/drivers/irqchip/irq-gic-common.h
>> @@ -0,0 +1,29 @@
>> +/*
>> + * Copyright (C) 2002 ARM Limited, All Rights Reserved.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#ifndef _IRQ_GIC_COMMON_H
>> +#define _IRQ_GIC_COMMON_H
>> +
>> +#include <linux/of.h>
>> +#include <linux/irqdomain.h>
>> +
>> +void gic_configure_irq(unsigned int irq, unsigned int type,
>> + void __iomem *base, void (*sync_access)(void));
>> +void gic_dist_config(void __iomem *base, int gic_irqs,
>> + void (*sync_access)(void));
>> +void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
>> +
>> +#endif /* _IRQ_GIC_COMMON_H */
>> diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
>> index 341c601..09ab3a3 100644
>> --- a/drivers/irqchip/irq-gic.c
>> +++ b/drivers/irqchip/irq-gic.c
>> @@ -46,6 +46,7 @@
>> #include <asm/exception.h>
>> #include <asm/smp_plat.h>
>>
>> +#include "irq-gic-common.h"
>> #include "irqchip.h"
>>
>> union gic_base {
>> @@ -188,12 +189,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
>> {
>> void __iomem *base = gic_dist_base(d);
>> unsigned int gicirq = gic_irq(d);
>> - u32 enablemask = 1 << (gicirq % 32);
>> - u32 enableoff = (gicirq / 32) * 4;
>> - u32 confmask = 0x2 << ((gicirq % 16) * 2);
>> - u32 confoff = (gicirq / 16) * 4;
>> - bool enabled = false;
>> - u32 val;
>>
>> /* Interrupt configuration for SGIs can't be changed */
>> if (gicirq < 16)
>> @@ -207,25 +202,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
>> if (gic_arch_extn.irq_set_type)
>> gic_arch_extn.irq_set_type(d, type);
>>
>> - val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
>> - if (type == IRQ_TYPE_LEVEL_HIGH)
>> - val &= ~confmask;
>> - else if (type == IRQ_TYPE_EDGE_RISING)
>> - val |= confmask;
>> -
>> - /*
>> - * As recommended by the spec, disable the interrupt before changing
>> - * the configuration
>> - */
>> - if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
>> - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
>> - enabled = true;
>> - }
>> -
>> - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
>> -
>> - if (enabled)
>> - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
>> + gic_configure_irq(gicirq, type, base, NULL);
>>
>> raw_spin_unlock(&irq_controller_lock);
>>
>> @@ -383,12 +360,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
>> writel_relaxed(0, base + GIC_DIST_CTRL);
>>
>> /*
>> - * Set all global interrupts to be level triggered, active low.
>> - */
>> - for (i = 32; i < gic_irqs; i += 16)
>> - writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
>> -
>> - /*
>> * Set all global interrupts to this CPU only.
>> */
>> cpumask = gic_get_cpumask(gic);
>> @@ -397,18 +368,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
>> for (i = 32; i < gic_irqs; i += 4)
>> writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
>>
>> - /*
>> - * Set priority on all global interrupts.
>> - */
>> - for (i = 32; i < gic_irqs; i += 4)
>> - writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
>> -
>> - /*
>> - * Disable all interrupts. Leave the PPI and SGIs alone
>> - * as these enables are banked registers.
>> - */
>> - for (i = 32; i < gic_irqs; i += 32)
>> - writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
>> + gic_dist_config(base, gic_irqs, NULL);
>>
>> writel_relaxed(1, base + GIC_DIST_CTRL);
>> }
>> @@ -435,18 +395,7 @@ static void gic_cpu_init(struct gic_chip_data *gic)
>> if (i != cpu)
>> gic_cpu_map[i] &= ~cpu_mask;
>>
>> - /*
>> - * Deal with the banked PPI and SGI interrupts - disable all
>> - * PPI interrupts, ensure all SGI interrupts are enabled.
>> - */
>> - writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
>> - writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
>> -
>> - /*
>> - * Set priority on PPI and SGI interrupts
>> - */
>> - for (i = 0; i < 32; i += 4)
>> - writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
>> + gic_cpu_config(dist_base, NULL);
>>
>> writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
>> writel_relaxed(1, base + GIC_CPU_CTRL);
>> --
>> 1.8.3.4
>>
>
> Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Thanks,
M.
--
Jazz is not dead. It just smells funny.
* [PATCH v3 02/19] arm64: initial support for GICv3
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
2014-04-16 13:39 ` [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 03/19] arm64: GICv3 device tree binding documentation Marc Zyngier
` (16 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
The Generic Interrupt Controller (version 3) offers services that are
similar to GICv2, with a number of additional features:
- Affinity routing based on the CPU MPIDR (ARE)
- System register access for the CPU interface (SRE)
- Support for more than 8 CPUs
- Locality-specific Peripheral Interrupts (LPIs)
- Interrupt Translation Services (ITS)
This patch adds preliminary support for GICv3 with ARE and SRE,
non-secure mode only. It relies on higher exception levels to grant ARE
and SRE access.
Support for LPI and ITS will be added at a later time.
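To make the affinity-routing point concrete: with ARE enabled, each SPI has a
GICD_IROUTER register whose value is assembled from the target CPU's MPIDR
affinity fields, which is what gic_mpidr_to_affinity() does in the driver
below. A stand-alone sketch of that packing follows; the MPIDR value is made up.

/* Toy model of the MPIDR -> GICD_IROUTER packing (illustration only). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical MPIDR: Aff2 = 1, Aff1 = 0, Aff0 = 3 (Aff3 lives in bits [39:32]). */
	uint64_t mpidr = 0x0000000000010003ULL;
	uint64_t aff0 = (mpidr >>  0) & 0xff;
	uint64_t aff1 = (mpidr >>  8) & 0xff;
	uint64_t aff2 = (mpidr >> 16) & 0xff;
	uint64_t aff3 = (mpidr >> 32) & 0xff;
	uint64_t irouter;

	/* Bit 31 (SPI_MODE_ANY) is left clear so the SPI targets a single PE. */
	irouter = aff3 << 32 | aff2 << 16 | aff1 << 8 | aff0;

	printf("GICD_IROUTER value: 0x%016llx\n", (unsigned long long)irouter);
	return 0;
}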
Cc: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zi Shen Lim <zlim@broadcom.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/head.S | 18 +
arch/arm64/kernel/hyp-stub.S | 1 +
drivers/irqchip/Kconfig | 5 +
drivers/irqchip/Makefile | 1 +
drivers/irqchip/irq-gic-v3.c | 681 +++++++++++++++++++++++++++++++++++++
include/linux/irqchip/arm-gic-v3.h | 188 ++++++++++
7 files changed, 895 insertions(+)
create mode 100644 drivers/irqchip/irq-gic-v3.c
create mode 100644 include/linux/irqchip/arm-gic-v3.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 27bbcfc..5bd0dfe 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,6 +9,7 @@ config ARM64
select ARM_AMBA
select ARM_ARCH_TIMER
select ARM_GIC
+ select ARM_GIC_V3
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0b281ff..7c56e93 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,6 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
@@ -183,6 +184,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
msr cnthctl_el2, x0
msr cntvoff_el2, xzr // Clear virtual offset
+#ifdef CONFIG_ARM_GIC_V3
+ /* GICv3 system register access */
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #24, #4
+ cmp x0, #1
+ b.ne 3f
+
+ mrs x0, ICC_SRE_EL2
+ orr x0, x0, #1 // Set ICC_SRE_EL2.SRE==1
+ orr x0, x0, #(1 << 3) // Set ICC_SRE_EL2.Enable==1
+ msr ICC_SRE_EL2, x0
+ isb // Make sure SRE is now 1
+ msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
/* Populate ID registers. */
mrs x0, midr_el1
mrs x1, mpidr_el1
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 0959611..a272f33 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 61ffdca..5b55c46 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -10,6 +10,11 @@ config ARM_GIC
config GIC_NON_BANKED
bool
+config ARM_GIC_V3
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
config ARM_NVIC
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 22e616c..4adfda8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644
index 0000000..b4c8140
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+#include "irqchip.h"
+
+struct gic_chip_data {
+ void __iomem *dist_base;
+ void __iomem **redist_base;
+ void __percpu __iomem **rdist;
+ struct irq_domain *domain;
+ u64 redist_stride;
+ u32 redist_regions;
+ unsigned int irq_nr;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
+#define gic_data_rdist_rd_base() (*gic_data_rdist())
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+#define DEFAULT_PMR_VALUE 0xf0
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+static inline int gic_irq_in_rdist(struct irq_data *d)
+{
+ return gic_irq(d) < 32;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
+ return gic_data_rdist_sgi_base();
+
+ if (d->hwirq <= 1023) /* SPI -> dist_base */
+ return gic_data.dist_base;
+
+ if (d->hwirq >= 8192)
+ BUG(); /* LPI Detected!!! */
+
+ return NULL;
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s! */
+
+ while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ };
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+/* Low level accessors */
+static u64 gic_read_iar(void)
+{
+ u64 irqstat;
+
+ asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+ return irqstat;
+}
+
+static void gic_write_pmr(u64 val)
+{
+ asm volatile("msr " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_write_ctlr(u64 val)
+{
+ asm volatile("msr " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
+ isb();
+}
+
+static void gic_write_grpen1(u64 val)
+{
+ asm volatile("msr " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
+ isb();
+}
+
+static void gic_write_sgi1r(u64 val)
+{
+ asm volatile("msr " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_enable_sre(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+ val |= GICC_SRE_EL1_SRE;
+ asm volatile("msr " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
+ isb();
+
+ /*
+ * Need to check that the SRE bit has actually been set. If
+ * not, it means that SRE is disabled at EL2. We're going to
+ * die painfully, and there is nothing we can do about it.
+ *
+ * Kindly inform the luser.
+ */
+ asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+ if (!(val & GICC_SRE_EL1_SRE))
+ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_enable_redist(void)
+{
+ void __iomem *rbase;
+ u32 count = 1000000; /* 1s! */
+ u32 val;
+
+ rbase = gic_data_rdist_rd_base();
+
+ /* Wake up this CPU redistributor */
+ val = readl_relaxed(rbase + GICR_WAKER);
+ val &= ~GICR_WAKER_ProcessorSleep;
+ writel_relaxed(val, rbase + GICR_WAKER);
+
+ while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("redist didn't wake up...\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ };
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+ void (*rwp_wait)(void);
+ void __iomem *base;
+
+ if (gic_irq_in_rdist(d)) {
+ base = gic_data_rdist_sgi_base();
+ rwp_wait = gic_redist_wait_for_rwp;
+ } else {
+ base = gic_data.dist_base;
+ rwp_wait = gic_dist_wait_for_rwp;
+ }
+
+ writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+ rwp_wait();
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+ void __iomem *base;
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else
+ base = gic_data.dist_base;
+
+ return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ gic_write_eoir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int irq = gic_irq(d);
+ void (*rwp_wait)(void);
+ void __iomem *base;
+
+ if (gic_irq_in_rdist(d)) {
+ base = gic_data_rdist_sgi_base();
+ rwp_wait = gic_redist_wait_for_rwp;
+ } else {
+ base = gic_data.dist_base;
+ rwp_wait = gic_dist_wait_for_rwp;
+ }
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (irq < 16)
+ return -EINVAL;
+
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ gic_configure_irq(irq, type, base, rwp_wait);
+
+ return 0;
+}
+
+static u64 gic_mpidr_to_affinity(u64 mpidr)
+{
+ u64 aff;
+
+ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY;
+
+ return aff;
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u64 irqnr;
+
+ do {
+ irqnr = gic_read_iar();
+
+ if (likely(irqnr > 15 && irqnr < 1021)) {
+ irqnr = irq_find_mapping(gic_data.domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ if (irqnr < 16) {
+ gic_write_eoir(irqnr);
+#ifdef CONFIG_SMP
+ handle_IPI(irqnr, regs);
+#else
+ WARN_ONCE(true, "Unexpected SGI received!\n");
+#endif
+ continue;
+ }
+ } while (irqnr != 0x3ff);
+}
+
+static void __init gic_dist_init(void)
+{
+ unsigned int i;
+ u64 affinity;
+ void __iomem *base = gic_data.dist_base;
+
+ /* Disable the distributor */
+ writel_relaxed(0, base + GICD_CTLR);
+
+ gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+
+ /* Enable distributor with ARE, Group1 */
+ writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+ base + GICD_CTLR);
+
+ /*
+ * Set all global interrupts to the boot CPU only. ARE must be
+ * enabled.
+ */
+ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+ for (i = 32; i < gic_data.irq_nr; i++)
+ writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
+}
+
+static int gic_populate_rdist(void)
+{
+ u64 mpidr = cpu_logical_map(smp_processor_id());
+ u64 typer;
+ u32 aff;
+ int i;
+
+ /* Convert affinity to a 32bit value... */
+ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ for (i = 0; i < gic_data.redist_regions; i++) {
+ void __iomem *ptr = gic_data.redist_base[i];
+ u32 reg;
+
+ reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (reg != 0x30 && reg != 0x40) { /* We're in trouble... */
+ pr_warn("No redistributor present @%p\n", ptr);
+ break;
+ }
+
+ do {
+ typer = readq_relaxed(ptr + GICR_TYPER);
+ if ((typer >> 32) == aff) {
+ gic_data_rdist_rd_base() = ptr;
+ pr_info("CPU%d: found redistributor %llx @%p\n",
+ smp_processor_id(),
+ (unsigned long long)mpidr, ptr);
+ return 0;
+ }
+
+ if (gic_data.redist_stride) {
+ ptr += gic_data.redist_stride;
+ } else {
+ ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+ if (typer & GICR_TYPER_VLPIS)
+ ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+ }
+ } while (!(typer & GICR_TYPER_LAST));
+ }
+
+ /* We couldn't even deal with ourselves... */
+ WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
+ smp_processor_id(), (unsigned long long)mpidr);
+ return -ENODEV;
+}
+
+static void gic_cpu_init(void)
+{
+ void __iomem *rbase;
+
+ /* Register ourselves with the rest of the world */
+ if (gic_populate_rdist())
+ return;
+
+ gic_enable_redist();
+
+ rbase = gic_data_rdist_sgi_base();
+
+ gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
+ /* Enable system registers */
+ gic_enable_sre();
+
+ /* Set priority mask register */
+ gic_write_pmr(DEFAULT_PMR_VALUE);
+
+ /* EOI deactivates interrupt too (mode 0) */
+ gic_write_ctlr(GICC_CTLR_EL1_EOImode_drop_dir);
+
+ /* ... and let's hit the road... */
+ gic_write_grpen1(1);
+}
+
+#ifdef CONFIG_SMP
+static int gic_secondary_init(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ gic_cpu_init();
+ return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block gic_cpu_notifier = {
+ .notifier_call = gic_secondary_init,
+ .priority = 100,
+};
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+ u64 cluster_id)
+{
+ int cpu = *base_cpu;
+ u64 mpidr = cpu_logical_map(cpu);
+ u16 tlist = 0;
+
+ while (cpu < nr_cpu_ids) {
+ /*
+ * If we ever get a cluster of more than 16 CPUs, just
+ * scream and skip that CPU.
+ */
+ if (WARN_ON((mpidr & 0xff) >= 16))
+ goto out;
+
+ tlist |= 1 << (mpidr & 0xf);
+
+ cpu = cpumask_next(cpu, mask);
+ if (cpu == nr_cpu_ids)
+ goto out;
+
+ mpidr = cpu_logical_map(cpu);
+
+ if (cluster_id != (mpidr & ~0xffUL)) {
+ cpu--;
+ goto out;
+ }
+ }
+out:
+ *base_cpu = cpu;
+ return tlist;
+}
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+ u64 val;
+
+ val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
+ MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
+ irq << 24 |
+ MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
+ tlist);
+
+ pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+ gic_write_sgi1r(val);
+}
+
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+
+ if (WARN_ON(irq >= 16))
+ return;
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ smp_wmb();
+
+ for_each_cpu_mask(cpu, *mask) {
+ u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+ u16 tlist;
+
+ tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+ gic_send_sgi(cluster_id, tlist, irq);
+ }
+
+ /* Force the above writes to ICC_SGI1R_EL1 to be executed */
+ isb();
+}
+
+static void gic_smp_init(void)
+{
+ set_smp_cross_call(gic_raise_softirq);
+ register_cpu_notifier(&gic_cpu_notifier);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ void __iomem *reg;
+ int enabled;
+ u64 val;
+
+ if (gic_irq_in_rdist(d))
+ return -EINVAL;
+
+ /* If interrupt was enabled, disable it first */
+ enabled = gic_peek_irq(d, GICD_ISENABLER);
+ if (enabled)
+ gic_mask_irq(d);
+
+ reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+ val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+ writeq_relaxed(val, reg);
+
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+ else
+ gic_dist_wait_for_rwp();
+
+ return IRQ_SET_MASK_OK;
+}
+#else
+#define gic_set_affinity NULL
+#define gic_smp_init() do { } while(0)
+#endif
+
+static struct irq_chip gic_chip = {
+ .name = "GICv3",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ /* SGIs are private to the core kernel */
+ if (hw < 16)
+ return -EPERM;
+ /* PPIs */
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_percpu_devid_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ }
+ /* SPIs */
+ if (hw >= 32 && hw < gic_data.irq_nr) {
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_fasteoi_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize < 3)
+ return -EINVAL;
+
+ switch(intspec[0]) {
+ case 0: /* SPI */
+ *out_hwirq = intspec[1] + 32;
+ break;
+ case 1: /* PPI */
+ *out_hwirq = intspec[1] + 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
+};
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *dist_base;
+ void __iomem **redist_base;
+ u64 redist_stride;
+ u32 redist_regions;
+ u32 reg;
+ int gic_irqs;
+ int err;
+ int i;
+
+ dist_base = of_iomap(node, 0);
+ if (!dist_base) {
+ pr_err("%s: unable to map gic dist registers\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (reg != 0x30 && reg != 0x40) {
+ pr_err("%s: no distributor detected, giving up\n",
+ node->full_name);
+ err = -ENODEV;
+ goto out_unmap_dist;
+ }
+
+ if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
+ redist_regions = 1;
+
+ redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
+ if (!redist_base) {
+ err = -ENOMEM;
+ goto out_unmap_dist;
+ }
+
+ for (i = 0; i < redist_regions; i++) {
+ redist_base[i] = of_iomap(node, 1 + i);
+ if (!redist_base[i]) {
+ pr_err("%s: couldn't map region %d\n",
+ node->full_name, i);
+ err = -ENODEV;
+ goto out_unmap_rdist;
+ }
+ }
+
+ if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+ redist_stride = 0;
+
+ gic_data.dist_base = dist_base;
+ gic_data.redist_base = redist_base;
+ gic_data.redist_regions = redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+ */
+ gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic_data.irq_nr = gic_irqs;
+
+ gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
+ &gic_data);
+ gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ set_handle_irq(gic_handle_irq);
+
+ gic_smp_init();
+ gic_dist_init();
+ gic_cpu_init();
+
+ return 0;
+
+out_free:
+ if (gic_data.domain)
+ irq_domain_remove(gic_data.domain);
+ free_percpu(gic_data.rdist);
+out_unmap_rdist:
+ for (i = 0; i < redist_regions; i++)
+ if (redist_base[i])
+ iounmap(redist_base[i]);
+ kfree(redist_base);
+out_unmap_dist:
+ iounmap(dist_base);
+ return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644
index 0000000..16c8b05
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_STATUSR 0x0010
+#define GICD_SETSPI_NSR 0x0040
+#define GICD_CLRSPI_NSR 0x0048
+#define GICD_SETSPI_SR 0x0050
+#define GICD_CLRSPI_SR 0x0058
+#define GICD_SEIR 0x0068
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ISPENDR 0x0200
+#define GICD_ICPENDR 0x0280
+#define GICD_ISACTIVER 0x0300
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+#define GICD_ICFGR 0x0C00
+#define GICD_IROUTER 0x6000
+#define GICD_PIDR2 0xFFE8
+
+#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK 0xf0
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR GICD_CTLR
+#define GICR_IIDR 0x0004
+#define GICR_TYPER 0x0008
+#define GICR_STATUSR GICD_STATUSR
+#define GICR_WAKER 0x0014
+#define GICR_SETLPIR 0x0040
+#define GICR_CLRLPIR 0x0048
+#define GICR_SEIR GICD_SEIR
+#define GICR_PROPBASER 0x0070
+#define GICR_PENDBASER 0x0078
+#define GICR_INVLPIR 0x00A0
+#define GICR_INVALLR 0x00B0
+#define GICR_SYNCR 0x00C0
+#define GICR_MOVLPIR 0x0100
+#define GICR_MOVALLR 0x0110
+#define GICR_PIDR2 GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0 GICD_ISENABLER
+#define GICR_ICENABLER0 GICD_ICENABLER
+#define GICR_ISPENDR0 GICD_ISPENDR
+#define GICR_ICPENDR0 GICD_ICPENDR
+#define GICR_ISACTIVER0 GICD_ISACTIVER
+#define GICR_ICACTIVER0 GICD_ICACTIVER
+#define GICR_IPRIORITYR0 GICD_IPRIORITYR
+#define GICR_ICFGR0 GICD_ICFGR
+
+#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_LAST (1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define GICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
+#define GICC_CTLR_EL1_EOImode_drop (1U << 1)
+#define GICC_SRE_EL1_SRE (1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define GICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
+
+#define GICH_LR_EOI (1UL << 41)
+#define GICH_LR_GROUP (1UL << 60)
+#define GICH_LR_STATE (3UL << 62)
+#define GICH_LR_PENDING_BIT (1UL << 62)
+#define GICH_LR_ACTIVE_BIT (1UL << 63)
+
+#define GICH_MISR_EOI (1 << 0)
+#define GICH_MISR_U (1 << 1)
+
+#define GICH_HCR_EN (1 << 0)
+#define GICH_HCR_UIE (1 << 1)
+
+#define GICH_VMCR_CTLR_SHIFT 0
+#define GICH_VMCR_CTLR_MASK (0x21f << GICH_VMCR_CTLR_SHIFT)
+#define GICH_VMCR_BPR1_SHIFT 18
+#define GICH_VMCR_BPR1_MASK (7 << GICH_VMCR_BPR1_SHIFT)
+#define GICH_VMCR_BPR0_SHIFT 21
+#define GICH_VMCR_BPR0_MASK (7 << GICH_VMCR_BPR0_SHIFT)
+#define GICH_VMCR_PMR_SHIFT 24
+#define GICH_VMCR_PMR_MASK (0xffUL << GICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+#define ICC_SGI1R_EL1 S3_0_C12_C11_5
+#define ICC_PMR_EL1 S3_0_C4_C6_0
+#define ICC_CTLR_EL1 S3_0_C12_C12_4
+#define ICC_SRE_EL1 S3_0_C12_C12_5
+#define ICC_GRPEN1_EL1 S3_0_C12_C12_7
+
+#define ICC_SRE_EL2 S3_4_C12_C9_5
+
+#define ICH_VSEIR_EL2 S3_4_C12_C9_4
+#define ICH_HCR_EL2 S3_4_C12_C11_0
+#define ICH_VTR_EL2 S3_4_C12_C11_1
+#define ICH_MISR_EL2 S3_4_C12_C11_2
+#define ICH_EISR_EL2 S3_4_C12_C11_3
+#define ICH_ELSR_EL2 S3_4_C12_C11_5
+#define ICH_VMCR_EL2 S3_4_C12_C11_7
+
+#define __LR0_EL2(x) S3_4_C12_C12_ ## x
+#define __LR8_EL2(x) S3_4_C12_C13_ ## x
+
+#define ICH_LR0_EL2 __LR0_EL2(0)
+#define ICH_LR1_EL2 __LR0_EL2(1)
+#define ICH_LR2_EL2 __LR0_EL2(2)
+#define ICH_LR3_EL2 __LR0_EL2(3)
+#define ICH_LR4_EL2 __LR0_EL2(4)
+#define ICH_LR5_EL2 __LR0_EL2(5)
+#define ICH_LR6_EL2 __LR0_EL2(6)
+#define ICH_LR7_EL2 __LR0_EL2(7)
+#define ICH_LR8_EL2 __LR8_EL2(0)
+#define ICH_LR9_EL2 __LR8_EL2(1)
+#define ICH_LR10_EL2 __LR8_EL2(2)
+#define ICH_LR11_EL2 __LR8_EL2(3)
+#define ICH_LR12_EL2 __LR8_EL2(4)
+#define ICH_LR13_EL2 __LR8_EL2(5)
+#define ICH_LR14_EL2 __LR8_EL2(6)
+#define ICH_LR15_EL2 __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x
+#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x
+#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+ asm volatile("msr " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+ isb();
+}
+
+#endif
+
+#endif
--
1.8.3.4
* [PATCH v3 02/19] arm64: initial support for GICv3
2014-04-16 13:39 ` [PATCH v3 02/19] arm64: initial support for GICv3 Marc Zyngier
@ 2014-05-09 14:05 ` Christoffer Dall
2014-05-12 16:54 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:05 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:34PM +0100, Marc Zyngier wrote:
> The Generic Interrupt Controller (version 3) offers services that are
> similar to GICv2, with a number of additional features:
> - Affinity routing based on the CPU MPIDR (ARE)
> - System register for the CPU interfaces (SRE)
> - Support for more that 8 CPUs
> - Locality-specific Peripheral Interrupts (LPIs)
> - Interrupt Translation Services (ITS)
>
> This patch adds preliminary support for GICv3 with ARE and SRE,
> non-secure mode only. It relies on higher exception levels to grant ARE
> and SRE access.
>
> Support for LPI and ITS will be added at a later time.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Zi Shen Lim <zlim@broadcom.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/head.S | 18 +
> arch/arm64/kernel/hyp-stub.S | 1 +
> drivers/irqchip/Kconfig | 5 +
> drivers/irqchip/Makefile | 1 +
> drivers/irqchip/irq-gic-v3.c | 681 +++++++++++++++++++++++++++++++++++++
> include/linux/irqchip/arm-gic-v3.h | 188 ++++++++++
> 7 files changed, 895 insertions(+)
> create mode 100644 drivers/irqchip/irq-gic-v3.c
> create mode 100644 include/linux/irqchip/arm-gic-v3.h
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 27bbcfc..5bd0dfe 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -9,6 +9,7 @@ config ARM64
> select ARM_AMBA
> select ARM_ARCH_TIMER
> select ARM_GIC
> + select ARM_GIC_V3
> select BUILDTIME_EXTABLE_SORT
> select CLONE_BACKWARDS
> select COMMON_CLK
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 0b281ff..7c56e93 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -22,6 +22,7 @@
>
> #include <linux/linkage.h>
> #include <linux/init.h>
> +#include <linux/irqchip/arm-gic-v3.h>
>
> #include <asm/assembler.h>
> #include <asm/ptrace.h>
> @@ -183,6 +184,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
> msr cnthctl_el2, x0
> msr cntvoff_el2, xzr // Clear virtual offset
>
> +#ifdef CONFIG_ARM_GIC_V3
> + /* GICv3 system register access */
> + mrs x0, id_aa64pfr0_el1
> + ubfx x0, x0, #24, #4
> + cmp x0, #1
> + b.ne 3f
> +
> + mrs x0, ICC_SRE_EL2
> + orr x0, x0, #1 // Set ICC_SRE_EL2.SRE==1
> + orr x0, x0, #(1 << 3) // Set ICC_SRE_EL2.Enable==1
> + msr ICC_SRE_EL2, x0
> + isb // Make sure SRE is now 1
> + msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
> +
> +3:
> +#endif
> +
> /* Populate ID registers. */
> mrs x0, midr_el1
> mrs x1, mpidr_el1
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index 0959611..a272f33 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
> @@ -19,6 +19,7 @@
>
> #include <linux/init.h>
> #include <linux/linkage.h>
> +#include <linux/irqchip/arm-gic-v3.h>
>
> #include <asm/assembler.h>
> #include <asm/ptrace.h>
> diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
> index 61ffdca..5b55c46 100644
> --- a/drivers/irqchip/Kconfig
> +++ b/drivers/irqchip/Kconfig
> @@ -10,6 +10,11 @@ config ARM_GIC
> config GIC_NON_BANKED
> bool
>
> +config ARM_GIC_V3
> + bool
> + select IRQ_DOMAIN
> + select MULTI_IRQ_HANDLER
> +
> config ARM_NVIC
> bool
> select IRQ_DOMAIN
> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
> index 22e616c..4adfda8 100644
> --- a/drivers/irqchip/Makefile
> +++ b/drivers/irqchip/Makefile
> @@ -14,6 +14,7 @@ obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
> obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
> obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
> obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
> +obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
> obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
> obj-$(CONFIG_ARM_VIC) += irq-vic.o
> obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
> diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
> new file mode 100644
> index 0000000..b4c8140
> --- /dev/null
> +++ b/drivers/irqchip/irq-gic-v3.c
> @@ -0,0 +1,681 @@
> +/*
> + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/cpu.h>
> +#include <linux/delay.h>
> +#include <linux/interrupt.h>
> +#include <linux/of.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +#include <linux/percpu.h>
> +#include <linux/slab.h>
> +
> +#include <linux/irqchip/arm-gic-v3.h>
> +
> +#include <asm/cputype.h>
> +#include <asm/exception.h>
> +#include <asm/smp_plat.h>
> +
> +#include "irq-gic-common.h"
> +#include "irqchip.h"
> +
> +struct gic_chip_data {
> + void __iomem *dist_base;
> + void __iomem **redist_base;
> + void __percpu __iomem **rdist;
> + struct irq_domain *domain;
> + u64 redist_stride;
> + u32 redist_regions;
> + unsigned int irq_nr;
> +};
> +
> +static struct gic_chip_data gic_data __read_mostly;
> +
> +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
> +#define gic_data_rdist_rd_base() (*gic_data_rdist())
> +#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
> +
> +#define DEFAULT_PMR_VALUE 0xf0
> +
> +static inline unsigned int gic_irq(struct irq_data *d)
> +{
> + return d->hwirq;
> +}
> +
> +static inline int gic_irq_in_rdist(struct irq_data *d)
> +{
> + return gic_irq(d) < 32;
> +}
> +
> +static inline void __iomem *gic_dist_base(struct irq_data *d)
> +{
> + if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
> + return gic_data_rdist_sgi_base();
> +
> + if (d->hwirq <= 1023) /* SPI -> dist_base */
> + return gic_data.dist_base;
> +
> + if (d->hwirq >= 8192)
> + BUG(); /* LPI Detected!!! */
> +
> + return NULL;
> +}
> +
> +static void gic_do_wait_for_rwp(void __iomem *base)
> +{
> + u32 count = 1000000; /* 1s! */
> +
> + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
> + count--;
> + if (!count) {
> + pr_err_ratelimited("RWP timeout, gone fishing\n");
> + return;
> + }
> + cpu_relax();
> + udelay(1);
> + };
> +}
> +
> +/* Wait for completion of a distributor change */
> +static void gic_dist_wait_for_rwp(void)
> +{
> + gic_do_wait_for_rwp(gic_data.dist_base);
> +}
> +
> +/* Wait for completion of a redistributor change */
> +static void gic_redist_wait_for_rwp(void)
> +{
> + gic_do_wait_for_rwp(gic_data_rdist_rd_base());
> +}
> +
> +/* Low level accessors */
> +static u64 gic_read_iar(void)
> +{
> + u64 irqstat;
> +
> + asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
> + return irqstat;
> +}
> +
> +static void gic_write_pmr(u64 val)
> +{
> + asm volatile("msr " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
> +}
> +
> +static void gic_write_ctlr(u64 val)
> +{
> + asm volatile("msr " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
> + isb();
> +}
> +
> +static void gic_write_grpen1(u64 val)
> +{
> + asm volatile("msr " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
> + isb();
> +}
> +
> +static void gic_write_sgi1r(u64 val)
> +{
> + asm volatile("msr " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
> +}
> +
> +static void gic_enable_sre(void)
> +{
> + u64 val;
> +
> + asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
> + val |= GICC_SRE_EL1_SRE;
> + asm volatile("msr " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
> + isb();
> +
> + /*
> + * Need to check that the SRE bit has actually been set. If
> + * not, it means that SRE is disabled at EL2. We're going to
> + * die painfully, and there is nothing we can do about it.
> + *
> + * Kindly inform the luser.
> + */
> + asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
> + if (!(val & GICC_SRE_EL1_SRE))
> + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
> +}
> +
> +static void gic_enable_redist(void)
> +{
> + void __iomem *rbase;
> + u32 count = 1000000; /* 1s! */
> + u32 val;
> +
> + rbase = gic_data_rdist_rd_base();
> +
> + /* Wake up this CPU redistributor */
> + val = readl_relaxed(rbase + GICR_WAKER);
> + val &= ~GICR_WAKER_ProcessorSleep;
> + writel_relaxed(val, rbase + GICR_WAKER);
> +
> + while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
> + count--;
> + if (!count) {
> + pr_err_ratelimited("redist didn't wake up...\n");
> + return;
> + }
> + cpu_relax();
> + udelay(1);
> + };
> +}
> +
> +/*
> + * Routines to acknowledge, disable and enable interrupts
> + */
> +static void gic_poke_irq(struct irq_data *d, u32 offset)
> +{
> + u32 mask = 1 << (gic_irq(d) % 32);
> + void (*rwp_wait)(void);
> + void __iomem *base;
> +
> + if (gic_irq_in_rdist(d)) {
> + base = gic_data_rdist_sgi_base();
> + rwp_wait = gic_redist_wait_for_rwp;
> + } else {
> + base = gic_data.dist_base;
> + rwp_wait = gic_dist_wait_for_rwp;
> + }
> +
> + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
> + rwp_wait();
> +}
> +
> +static int gic_peek_irq(struct irq_data *d, u32 offset)
> +{
> + u32 mask = 1 << (gic_irq(d) % 32);
> + void __iomem *base;
> +
> + if (gic_irq_in_rdist(d))
> + base = gic_data_rdist_sgi_base();
> + else
> + base = gic_data.dist_base;
> +
> + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
> +}
> +
> +static void gic_mask_irq(struct irq_data *d)
> +{
> + gic_poke_irq(d, GICD_ICENABLER);
> +}
> +
> +static void gic_unmask_irq(struct irq_data *d)
> +{
> + gic_poke_irq(d, GICD_ISENABLER);
> +}
> +
> +static void gic_eoi_irq(struct irq_data *d)
> +{
> + gic_write_eoir(gic_irq(d));
> +}
> +
> +static int gic_set_type(struct irq_data *d, unsigned int type)
> +{
> + unsigned int irq = gic_irq(d);
> + void (*rwp_wait)(void);
> + void __iomem *base;
> +
> + if (gic_irq_in_rdist(d)) {
> + base = gic_data_rdist_sgi_base();
> + rwp_wait = gic_redist_wait_for_rwp;
> + } else {
> + base = gic_data.dist_base;
> + rwp_wait = gic_dist_wait_for_rwp;
> + }
> +
> + /* Interrupt configuration for SGIs can't be changed */
> + if (irq < 16)
> + return -EINVAL;
> +
> + if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
> + return -EINVAL;
> +
> + gic_configure_irq(irq, type, base, rwp_wait);
I'm noticing here that we're only waiting for the write to config/enable
if the IRQ was already enabled in gic_configure_irq, but gic_unmask_irq
just enables the IRQ without waiting for the operation to complete.
So I'm wondering if there isn't a disconnect between configuring a
disabled IRQ and immediately unmasking it, versus configuring an enabled
IRQ?
> +
> + return 0;
> +}
> +
> +static u64 gic_mpidr_to_affinity(u64 mpidr)
> +{
> + u64 aff;
> +
> + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY;
> +
> + return aff;
> +}
> +
> +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
> +{
> + u64 irqnr;
> +
> + do {
> + irqnr = gic_read_iar();
> +
> + if (likely(irqnr > 15 && irqnr < 1021)) {
> + irqnr = irq_find_mapping(gic_data.domain, irqnr);
> + handle_IRQ(irqnr, regs);
> + continue;
> + }
> + if (irqnr < 16) {
> + gic_write_eoir(irqnr);
> +#ifdef CONFIG_SMP
> + handle_IPI(irqnr, regs);
> +#else
> + WARN_ONCE(true, "Unexpected SGI received!\n");
> +#endif
> + continue;
> + }
> + } while (irqnr != 0x3ff);
> +}
> +
> +static void __init gic_dist_init(void)
> +{
> + unsigned int i;
> + u64 affinity;
> + void __iomem *base = gic_data.dist_base;
> +
> + /* Disable the distributor */
> + writel_relaxed(0, base + GICD_CTLR);
> +
> + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
> +
> + /* Enable distributor with ARE, Group1 */
> + writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
> + base + GICD_CTLR);
> +
> + /*
> + * Set all global interrupts to the boot CPU only. ARE must be
> + * enabled.
> + */
> + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
> + for (i = 32; i < gic_data.irq_nr; i++)
> + writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
> +}
> +
> +static int gic_populate_rdist(void)
> +{
> + u64 mpidr = cpu_logical_map(smp_processor_id());
> + u64 typer;
> + u32 aff;
> + int i;
> +
> + /* Convery affinity to a 32bit value... */
s/Convery/Convert/
while you're at it, you could go the extra mile and specify "that can be
matched to GICR_TYPER bits [63:32]". If you really wanted to.
> + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
> + MPIDR_AFFINITY_LEVEL(mpidr, 0));
> +
> + for (i = 0; i < gic_data.redist_regions; i++) {
> + void __iomem *ptr = gic_data.redist_base[i];
> + u32 reg;
> +
> + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
> + if (reg != 0x30 && reg != 0x40) { /* We're in trouble... */
> + pr_warn("No redistributor present @%p\n", ptr);
> + break;
> + }
> +
> + do {
> + typer = readq_relaxed(ptr + GICR_TYPER);
> + if ((typer >> 32) == aff) {
> + gic_data_rdist_rd_base() = ptr;
> + pr_info("CPU%d: found redistributor %llx @%p\n",
> + smp_processor_id(),
> + (unsigned long long)mpidr, ptr);
> + return 0;
> + }
> +
> + if (gic_data.redist_stride) {
> + ptr += gic_data.redist_stride;
> + } else {
> + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
> + if (typer & GICR_TYPER_VLPIS)
> + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
> + }
> + } while (!(typer & GICR_TYPER_LAST));
> + }
> +
> + /* We couldn't even deal with ourselves... */
> + WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
> + smp_processor_id(), (unsigned long long)mpidr);
> + return -ENODEV;
> +}
> +
> +static void gic_cpu_init(void)
> +{
> + void __iomem *rbase;
> +
> + /* Register ourselves with the rest of the world */
> + if (gic_populate_rdist())
> + return;
> +
> + gic_enable_redist();
> +
> + rbase = gic_data_rdist_sgi_base();
> +
> + gic_cpu_config(rbase, gic_redist_wait_for_rwp);
> +
> + /* Enable system registers */
> + gic_enable_sre();
> +
> + /* Set priority mask register */
> + gic_write_pmr(DEFAULT_PMR_VALUE);
> +
> + /* EOI deactivates interrupt too (mode 0) */
> + gic_write_ctlr(GICC_CTLR_EL1_EOImode_drop_dir);
> +
> + /* ... and let's hit the road... */
> + gic_write_grpen1(1);
> +}
> +
> +#ifdef CONFIG_SMP
> +static int gic_secondary_init(struct notifier_block *nfb,
> + unsigned long action, void *hcpu)
> +{
> + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
> + gic_cpu_init();
> + return NOTIFY_OK;
> +}
> +
> +/*
> + * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
> + * priority because the GIC needs to be up before the ARM generic timers.
> + */
> +static struct notifier_block gic_cpu_notifier = {
> + .notifier_call = gic_secondary_init,
> + .priority = 100,
> +};
> +
> +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
> + u64 cluster_id)
> +{
> + int cpu = *base_cpu;
> + u64 mpidr = cpu_logical_map(cpu);
> + u16 tlist = 0;
> +
> + while (cpu < nr_cpu_ids) {
> + /*
> + * If we ever get a cluster of more than 16 CPUs, just
> + * scream and skip that CPU.
> + */
> + if (WARN_ON((mpidr & 0xff) >= 16))
> + goto out;
> +
> + tlist |= 1 << (mpidr & 0xf);
> +
> + cpu = cpumask_next(cpu, mask);
> + if (cpu == nr_cpu_ids)
> + goto out;
> +
> + mpidr = cpu_logical_map(cpu);
> +
> + if (cluster_id != (mpidr & ~0xffUL)) {
> + cpu--;
> + goto out;
> + }
> + }
> +out:
> + *base_cpu = cpu;
> + return tlist;
> +}
> +
> +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
> +{
> + u64 val;
> +
> + val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
> + MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
> + irq << 24 |
> + MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
> + tlist);
> +
> + pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
> + gic_write_sgi1r(val);
> +}
> +
> +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
> +{
> + int cpu;
> +
> + if (WARN_ON(irq >= 16))
> + return;
> +
> + /*
> + * Ensure that stores to Normal memory are visible to the
> + * other CPUs before issuing the IPI.
> + */
> + smp_wmb();
> +
> + for_each_cpu_mask(cpu, *mask) {
> + u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
> + u16 tlist;
> +
> + tlist = gic_compute_target_list(&cpu, mask, cluster_id);
> + gic_send_sgi(cluster_id, tlist, irq);
> + }
> +
> + /* Force the above writes to ICC_SGI1R_EL1 to be executed */
> + isb();
> +}
> +
> +static void gic_smp_init(void)
> +{
> + set_smp_cross_call(gic_raise_softirq);
> + register_cpu_notifier(&gic_cpu_notifier);
> +}
> +
> +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
> + bool force)
> +{
> + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
> + void __iomem *reg;
> + int enabled;
> + u64 val;
> +
> + if (gic_irq_in_rdist(d))
> + return -EINVAL;
> +
> + /* If interrupt was enabled, disable it first */
> + enabled = gic_peek_irq(d, GICD_ISENABLER);
> + if (enabled)
> + gic_mask_irq(d);
> +
> + reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
> + val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
> +
> + writeq_relaxed(val, reg);
> +
> + /*
> + * If the interrupt was enabled, enabled it again. Otherwise,
> + * just wait for the distributor to have digested our changes.
> + */
> + if (enabled)
> + gic_unmask_irq(d);
> + else
> + gic_dist_wait_for_rwp();
If we don't need to wait for RWP when enabling the interrupt, why do we
need to wait if we're not going to enable it (presumably nothing ever
takes effect until someone later enables the interrupt, in which case
the effect would be the same, no)?
> +
> + return IRQ_SET_MASK_OK;
> +}
> +#else
> +#define gic_set_affinity NULL
> +#define gic_smp_init() do { } while(0)
> +#endif
> +
> +static struct irq_chip gic_chip = {
> + .name = "GICv3",
> + .irq_mask = gic_mask_irq,
> + .irq_unmask = gic_unmask_irq,
> + .irq_eoi = gic_eoi_irq,
> + .irq_set_type = gic_set_type,
> + .irq_set_affinity = gic_set_affinity,
> +};
> +
> +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
> + irq_hw_number_t hw)
> +{
> + /* SGIs are private to the core kernel */
> + if (hw < 16)
> + return -EPERM;
> + /* PPIs */
> + if (hw < 32) {
> + irq_set_percpu_devid(irq);
> + irq_set_chip_and_handler(irq, &gic_chip,
> + handle_percpu_devid_irq);
> + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
> + }
> + /* SPIs */
> + if (hw >= 32 && hw < gic_data.irq_nr) {
> + irq_set_chip_and_handler(irq, &gic_chip,
> + handle_fasteoi_irq);
> + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
> + }
> + irq_set_chip_data(irq, d->host_data);
> + return 0;
> +}
> +
> +static int gic_irq_domain_xlate(struct irq_domain *d,
> + struct device_node *controller,
> + const u32 *intspec, unsigned int intsize,
> + unsigned long *out_hwirq, unsigned int *out_type)
> +{
> + if (d->of_node != controller)
> + return -EINVAL;
> + if (intsize < 3)
> + return -EINVAL;
> +
> + switch(intspec[0]) {
> + case 0: /* SPI */
> + *out_hwirq = intspec[1] + 32;
> + break;
> + case 1: /* PPI */
> + *out_hwirq = intspec[1] + 16;
> + break;
> + default:
> + return -EINVAL;
> + }
> +
> + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
> + return 0;
> +}
> +
> +static const struct irq_domain_ops gic_irq_domain_ops = {
> + .map = gic_irq_domain_map,
> + .xlate = gic_irq_domain_xlate,
> +};
> +
> +static int __init gic_of_init(struct device_node *node, struct device_node *parent)
> +{
> + void __iomem *dist_base;
> + void __iomem **redist_base;
> + u64 redist_stride;
> + u32 redist_regions;
> + u32 reg;
> + int gic_irqs;
> + int err;
> + int i;
> +
> + dist_base = of_iomap(node, 0);
> + if (!dist_base) {
> + pr_err("%s: unable to map gic dist registers\n",
> + node->full_name);
> + return -ENXIO;
> + }
> +
> + reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
> + if (reg != 0x30 && reg != 0x40) {
> + pr_err("%s: no distributor detected, giving up\n",
> + node->full_name);
> + err = -ENODEV;
> + goto out_unmap_dist;
> + }
> +
> + if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
> + redist_regions = 1;
> +
> + redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
> + if (!redist_base) {
> + err = -ENOMEM;
> + goto out_unmap_dist;
> + }
> +
> + for (i = 0; i < redist_regions; i++) {
> + redist_base[i] = of_iomap(node, 1 + i);
> + if (!redist_base[i]) {
> + pr_err("%s: couldn't map region %d\n",
> + node->full_name, i);
> + err = -ENODEV;
> + goto out_unmap_rdist;
> + }
> + }
> +
> + if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
> + redist_stride = 0;
> +
> + gic_data.dist_base = dist_base;
> + gic_data.redist_base = redist_base;
> + gic_data.redist_regions = redist_regions;
> + gic_data.redist_stride = redist_stride;
> +
> + /*
> + * Find out how many interrupts are supported.
> + * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
> + */
> + gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
> + gic_irqs = (gic_irqs + 1) * 32;
> + if (gic_irqs > 1020)
> + gic_irqs = 1020;
> + gic_data.irq_nr = gic_irqs;
> +
> + gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
> + &gic_data);
> + gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
> +
> + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
> + err = -ENOMEM;
> + goto out_free;
> + }
> +
> + set_handle_irq(gic_handle_irq);
> +
> + gic_smp_init();
> + gic_dist_init();
> + gic_cpu_init();
> +
> + return 0;
> +
> +out_free:
> + if (gic_data.domain)
> + irq_domain_remove(gic_data.domain);
> + free_percpu(gic_data.rdist);
> +out_unmap_rdist:
> + for (i = 0; i < redist_regions; i++)
> + if (redist_base[i])
> + iounmap(redist_base[i]);
> + kfree(redist_base);
> +out_unmap_dist:
> + iounmap(dist_base);
> + return err;
> +}
> +
> +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
> diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> new file mode 100644
> index 0000000..16c8b05
> --- /dev/null
> +++ b/include/linux/irqchip/arm-gic-v3.h
> @@ -0,0 +1,188 @@
> +/*
> + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
> +#define __LINUX_IRQCHIP_ARM_GIC_V3_H
> +
> +/*
> + * Distributor registers. We assume we're running non-secure, with ARE
> + * being set. Secure-only and non-ARE registers are not described.
> + */
> +#define GICD_CTLR 0x0000
> +#define GICD_TYPER 0x0004
> +#define GICD_IIDR 0x0008
> +#define GICD_STATUSR 0x0010
> +#define GICD_SETSPI_NSR 0x0040
> +#define GICD_CLRSPI_NSR 0x0048
> +#define GICD_SETSPI_SR 0x0050
> +#define GICD_CLRSPI_SR 0x0058
> +#define GICD_SEIR 0x0068
> +#define GICD_ISENABLER 0x0100
> +#define GICD_ICENABLER 0x0180
> +#define GICD_ISPENDR 0x0200
> +#define GICD_ICPENDR 0x0280
> +#define GICD_ISACTIVER 0x0300
> +#define GICD_ICACTIVER 0x0380
> +#define GICD_IPRIORITYR 0x0400
> +#define GICD_ICFGR 0x0C00
> +#define GICD_IROUTER 0x6000
> +#define GICD_PIDR2 0xFFE8
> +
> +#define GICD_CTLR_RWP (1U << 31)
> +#define GICD_CTLR_ARE_NS (1U << 4)
> +#define GICD_CTLR_ENABLE_G1A (1U << 1)
> +#define GICD_CTLR_ENABLE_G1 (1U << 0)
> +
> +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
> +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
> +
> +#define GIC_PIDR2_ARCH_MASK 0xf0
> +
> +/*
> + * Re-Distributor registers, offsets from RD_base
> + */
> +#define GICR_CTLR GICD_CTLR
> +#define GICR_IIDR 0x0004
> +#define GICR_TYPER 0x0008
> +#define GICR_STATUSR GICD_STATUSR
> +#define GICR_WAKER 0x0014
> +#define GICR_SETLPIR 0x0040
> +#define GICR_CLRLPIR 0x0048
> +#define GICR_SEIR GICD_SEIR
> +#define GICR_PROPBASER 0x0070
> +#define GICR_PENDBASER 0x0078
> +#define GICR_INVLPIR 0x00A0
> +#define GICR_INVALLR 0x00B0
> +#define GICR_SYNCR 0x00C0
> +#define GICR_MOVLPIR 0x0100
> +#define GICR_MOVALLR 0x0110
> +#define GICR_PIDR2 GICD_PIDR2
> +
> +#define GICR_WAKER_ProcessorSleep (1U << 1)
> +#define GICR_WAKER_ChildrenAsleep (1U << 2)
> +
> +/*
> + * Re-Distributor registers, offsets from SGI_base
> + */
> +#define GICR_ISENABLER0 GICD_ISENABLER
> +#define GICR_ICENABLER0 GICD_ICENABLER
> +#define GICR_ISPENDR0 GICD_ISPENDR
> +#define GICR_ICPENDR0 GICD_ICPENDR
> +#define GICR_ISACTIVER0 GICD_ISACTIVER
> +#define GICR_ICACTIVER0 GICD_ICACTIVER
> +#define GICR_IPRIORITYR0 GICD_IPRIORITYR
> +#define GICR_ICFGR0 GICD_ICFGR
> +
> +#define GICR_TYPER_VLPIS (1U << 1)
> +#define GICR_TYPER_LAST (1U << 4)
> +
> +/*
> + * CPU interface registers
> + */
> +#define GICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
> +#define GICC_CTLR_EL1_EOImode_drop (1U << 1)
> +#define GICC_SRE_EL1_SRE (1U << 0)
> +
> +/*
> + * Hypervisor interface registers (SRE only)
> + */
> +#define GICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
> +
> +#define GICH_LR_EOI (1UL << 41)
> +#define GICH_LR_GROUP (1UL << 60)
> +#define GICH_LR_STATE (3UL << 62)
> +#define GICH_LR_PENDING_BIT (1UL << 62)
> +#define GICH_LR_ACTIVE_BIT (1UL << 63)
> +
> +#define GICH_MISR_EOI (1 << 0)
> +#define GICH_MISR_U (1 << 1)
> +
> +#define GICH_HCR_EN (1 << 0)
> +#define GICH_HCR_UIE (1 << 1)
> +
> +#define GICH_VMCR_CTLR_SHIFT 0
> +#define GICH_VMCR_CTLR_MASK (0x21f << GICH_VMCR_CTLR_SHIFT)
> +#define GICH_VMCR_BPR1_SHIFT 18
> +#define GICH_VMCR_BPR1_MASK (7 << GICH_VMCR_BPR1_SHIFT)
> +#define GICH_VMCR_BPR0_SHIFT 21
> +#define GICH_VMCR_BPR0_MASK (7 << GICH_VMCR_BPR0_SHIFT)
> +#define GICH_VMCR_PMR_SHIFT 24
> +#define GICH_VMCR_PMR_MASK (0xffUL << GICH_VMCR_PMR_SHIFT)
> +
> +#define ICC_EOIR1_EL1 S3_0_C12_C12_1
> +#define ICC_IAR1_EL1 S3_0_C12_C12_0
> +#define ICC_SGI1R_EL1 S3_0_C12_C11_5
> +#define ICC_PMR_EL1 S3_0_C4_C6_0
> +#define ICC_CTLR_EL1 S3_0_C12_C12_4
> +#define ICC_SRE_EL1 S3_0_C12_C12_5
> +#define ICC_GRPEN1_EL1 S3_0_C12_C12_7
> +
> +#define ICC_SRE_EL2 S3_4_C12_C9_5
> +
> +#define ICH_VSEIR_EL2 S3_4_C12_C9_4
> +#define ICH_HCR_EL2 S3_4_C12_C11_0
> +#define ICH_VTR_EL2 S3_4_C12_C11_1
> +#define ICH_MISR_EL2 S3_4_C12_C11_2
> +#define ICH_EISR_EL2 S3_4_C12_C11_3
> +#define ICH_ELSR_EL2 S3_4_C12_C11_5
> +#define ICH_VMCR_EL2 S3_4_C12_C11_7
> +
> +#define __LR0_EL2(x) S3_4_C12_C12_ ## x
> +#define __LR8_EL2(x) S3_4_C12_C13_ ## x
> +
> +#define ICH_LR0_EL2 __LR0_EL2(0)
> +#define ICH_LR1_EL2 __LR0_EL2(1)
> +#define ICH_LR2_EL2 __LR0_EL2(2)
> +#define ICH_LR3_EL2 __LR0_EL2(3)
> +#define ICH_LR4_EL2 __LR0_EL2(4)
> +#define ICH_LR5_EL2 __LR0_EL2(5)
> +#define ICH_LR6_EL2 __LR0_EL2(6)
> +#define ICH_LR7_EL2 __LR0_EL2(7)
> +#define ICH_LR8_EL2 __LR8_EL2(0)
> +#define ICH_LR9_EL2 __LR8_EL2(1)
> +#define ICH_LR10_EL2 __LR8_EL2(2)
> +#define ICH_LR11_EL2 __LR8_EL2(3)
> +#define ICH_LR12_EL2 __LR8_EL2(4)
> +#define ICH_LR13_EL2 __LR8_EL2(5)
> +#define ICH_LR14_EL2 __LR8_EL2(6)
> +#define ICH_LR15_EL2 __LR8_EL2(7)
> +
> +#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x
> +#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
> +#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
> +#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
> +#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
> +
> +#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x
> +#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
> +#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
> +#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
> +#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
> +
> +#ifndef __ASSEMBLY__
> +
> +#include <linux/stringify.h>
> +
> +static inline void gic_write_eoir(u64 irq)
> +{
> + asm volatile("msr " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
> + isb();
> +}
> +
> +#endif
> +
> +#endif
> --
> 1.8.3.4
>
I noticed that you removed all the dist_lock locks here. I assume there
was some rationale behind this, and given that you can share that explanation
with me and address the other tiny issues:
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
* [PATCH v3 02/19] arm64: initial support for GICv3
2014-05-09 14:05 ` Christoffer Dall
@ 2014-05-12 16:54 ` Marc Zyngier
2014-05-14 16:02 ` Christoffer Dall
0 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 16:54 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:05:22 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:34PM +0100, Marc Zyngier wrote:
>> The Generic Interrupt Controller (version 3) offers services that are
>> similar to GICv2, with a number of additional features:
>> - Affinity routing based on the CPU MPIDR (ARE)
>> - System register for the CPU interfaces (SRE)
>> - Support for more that 8 CPUs
>> - Locality-specific Peripheral Interrupts (LPIs)
>> - Interrupt Translation Services (ITS)
>>
>> This patch adds preliminary support for GICv3 with ARE and SRE,
>> non-secure mode only. It relies on higher exception levels to grant ARE
>> and SRE access.
>>
>> Support for LPI and ITS will be added at a later time.
>>
>> Cc: Thomas Gleixner <tglx@linutronix.de>
>> Reviewed-by: Zi Shen Lim <zlim@broadcom.com>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> arch/arm64/Kconfig | 1 +
>> arch/arm64/kernel/head.S | 18 +
>> arch/arm64/kernel/hyp-stub.S | 1 +
>> drivers/irqchip/Kconfig | 5 +
>> drivers/irqchip/Makefile | 1 +
>> drivers/irqchip/irq-gic-v3.c | 681 +++++++++++++++++++++++++++++++++++++
>> include/linux/irqchip/arm-gic-v3.h | 188 ++++++++++
>> 7 files changed, 895 insertions(+)
>> create mode 100644 drivers/irqchip/irq-gic-v3.c
>> create mode 100644 include/linux/irqchip/arm-gic-v3.h
>>
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index 27bbcfc..5bd0dfe 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -9,6 +9,7 @@ config ARM64
>> select ARM_AMBA
>> select ARM_ARCH_TIMER
>> select ARM_GIC
>> + select ARM_GIC_V3
>> select BUILDTIME_EXTABLE_SORT
>> select CLONE_BACKWARDS
>> select COMMON_CLK
>> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
>> index 0b281ff..7c56e93 100644
>> --- a/arch/arm64/kernel/head.S
>> +++ b/arch/arm64/kernel/head.S
>> @@ -22,6 +22,7 @@
>>
>> #include <linux/linkage.h>
>> #include <linux/init.h>
>> +#include <linux/irqchip/arm-gic-v3.h>
>>
>> #include <asm/assembler.h>
>> #include <asm/ptrace.h>
>> @@ -183,6 +184,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
>> msr cnthctl_el2, x0
>> msr cntvoff_el2, xzr // Clear virtual offset
>>
>> +#ifdef CONFIG_ARM_GIC_V3
>> + /* GICv3 system register access */
>> + mrs x0, id_aa64pfr0_el1
>> + ubfx x0, x0, #24, #4
>> + cmp x0, #1
>> + b.ne 3f
>> +
>> + mrs x0, ICC_SRE_EL2
>> + orr x0, x0, #1 // Set ICC_SRE_EL2.SRE==1
>> + orr x0, x0, #(1 << 3) // Set ICC_SRE_EL2.Enable==1
>> + msr ICC_SRE_EL2, x0
>> + isb // Make sure SRE is now 1
>> + msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
>> +
>> +3:
>> +#endif
>> +
>> /* Populate ID registers. */
>> mrs x0, midr_el1
>> mrs x1, mpidr_el1
>> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
>> index 0959611..a272f33 100644
>> --- a/arch/arm64/kernel/hyp-stub.S
>> +++ b/arch/arm64/kernel/hyp-stub.S
>> @@ -19,6 +19,7 @@
>>
>> #include <linux/init.h>
>> #include <linux/linkage.h>
>> +#include <linux/irqchip/arm-gic-v3.h>
>>
>> #include <asm/assembler.h>
>> #include <asm/ptrace.h>
>> diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
>> index 61ffdca..5b55c46 100644
>> --- a/drivers/irqchip/Kconfig
>> +++ b/drivers/irqchip/Kconfig
>> @@ -10,6 +10,11 @@ config ARM_GIC
>> config GIC_NON_BANKED
>> bool
>>
>> +config ARM_GIC_V3
>> + bool
>> + select IRQ_DOMAIN
>> + select MULTI_IRQ_HANDLER
>> +
>> config ARM_NVIC
>> bool
>> select IRQ_DOMAIN
>> diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
>> index 22e616c..4adfda8 100644
>> --- a/drivers/irqchip/Makefile
>> +++ b/drivers/irqchip/Makefile
>> @@ -14,6 +14,7 @@ obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
>> obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
>> obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
>> obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
>> +obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
>> obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
>> obj-$(CONFIG_ARM_VIC) += irq-vic.o
>> obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
>> diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
>> new file mode 100644
>> index 0000000..b4c8140
>> --- /dev/null
>> +++ b/drivers/irqchip/irq-gic-v3.c
>> @@ -0,0 +1,681 @@
>> +/*
>> + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/cpu.h>
>> +#include <linux/delay.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/of.h>
>> +#include <linux/of_address.h>
>> +#include <linux/of_irq.h>
>> +#include <linux/percpu.h>
>> +#include <linux/slab.h>
>> +
>> +#include <linux/irqchip/arm-gic-v3.h>
>> +
>> +#include <asm/cputype.h>
>> +#include <asm/exception.h>
>> +#include <asm/smp_plat.h>
>> +
>> +#include "irq-gic-common.h"
>> +#include "irqchip.h"
>> +
>> +struct gic_chip_data {
>> + void __iomem *dist_base;
>> + void __iomem **redist_base;
>> + void __percpu __iomem **rdist;
>> + struct irq_domain *domain;
>> + u64 redist_stride;
>> + u32 redist_regions;
>> + unsigned int irq_nr;
>> +};
>> +
>> +static struct gic_chip_data gic_data __read_mostly;
>> +
>> +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
>> +#define gic_data_rdist_rd_base() (*gic_data_rdist())
>> +#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
>> +
>> +#define DEFAULT_PMR_VALUE 0xf0
>> +
>> +static inline unsigned int gic_irq(struct irq_data *d)
>> +{
>> + return d->hwirq;
>> +}
>> +
>> +static inline int gic_irq_in_rdist(struct irq_data *d)
>> +{
>> + return gic_irq(d) < 32;
>> +}
>> +
>> +static inline void __iomem *gic_dist_base(struct irq_data *d)
>> +{
>> + if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
>> + return gic_data_rdist_sgi_base();
>> +
>> + if (d->hwirq <= 1023) /* SPI -> dist_base */
>> + return gic_data.dist_base;
>> +
>> + if (d->hwirq >= 8192)
>> + BUG(); /* LPI Detected!!! */
>> +
>> + return NULL;
>> +}
>> +
>> +static void gic_do_wait_for_rwp(void __iomem *base)
>> +{
>> + u32 count = 1000000; /* 1s! */
>> +
>> + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
>> + count--;
>> + if (!count) {
>> + pr_err_ratelimited("RWP timeout, gone fishing\n");
>> + return;
>> + }
>> + cpu_relax();
>> + udelay(1);
>> + };
>> +}
>> +
>> +/* Wait for completion of a distributor change */
>> +static void gic_dist_wait_for_rwp(void)
>> +{
>> + gic_do_wait_for_rwp(gic_data.dist_base);
>> +}
>> +
>> +/* Wait for completion of a redistributor change */
>> +static void gic_redist_wait_for_rwp(void)
>> +{
>> + gic_do_wait_for_rwp(gic_data_rdist_rd_base());
>> +}
>> +
>> +/* Low level accessors */
>> +static u64 gic_read_iar(void)
>> +{
>> + u64 irqstat;
>> +
>> + asm volatile("mrs %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
>> + return irqstat;
>> +}
>> +
>> +static void gic_write_pmr(u64 val)
>> +{
>> + asm volatile("msr " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
>> +}
>> +
>> +static void gic_write_ctlr(u64 val)
>> +{
>> + asm volatile("msr " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
>> + isb();
>> +}
>> +
>> +static void gic_write_grpen1(u64 val)
>> +{
>> + asm volatile("msr " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
>> + isb();
>> +}
>> +
>> +static void gic_write_sgi1r(u64 val)
>> +{
>> + asm volatile("msr " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
>> +}
>> +
>> +static void gic_enable_sre(void)
>> +{
>> + u64 val;
>> +
>> + asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
>> + val |= GICC_SRE_EL1_SRE;
>> + asm volatile("msr " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
>> + isb();
>> +
>> + /*
>> + * Need to check that the SRE bit has actually been set. If
>> + * not, it means that SRE is disabled at EL2. We're going to
>> + * die painfully, and there is nothing we can do about it.
>> + *
>> + * Kindly inform the luser.
>> + */
>> + asm volatile("mrs %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
>> + if (!(val & GICC_SRE_EL1_SRE))
>> + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
>> +}
>> +
>> +static void gic_enable_redist(void)
>> +{
>> + void __iomem *rbase;
>> + u32 count = 1000000; /* 1s! */
>> + u32 val;
>> +
>> + rbase = gic_data_rdist_rd_base();
>> +
>> + /* Wake up this CPU redistributor */
>> + val = readl_relaxed(rbase + GICR_WAKER);
>> + val &= ~GICR_WAKER_ProcessorSleep;
>> + writel_relaxed(val, rbase + GICR_WAKER);
>> +
>> + while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
>> + count--;
>> + if (!count) {
>> + pr_err_ratelimited("redist didn't wake up...\n");
>> + return;
>> + }
>> + cpu_relax();
>> + udelay(1);
>> + };
>> +}
>> +
>> +/*
>> + * Routines to acknowledge, disable and enable interrupts
>> + */
>> +static void gic_poke_irq(struct irq_data *d, u32 offset)
>> +{
>> + u32 mask = 1 << (gic_irq(d) % 32);
>> + void (*rwp_wait)(void);
>> + void __iomem *base;
>> +
>> + if (gic_irq_in_rdist(d)) {
>> + base = gic_data_rdist_sgi_base();
>> + rwp_wait = gic_redist_wait_for_rwp;
>> + } else {
>> + base = gic_data.dist_base;
>> + rwp_wait = gic_dist_wait_for_rwp;
>> + }
>> +
>> + writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
>> + rwp_wait();
>> +}
>> +
>> +static int gic_peek_irq(struct irq_data *d, u32 offset)
>> +{
>> + u32 mask = 1 << (gic_irq(d) % 32);
>> + void __iomem *base;
>> +
>> + if (gic_irq_in_rdist(d))
>> + base = gic_data_rdist_sgi_base();
>> + else
>> + base = gic_data.dist_base;
>> +
>> + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
>> +}
>> +
>> +static void gic_mask_irq(struct irq_data *d)
>> +{
>> + gic_poke_irq(d, GICD_ICENABLER);
>> +}
>> +
>> +static void gic_unmask_irq(struct irq_data *d)
>> +{
>> + gic_poke_irq(d, GICD_ISENABLER);
>> +}
>> +
>> +static void gic_eoi_irq(struct irq_data *d)
>> +{
>> + gic_write_eoir(gic_irq(d));
>> +}
>> +
>> +static int gic_set_type(struct irq_data *d, unsigned int type)
>> +{
>> + unsigned int irq = gic_irq(d);
>> + void (*rwp_wait)(void);
>> + void __iomem *base;
>> +
>> + if (gic_irq_in_rdist(d)) {
>> + base = gic_data_rdist_sgi_base();
>> + rwp_wait = gic_redist_wait_for_rwp;
>> + } else {
>> + base = gic_data.dist_base;
>> + rwp_wait = gic_dist_wait_for_rwp;
>> + }
>> +
>> + /* Interrupt configuration for SGIs can't be changed */
>> + if (irq < 16)
>> + return -EINVAL;
>> +
>> + if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
>> + return -EINVAL;
>> +
>> + gic_configure_irq(irq, type, base, rwp_wait);
>
> I'm noticing here that we're only waiting for the write to config/enable
> if the IRQ was already enabled in gic_configure_irq, but gic_unmask_irq
> just enables the IRQ without waiting for the operation to complete.
>
> So I'm wondering if there isn't a disconnect between configuring a
> disabled IRQ and immediately unmasking it, versus configuring an enabled
> IRQ?
That's a good point. gic_configure_irq() should wait for rwp
unconditionally in the epilogue.
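Something like this at the tail of gic_configure_irq() (completely
untested sketch, not the actual fix; "sync" stands for whatever
callback the caller passes in, i.e. rwp_wait in the hunk above):

	/* Always let the (re)distributor digest the new configuration */
	if (sync)
		sync();

i.e. invoke it once in the epilogue rather than only when the
interrupt had to be re-enabled.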
>> +
>> + return 0;
>> +}
>> +
>> +static u64 gic_mpidr_to_affinity(u64 mpidr)
>> +{
>> + u64 aff;
>> +
>> + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY;
>> +
>> + return aff;
>> +}
>> +
>> +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
>> +{
>> + u64 irqnr;
>> +
>> + do {
>> + irqnr = gic_read_iar();
>> +
>> + if (likely(irqnr > 15 && irqnr < 1021)) {
>> + irqnr = irq_find_mapping(gic_data.domain, irqnr);
>> + handle_IRQ(irqnr, regs);
>> + continue;
>> + }
>> + if (irqnr < 16) {
>> + gic_write_eoir(irqnr);
>> +#ifdef CONFIG_SMP
>> + handle_IPI(irqnr, regs);
>> +#else
>> + WARN_ONCE(true, "Unexpected SGI received!\n");
>> +#endif
>> + continue;
>> + }
>> + } while (irqnr != 0x3ff);
>> +}
>> +
>> +static void __init gic_dist_init(void)
>> +{
>> + unsigned int i;
>> + u64 affinity;
>> + void __iomem *base = gic_data.dist_base;
>> +
>> + /* Disable the distributor */
>> + writel_relaxed(0, base + GICD_CTLR);
>> +
>> + gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
>> +
>> + /* Enable distributor with ARE, Group1 */
>> + writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
>> + base + GICD_CTLR);
>> +
>> + /*
>> + * Set all global interrupts to the boot CPU only. ARE must be
>> + * enabled.
>> + */
>> + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
>> + for (i = 32; i < gic_data.irq_nr; i++)
>> + writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
>> +}
>> +
>> +static int gic_populate_rdist(void)
>> +{
>> + u64 mpidr = cpu_logical_map(smp_processor_id());
>> + u64 typer;
>> + u32 aff;
>> + int i;
>> +
>> + /* Convery affinity to a 32bit value... */
>
> s/Convery/Convert/
>
> while you're at it, you could go the extra mile and specify "that can be
> matched to GICR_TYPER bits [63:32]". If you really wanted to.
Sure.
>> + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
>> + MPIDR_AFFINITY_LEVEL(mpidr, 0));
>> +
>> + for (i = 0; i < gic_data.redist_regions; i++) {
>> + void __iomem *ptr = gic_data.redist_base[i];
>> + u32 reg;
>> +
>> + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
>> + if (reg != 0x30 && reg != 0x40) { /* We're in trouble... */
>> + pr_warn("No redistributor present @%p\n", ptr);
>> + break;
>> + }
>> +
>> + do {
>> + typer = readq_relaxed(ptr + GICR_TYPER);
>> + if ((typer >> 32) == aff) {
>> + gic_data_rdist_rd_base() = ptr;
>> + pr_info("CPU%d: found redistributor %llx @%p\n",
>> + smp_processor_id(),
>> + (unsigned long long)mpidr, ptr);
>> + return 0;
>> + }
>> +
>> + if (gic_data.redist_stride) {
>> + ptr += gic_data.redist_stride;
>> + } else {
>> + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
>> + if (typer & GICR_TYPER_VLPIS)
>> + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
>> + }
>> + } while (!(typer & GICR_TYPER_LAST));
>> + }
>> +
>> + /* We couldn't even deal with ourselves... */
>> + WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
>> + smp_processor_id(), (unsigned long long)mpidr);
>> + return -ENODEV;
>> +}
>> +
>> +static void gic_cpu_init(void)
>> +{
>> + void __iomem *rbase;
>> +
>> + /* Register ourselves with the rest of the world */
>> + if (gic_populate_rdist())
>> + return;
>> +
>> + gic_enable_redist();
>> +
>> + rbase = gic_data_rdist_sgi_base();
>> +
>> + gic_cpu_config(rbase, gic_redist_wait_for_rwp);
>> +
>> + /* Enable system registers */
>> + gic_enable_sre();
>> +
>> + /* Set priority mask register */
>> + gic_write_pmr(DEFAULT_PMR_VALUE);
>> +
>> + /* EOI deactivates interrupt too (mode 0) */
>> + gic_write_ctlr(GICC_CTLR_EL1_EOImode_drop_dir);
>> +
>> + /* ... and let's hit the road... */
>> + gic_write_grpen1(1);
>> +}
>> +
>> +#ifdef CONFIG_SMP
>> +static int gic_secondary_init(struct notifier_block *nfb,
>> + unsigned long action, void *hcpu)
>> +{
>> + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
>> + gic_cpu_init();
>> + return NOTIFY_OK;
>> +}
>> +
>> +/*
>> + * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
>> + * priority because the GIC needs to be up before the ARM generic timers.
>> + */
>> +static struct notifier_block gic_cpu_notifier = {
>> + .notifier_call = gic_secondary_init,
>> + .priority = 100,
>> +};
>> +
>> +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
>> + u64 cluster_id)
>> +{
>> + int cpu = *base_cpu;
>> + u64 mpidr = cpu_logical_map(cpu);
>> + u16 tlist = 0;
>> +
>> + while (cpu < nr_cpu_ids) {
>> + /*
>> + * If we ever get a cluster of more than 16 CPUs, just
>> + * scream and skip that CPU.
>> + */
>> + if (WARN_ON((mpidr & 0xff) >= 16))
>> + goto out;
>> +
>> + tlist |= 1 << (mpidr & 0xf);
>> +
>> + cpu = cpumask_next(cpu, mask);
>> + if (cpu == nr_cpu_ids)
>> + goto out;
>> +
>> + mpidr = cpu_logical_map(cpu);
>> +
>> + if (cluster_id != (mpidr & ~0xffUL)) {
>> + cpu--;
>> + goto out;
>> + }
>> + }
>> +out:
>> + *base_cpu = cpu;
>> + return tlist;
>> +}
>> +
>> +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
>> +{
>> + u64 val;
>> +
>> + val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
>> + MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
>> + irq << 24 |
>> + MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
>> + tlist);
>> +
>> + pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
>> + gic_write_sgi1r(val);
>> +}
>> +
>> +static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
>> +{
>> + int cpu;
>> +
>> + if (WARN_ON(irq >= 16))
>> + return;
>> +
>> + /*
>> + * Ensure that stores to Normal memory are visible to the
>> + * other CPUs before issuing the IPI.
>> + */
>> + smp_wmb();
>> +
>> + for_each_cpu_mask(cpu, *mask) {
>> + u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
>> + u16 tlist;
>> +
>> + tlist = gic_compute_target_list(&cpu, mask, cluster_id);
>> + gic_send_sgi(cluster_id, tlist, irq);
>> + }
>> +
>> + /* Force the above writes to ICC_SGI1R_EL1 to be executed */
>> + isb();
>> +}
>> +
>> +static void gic_smp_init(void)
>> +{
>> + set_smp_cross_call(gic_raise_softirq);
>> + register_cpu_notifier(&gic_cpu_notifier);
>> +}
>> +
>> +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
>> + bool force)
>> +{
>> + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
>> + void __iomem *reg;
>> + int enabled;
>> + u64 val;
>> +
>> + if (gic_irq_in_rdist(d))
>> + return -EINVAL;
>> +
>> + /* If interrupt was enabled, disable it first */
>> + enabled = gic_peek_irq(d, GICD_ISENABLER);
>> + if (enabled)
>> + gic_mask_irq(d);
>> +
>> + reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
>> + val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
>> +
>> + writeq_relaxed(val, reg);
>> +
>> + /*
>> + * If the interrupt was enabled, enabled it again. Otherwise,
>> + * just wait for the distributor to have digested our changes.
>> + */
>> + if (enabled)
>> + gic_unmask_irq(d);
>> + else
>> + gic_dist_wait_for_rwp();
>
> If we don't need to wait for RWP when enabling the interrupt, why do we
> need to wait if we're not going to enable it (presumably nothing ever
> takes effect until someone later enables the interrupt, in which case
> the effect would be the same, no)?
We do wait on the unmask path too (see gic_poke_irq).
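Spelling out the call chain from the code quoted above (nothing new,
just the path as posted):

	gic_unmask_irq(d)
	  -> gic_poke_irq(d, GICD_ISENABLER)
	       -> writel_relaxed(mask, base + GICD_ISENABLER + (gic_irq(d) / 32) * 4)
	       -> rwp_wait()	/* gic_dist_wait_for_rwp or gic_redist_wait_for_rwp */

so a configure-then-unmask sequence does get a wait on the unmask.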
>> +
>> + return IRQ_SET_MASK_OK;
>> +}
>> +#else
>> +#define gic_set_affinity NULL
>> +#define gic_smp_init() do { } while(0)
>> +#endif
>> +
>> +static struct irq_chip gic_chip = {
>> + .name = "GICv3",
>> + .irq_mask = gic_mask_irq,
>> + .irq_unmask = gic_unmask_irq,
>> + .irq_eoi = gic_eoi_irq,
>> + .irq_set_type = gic_set_type,
>> + .irq_set_affinity = gic_set_affinity,
>> +};
>> +
>> +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
>> + irq_hw_number_t hw)
>> +{
>> + /* SGIs are private to the core kernel */
>> + if (hw < 16)
>> + return -EPERM;
>> + /* PPIs */
>> + if (hw < 32) {
>> + irq_set_percpu_devid(irq);
>> + irq_set_chip_and_handler(irq, &gic_chip,
>> + handle_percpu_devid_irq);
>> + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
>> + }
>> + /* SPIs */
>> + if (hw >= 32 && hw < gic_data.irq_nr) {
>> + irq_set_chip_and_handler(irq, &gic_chip,
>> + handle_fasteoi_irq);
>> + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
>> + }
>> + irq_set_chip_data(irq, d->host_data);
>> + return 0;
>> +}
>> +
>> +static int gic_irq_domain_xlate(struct irq_domain *d,
>> + struct device_node *controller,
>> + const u32 *intspec, unsigned int intsize,
>> + unsigned long *out_hwirq, unsigned int *out_type)
>> +{
>> + if (d->of_node != controller)
>> + return -EINVAL;
>> + if (intsize < 3)
>> + return -EINVAL;
>> +
>> + switch(intspec[0]) {
>> + case 0: /* SPI */
>> + *out_hwirq = intspec[1] + 32;
>> + break;
>> + case 1: /* PPI */
>> + *out_hwirq = intspec[1] + 16;
>> + break;
>> + default:
>> + return -EINVAL;
>> + }
>> +
>> + *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
>> + return 0;
>> +}
>> +
>> +static const struct irq_domain_ops gic_irq_domain_ops = {
>> + .map = gic_irq_domain_map,
>> + .xlate = gic_irq_domain_xlate,
>> +};
>> +
>> +static int __init gic_of_init(struct device_node *node, struct device_node *parent)
>> +{
>> + void __iomem *dist_base;
>> + void __iomem **redist_base;
>> + u64 redist_stride;
>> + u32 redist_regions;
>> + u32 reg;
>> + int gic_irqs;
>> + int err;
>> + int i;
>> +
>> + dist_base = of_iomap(node, 0);
>> + if (!dist_base) {
>> + pr_err("%s: unable to map gic dist registers\n",
>> + node->full_name);
>> + return -ENXIO;
>> + }
>> +
>> + reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
>> + if (reg != 0x30 && reg != 0x40) {
>> + pr_err("%s: no distributor detected, giving up\n",
>> + node->full_name);
>> + err = -ENODEV;
>> + goto out_unmap_dist;
>> + }
>> +
>> + if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
>> + redist_regions = 1;
>> +
>> + redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
>> + if (!redist_base) {
>> + err = -ENOMEM;
>> + goto out_unmap_dist;
>> + }
>> +
>> + for (i = 0; i < redist_regions; i++) {
>> + redist_base[i] = of_iomap(node, 1 + i);
>> + if (!redist_base[i]) {
>> + pr_err("%s: couldn't map region %d\n",
>> + node->full_name, i);
>> + err = -ENODEV;
>> + goto out_unmap_rdist;
>> + }
>> + }
>> +
>> + if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
>> + redist_stride = 0;
>> +
>> + gic_data.dist_base = dist_base;
>> + gic_data.redist_base = redist_base;
>> + gic_data.redist_regions = redist_regions;
>> + gic_data.redist_stride = redist_stride;
>> +
>> + /*
>> + * Find out how many interrupts are supported.
>> + * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
>> + */
>> + gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
>> + gic_irqs = (gic_irqs + 1) * 32;
>> + if (gic_irqs > 1020)
>> + gic_irqs = 1020;
>> + gic_data.irq_nr = gic_irqs;
>> +
>> + gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
>> + &gic_data);
>> + gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
>> +
>> + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
>> + err = -ENOMEM;
>> + goto out_free;
>> + }
>> +
>> + set_handle_irq(gic_handle_irq);
>> +
>> + gic_smp_init();
>> + gic_dist_init();
>> + gic_cpu_init();
>> +
>> + return 0;
>> +
>> +out_free:
>> + if (gic_data.domain)
>> + irq_domain_remove(gic_data.domain);
>> + free_percpu(gic_data.rdist);
>> +out_unmap_rdist:
>> + for (i = 0; i < redist_regions; i++)
>> + if (redist_base[i])
>> + iounmap(redist_base[i]);
>> + kfree(redist_base);
>> +out_unmap_dist:
>> + iounmap(dist_base);
>> + return err;
>> +}
>> +
>> +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
>> diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
>> new file mode 100644
>> index 0000000..16c8b05
>> --- /dev/null
>> +++ b/include/linux/irqchip/arm-gic-v3.h
>> @@ -0,0 +1,188 @@
>> +/*
>> + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
>> +#define __LINUX_IRQCHIP_ARM_GIC_V3_H
>> +
>> +/*
>> + * Distributor registers. We assume we're running non-secure, with ARE
>> + * being set. Secure-only and non-ARE registers are not described.
>> + */
>> +#define GICD_CTLR 0x0000
>> +#define GICD_TYPER 0x0004
>> +#define GICD_IIDR 0x0008
>> +#define GICD_STATUSR 0x0010
>> +#define GICD_SETSPI_NSR 0x0040
>> +#define GICD_CLRSPI_NSR 0x0048
>> +#define GICD_SETSPI_SR 0x0050
>> +#define GICD_CLRSPI_SR 0x0058
>> +#define GICD_SEIR 0x0068
>> +#define GICD_ISENABLER 0x0100
>> +#define GICD_ICENABLER 0x0180
>> +#define GICD_ISPENDR 0x0200
>> +#define GICD_ICPENDR 0x0280
>> +#define GICD_ISACTIVER 0x0300
>> +#define GICD_ICACTIVER 0x0380
>> +#define GICD_IPRIORITYR 0x0400
>> +#define GICD_ICFGR 0x0C00
>> +#define GICD_IROUTER 0x6000
>> +#define GICD_PIDR2 0xFFE8
>> +
>> +#define GICD_CTLR_RWP (1U << 31)
>> +#define GICD_CTLR_ARE_NS (1U << 4)
>> +#define GICD_CTLR_ENABLE_G1A (1U << 1)
>> +#define GICD_CTLR_ENABLE_G1 (1U << 0)
>> +
>> +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
>> +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
>> +
>> +#define GIC_PIDR2_ARCH_MASK 0xf0
>> +
>> +/*
>> + * Re-Distributor registers, offsets from RD_base
>> + */
>> +#define GICR_CTLR GICD_CTLR
>> +#define GICR_IIDR 0x0004
>> +#define GICR_TYPER 0x0008
>> +#define GICR_STATUSR GICD_STATUSR
>> +#define GICR_WAKER 0x0014
>> +#define GICR_SETLPIR 0x0040
>> +#define GICR_CLRLPIR 0x0048
>> +#define GICR_SEIR GICD_SEIR
>> +#define GICR_PROPBASER 0x0070
>> +#define GICR_PENDBASER 0x0078
>> +#define GICR_INVLPIR 0x00A0
>> +#define GICR_INVALLR 0x00B0
>> +#define GICR_SYNCR 0x00C0
>> +#define GICR_MOVLPIR 0x0100
>> +#define GICR_MOVALLR 0x0110
>> +#define GICR_PIDR2 GICD_PIDR2
>> +
>> +#define GICR_WAKER_ProcessorSleep (1U << 1)
>> +#define GICR_WAKER_ChildrenAsleep (1U << 2)
>> +
>> +/*
>> + * Re-Distributor registers, offsets from SGI_base
>> + */
>> +#define GICR_ISENABLER0 GICD_ISENABLER
>> +#define GICR_ICENABLER0 GICD_ICENABLER
>> +#define GICR_ISPENDR0 GICD_ISPENDR
>> +#define GICR_ICPENDR0 GICD_ICPENDR
>> +#define GICR_ISACTIVER0 GICD_ISACTIVER
>> +#define GICR_ICACTIVER0 GICD_ICACTIVER
>> +#define GICR_IPRIORITYR0 GICD_IPRIORITYR
>> +#define GICR_ICFGR0 GICD_ICFGR
>> +
>> +#define GICR_TYPER_VLPIS (1U << 1)
>> +#define GICR_TYPER_LAST (1U << 4)
>> +
>> +/*
>> + * CPU interface registers
>> + */
>> +#define GICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
>> +#define GICC_CTLR_EL1_EOImode_drop (1U << 1)
>> +#define GICC_SRE_EL1_SRE (1U << 0)
>> +
>> +/*
>> + * Hypervisor interface registers (SRE only)
>> + */
>> +#define GICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
>> +
>> +#define GICH_LR_EOI (1UL << 41)
>> +#define GICH_LR_GROUP (1UL << 60)
>> +#define GICH_LR_STATE (3UL << 62)
>> +#define GICH_LR_PENDING_BIT (1UL << 62)
>> +#define GICH_LR_ACTIVE_BIT (1UL << 63)
>> +
>> +#define GICH_MISR_EOI (1 << 0)
>> +#define GICH_MISR_U (1 << 1)
>> +
>> +#define GICH_HCR_EN (1 << 0)
>> +#define GICH_HCR_UIE (1 << 1)
>> +
>> +#define GICH_VMCR_CTLR_SHIFT 0
>> +#define GICH_VMCR_CTLR_MASK (0x21f << GICH_VMCR_CTLR_SHIFT)
>> +#define GICH_VMCR_BPR1_SHIFT 18
>> +#define GICH_VMCR_BPR1_MASK (7 << GICH_VMCR_BPR1_SHIFT)
>> +#define GICH_VMCR_BPR0_SHIFT 21
>> +#define GICH_VMCR_BPR0_MASK (7 << GICH_VMCR_BPR0_SHIFT)
>> +#define GICH_VMCR_PMR_SHIFT 24
>> +#define GICH_VMCR_PMR_MASK (0xffUL << GICH_VMCR_PMR_SHIFT)
>> +
>> +#define ICC_EOIR1_EL1 S3_0_C12_C12_1
>> +#define ICC_IAR1_EL1 S3_0_C12_C12_0
>> +#define ICC_SGI1R_EL1 S3_0_C12_C11_5
>> +#define ICC_PMR_EL1 S3_0_C4_C6_0
>> +#define ICC_CTLR_EL1 S3_0_C12_C12_4
>> +#define ICC_SRE_EL1 S3_0_C12_C12_5
>> +#define ICC_GRPEN1_EL1 S3_0_C12_C12_7
>> +
>> +#define ICC_SRE_EL2 S3_4_C12_C9_5
>> +
>> +#define ICH_VSEIR_EL2 S3_4_C12_C9_4
>> +#define ICH_HCR_EL2 S3_4_C12_C11_0
>> +#define ICH_VTR_EL2 S3_4_C12_C11_1
>> +#define ICH_MISR_EL2 S3_4_C12_C11_2
>> +#define ICH_EISR_EL2 S3_4_C12_C11_3
>> +#define ICH_ELSR_EL2 S3_4_C12_C11_5
>> +#define ICH_VMCR_EL2 S3_4_C12_C11_7
>> +
>> +#define __LR0_EL2(x) S3_4_C12_C12_ ## x
>> +#define __LR8_EL2(x) S3_4_C12_C13_ ## x
>> +
>> +#define ICH_LR0_EL2 __LR0_EL2(0)
>> +#define ICH_LR1_EL2 __LR0_EL2(1)
>> +#define ICH_LR2_EL2 __LR0_EL2(2)
>> +#define ICH_LR3_EL2 __LR0_EL2(3)
>> +#define ICH_LR4_EL2 __LR0_EL2(4)
>> +#define ICH_LR5_EL2 __LR0_EL2(5)
>> +#define ICH_LR6_EL2 __LR0_EL2(6)
>> +#define ICH_LR7_EL2 __LR0_EL2(7)
>> +#define ICH_LR8_EL2 __LR8_EL2(0)
>> +#define ICH_LR9_EL2 __LR8_EL2(1)
>> +#define ICH_LR10_EL2 __LR8_EL2(2)
>> +#define ICH_LR11_EL2 __LR8_EL2(3)
>> +#define ICH_LR12_EL2 __LR8_EL2(4)
>> +#define ICH_LR13_EL2 __LR8_EL2(5)
>> +#define ICH_LR14_EL2 __LR8_EL2(6)
>> +#define ICH_LR15_EL2 __LR8_EL2(7)
>> +
>> +#define __AP0Rx_EL2(x) S3_4_C12_C8_ ## x
>> +#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
>> +#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
>> +#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
>> +#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
>> +
>> +#define __AP1Rx_EL2(x) S3_4_C12_C9_ ## x
>> +#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
>> +#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
>> +#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
>> +#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
>> +
>> +#ifndef __ASSEMBLY__
>> +
>> +#include <linux/stringify.h>
>> +
>> +static inline void gic_write_eoir(u64 irq)
>> +{
>> + asm volatile("msr " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
>> + isb();
>> +}
>> +
>> +#endif
>> +
>> +#endif
>> --
>> 1.8.3.4
>>
>
> I noticed that you removed all the dist_lock locks here. I assume there
> was some rationale behind this, and given you can share that explanation
> with me and address the other tiny issues:
The purpose of the lock was to avoid races between mask/unmask and
set_type (both of which use the enable registers). But given that these
registers can operate on a single bit (set/clear operations), and that
the kernel won't try to do both at the same time, it is safe to get rid
of the whole locking.
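To make that concrete, here is a minimal sketch of the single-bit semantics
(the helper names are made up for illustration; the offsets are the ones the
common GIC code already uses):

/*
 * Each write touches exactly one interrupt's enable bit: the set and
 * clear register banks are write-1-to-set / write-1-to-clear, so there
 * is no read-modify-write window for a concurrent set_type to race with.
 */
static void sketch_mask_irq(void __iomem *base, unsigned int irq)
{
	writel_relaxed(1U << (irq % 32),
		       base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
}

static void sketch_unmask_irq(void __iomem *base, unsigned int irq)
{
	writel_relaxed(1U << (irq % 32),
		       base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
}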
> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Thanks,
M.
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 02/19] arm64: initial support for GICv3
2014-05-12 16:54 ` Marc Zyngier
@ 2014-05-14 16:02 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-14 16:02 UTC (permalink / raw)
To: linux-arm-kernel
On 12 May 2014 17:54, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On Fri, May 09 2014 at 3:05:22 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>> On Wed, Apr 16, 2014 at 02:39:34PM +0100, Marc Zyngier wrote:
[...]
>>> +
>>> +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
>>> + bool force)
>>> +{
>>> + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
>>> + void __iomem *reg;
>>> + int enabled;
>>> + u64 val;
>>> +
>>> + if (gic_irq_in_rdist(d))
>>> + return -EINVAL;
>>> +
>>> + /* If interrupt was enabled, disable it first */
>>> + enabled = gic_peek_irq(d, GICD_ISENABLER);
>>> + if (enabled)
>>> + gic_mask_irq(d);
>>> +
>>> + reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
>>> + val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
>>> +
>>> + writeq_relaxed(val, reg);
>>> +
>>> + /*
>>> + * If the interrupt was enabled, enable it again. Otherwise,
>>> + * just wait for the distributor to have digested our changes.
>>> + */
>>> + if (enabled)
>>> + gic_unmask_irq(d);
>>> + else
>>> + gic_dist_wait_for_rwp();
>>
>> If we don't need to wait for RWP when enabling the interrupt, why do we
>> need to wait if we're not going to enable it (presumably nothing ever
>> takes effect until someone later enables the interrupt, in which case
>> the effect would be the same? no?)?
>
> We do wait on the unmask path too (see gic_poke_irq).
>
got it, thanks.
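For reference, a minimal sketch of the poke-then-wait pattern referred to
above, built only from the helpers visible in the quoted patch (the real
gic_poke_irq may differ in detail):

static void sketch_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	/* Single-bit write to a set/clear register bank (e.g. GICD_ISENABLER) */
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
	/* ... then wait for the distributor to digest the change */
	gic_dist_wait_for_rwp();
}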
[...]
>>
>> I noticed that you removed all the dist_lock locks here. I assume there
>> was some rationale behind this, and given you can share that explanation
>> with me and address the other tiny issues:
>
> The purpose of the lock was to avoid races between mask/unmask and
> set_type (both of which use the enable registers). But given that these
> registers can operate on a single bit (set/clear operations), and that
> the kernel won't try to do both at the same time, it is safe to get rid
> of the whole locking.
>
ok, thanks.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 03/19] arm64: GICv3 device tree binding documentation
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
2014-04-16 13:39 ` [PATCH v3 01/19] ARM: GIC: move some bits of GICv2 to a library-type file Marc Zyngier
2014-04-16 13:39 ` [PATCH v3 02/19] arm64: initial support for GICv3 Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 04/19] arm64: boot protocol documentation update for GICv3 Marc Zyngier
` (15 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Add the necessary documentation to support GICv3.
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
Documentation/devicetree/bindings/arm/gic-v3.txt | 79 ++++++++++++++++++++++++
1 file changed, 79 insertions(+)
create mode 100644 Documentation/devicetree/bindings/arm/gic-v3.txt
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
new file mode 100644
index 0000000..33cd05e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -0,0 +1,79 @@
+* ARM Generic Interrupt Controller, version 3
+
+AArch64 SMP cores are often associated with a GICv3, providing Private
+Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI),
+Software Generated Interrupts (SGI), and Locality-specific Peripheral
+Interrupts (LPI).
+
+Main node required properties:
+
+- compatible : should at least contain "arm,gic-v3".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. Must be a single cell with a value of at least 3.
+
+ The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
+ interrupts. Other values are reserved for future use.
+
+ The 2nd cell contains the interrupt number for the interrupt type.
+ SPI interrupts are in the range [0-987]. PPI interrupts are in the
+ range [0-15].
+
+ The 3rd cell is the flags, encoded as follows:
+ bits[3:0] trigger type and level flags.
+ 1 = edge triggered
+ 4 = level triggered
+
+ Cells 4 and beyond are reserved for future use. When the 1st cell
+ has a value of 0 or 1, cells 4 and beyond act as padding, and may be
+ ignored. It is recommended that padding cells have a value of 0.
+
+- reg : Specifies base physical address(es) and size of the GIC
+ registers, in the following order:
+ - GIC Distributor interface (GICD)
+ - GIC Redistributors (GICR), one range per redistributor region
+ - GIC CPU interface (GICC)
+ - GIC Hypervisor interface (GICH)
+ - GIC Virtual CPU interface (GICV)
+
+ GICC, GICH and GICV are optional.
+
+- interrupts : Interrupt source of the VGIC maintenance interrupt.
+
+Optional
+
+- redistributor-stride : If using padding pages, specifies the stride
+ of consecutive redistributors. Must be a multiple of 64kB.
+
+- #redistributor-regions: The number of independent contiguous regions
+ occupied by the redistributors. Required if more than one such
+ region is present.
+
+Examples:
+
+ gic: interrupt-controller@2cf00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x2f000000 0 0x10000>, // GICD
+ <0x0 0x2f100000 0 0x200000>, // GICR
+ <0x0 0x2c000000 0 0x2000>, // GICC
+ <0x0 0x2c010000 0 0x2000>, // GICH
+ <0x0 0x2c020000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+ };
+
+ gic: interrupt-controller@2c010000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ redistributor-stride = <0x0 0x40000>; // 256kB stride
+ #redistributor-regions = <2>;
+ reg = <0x0 0x2c010000 0 0x10000>, // GICD
+ <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31
+ <0x0 0x2e000000 0 0x800000>, // GICR 2: CPUs 32-63
+ <0x0 0x2c040000 0 0x2000>, // GICC
+ <0x0 0x2c060000 0 0x2000>, // GICH
+ <0x0 0x2c080000 0 0x2000>; // GICV
+ interrupts = <1 9 4>;
+ };
--
1.8.3.4
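As a worked example of the binding above: the interrupts = <1 9 4> specifier
used in both nodes decodes as type 1 (PPI), interrupt number 9, flags 4
(level triggered), i.e. the VGIC maintenance interrupt is PPI 9, level
triggered. A tiny sketch of that decoding (the struct and names are purely
illustrative, not part of the binding):

struct gicv3_intspec {
	u32 type;	/* 0 = SPI, 1 = PPI */
	u32 number;	/* SPI: 0-987, PPI: 0-15 */
	u32 flags;	/* 1 = edge triggered, 4 = level triggered */
};

static const struct gicv3_intspec vgic_maint_irq = { 1, 9, 4 };	/* PPI 9, level */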
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 04/19] arm64: boot protocol documentation update for GICv3
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (2 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 03/19] arm64: GICv3 device tree binding documentation Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 05/19] KVM: arm/arm64: vgic: move GICv2 registers to their own structure Marc Zyngier
` (14 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Linux has some requirements that must be satisfied in order to boot
on a system built with a GICv3.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
Documentation/arm64/booting.txt | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index a9691cc..be765b6 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -131,6 +131,12 @@ Before jumping into the kernel, the following conditions must be met:
the kernel image will be entered must be initialised by software at a
higher exception level to prevent execution in an UNKNOWN state.
+ For systems with a GICv3 interrupt controller, it is expected that:
+ - If EL3 is present, it must program ICC_SRE_EL3.Enable (bit 3) to
+ 0b1 and ICC_SRE_EL3.SRE (bit 0) to 0b1.
+ - If the kernel is entered at EL1, EL2 must set ICC_SRE_EL2.Enable
+ (bit 3) to 0b1 and ICC_SRE_EL2.SRE (bit 0) to 0b1.
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level.
--
1.8.3.4
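A minimal sketch of what the EL2 part of this requirement amounts to, reusing
the ICC_SRE_EL2 encoding and __stringify() trick from the header added earlier
in the series; the macro and function names below are made up, only the bit
positions (SRE = bit 0, Enable = bit 3) come from the text above:

#define SKETCH_ICC_SRE_EL2_SRE		(1UL << 0)	/* bit 0, per the text above */
#define SKETCH_ICC_SRE_EL2_ENABLE	(1UL << 3)	/* bit 3, per the text above */

static inline void sketch_enable_gicv3_sysregs_at_el2(void)
{
	u64 val;

	/* relies on ICC_SRE_EL2 and __stringify() from the arm-gic-v3 header */
	asm volatile("mrs %0, " __stringify(ICC_SRE_EL2) : "=r" (val));
	val |= SKETCH_ICC_SRE_EL2_SRE | SKETCH_ICC_SRE_EL2_ENABLE;
	asm volatile("msr " __stringify(ICC_SRE_EL2) ", %0" : : "r" (val));
	isb();
}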
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 04/19] arm64: boot protocol documentation update for GICv3
2014-04-16 13:39 ` [PATCH v3 04/19] arm64: boot protocol documentation update for GICv3 Marc Zyngier
@ 2014-05-09 14:05 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:05 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:36PM +0100, Marc Zyngier wrote:
> Linux has some requirements that must be satisfied in order to boot
> on a system built with a GICv3.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> Documentation/arm64/booting.txt | 6 ++++++
> 1 file changed, 6 insertions(+)
>
> diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
> index a9691cc..be765b6 100644
> --- a/Documentation/arm64/booting.txt
> +++ b/Documentation/arm64/booting.txt
> @@ -131,6 +131,12 @@ Before jumping into the kernel, the following conditions must be met:
> the kernel image will be entered must be initialised by software at a
> higher exception level to prevent execution in an UNKNOWN state.
>
> + For systems with a GICv3 interrupt controller, it is expected that:
> + - If EL3 is present, it must program ICC_SRE_EL3.Enable (bit 3) to
> + 0b1 and ICC_SRE_EL3.SRE (bit 0) to 0b1.
> + - If the kernel is entered at EL1, EL2 must set ICC_SRE_EL2.Enable
> + (bit 3) to 0b1 and ICC_SRE_EL2.SRE (bit 0) to 0b1.
> +
> The requirements described above for CPU mode, caches, MMUs, architected
> timers, coherency and system registers apply to all CPUs. All CPUs must
> enter the kernel in the same exception level.
> --
> 1.8.3.4
>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 05/19] KVM: arm/arm64: vgic: move GICv2 registers to their own structure
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (3 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 04/19] arm64: boot protocol documentation update for GICv3 Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives Marc Zyngier
` (13 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
In order to make way for the GICv3 registers, move the v2-specific
registers to their own structure.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm/kernel/asm-offsets.c | 14 +++++------
arch/arm/kvm/interrupts_head.S | 26 +++++++++----------
arch/arm64/kernel/asm-offsets.c | 14 +++++------
arch/arm64/kvm/hyp.S | 26 +++++++++----------
include/kvm/arm_vgic.h | 20 +++++++++------
virt/kvm/arm/vgic.c | 56 ++++++++++++++++++++---------------------
6 files changed, 81 insertions(+), 75 deletions(-)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ded0417..dbe0476 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -181,13 +181,13 @@ int main(void)
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 6f18695..4a2a97a 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -413,14 +413,14 @@ vcpu .req r0 @ vcpu pointer always in r0
ldr r9, [r2, #GICH_ELRSR1]
ldr r10, [r2, #GICH_APR]
- str r3, [r11, #VGIC_CPU_HCR]
- str r4, [r11, #VGIC_CPU_VMCR]
- str r5, [r11, #VGIC_CPU_MISR]
- str r6, [r11, #VGIC_CPU_EISR]
- str r7, [r11, #(VGIC_CPU_EISR + 4)]
- str r8, [r11, #VGIC_CPU_ELRSR]
- str r9, [r11, #(VGIC_CPU_ELRSR + 4)]
- str r10, [r11, #VGIC_CPU_APR]
+ str r3, [r11, #VGIC_V2_CPU_HCR]
+ str r4, [r11, #VGIC_V2_CPU_VMCR]
+ str r5, [r11, #VGIC_V2_CPU_MISR]
+ str r6, [r11, #VGIC_V2_CPU_EISR]
+ str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
+ str r8, [r11, #VGIC_V2_CPU_ELRSR]
+ str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+ str r10, [r11, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
mov r5, #0
@@ -428,7 +428,7 @@ vcpu .req r0 @ vcpu pointer always in r0
/* Save list registers */
add r2, r2, #GICH_LR0
- add r3, r11, #VGIC_CPU_LR
+ add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r2], #4
str r6, [r3], #4
@@ -455,9 +455,9 @@ vcpu .req r0 @ vcpu pointer always in r0
add r11, vcpu, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
- ldr r3, [r11, #VGIC_CPU_HCR]
- ldr r4, [r11, #VGIC_CPU_VMCR]
- ldr r8, [r11, #VGIC_CPU_APR]
+ ldr r3, [r11, #VGIC_V2_CPU_HCR]
+ ldr r4, [r11, #VGIC_V2_CPU_VMCR]
+ ldr r8, [r11, #VGIC_V2_CPU_APR]
str r3, [r2, #GICH_HCR]
str r4, [r2, #GICH_VMCR]
@@ -465,7 +465,7 @@ vcpu .req r0 @ vcpu pointer always in r0
/* Restore list registers */
add r2, r2, #GICH_LR0
- add r3, r11, #VGIC_CPU_LR
+ add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r3], #4
str r6, [r2], #4
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888..20fd488 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -129,13 +129,13 @@ int main(void)
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 2c56012..cc1b471 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -412,14 +412,14 @@ CPU_BE( rev w9, w9 )
CPU_BE( rev w10, w10 )
CPU_BE( rev w11, w11 )
- str w4, [x3, #VGIC_CPU_HCR]
- str w5, [x3, #VGIC_CPU_VMCR]
- str w6, [x3, #VGIC_CPU_MISR]
- str w7, [x3, #VGIC_CPU_EISR]
- str w8, [x3, #(VGIC_CPU_EISR + 4)]
- str w9, [x3, #VGIC_CPU_ELRSR]
- str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
- str w11, [x3, #VGIC_CPU_APR]
+ str w4, [x3, #VGIC_V2_CPU_HCR]
+ str w5, [x3, #VGIC_V2_CPU_VMCR]
+ str w6, [x3, #VGIC_V2_CPU_MISR]
+ str w7, [x3, #VGIC_V2_CPU_EISR]
+ str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+ str w9, [x3, #VGIC_V2_CPU_ELRSR]
+ str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+ str w11, [x3, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
str wzr, [x2, #GICH_HCR]
@@ -427,7 +427,7 @@ CPU_BE( rev w11, w11 )
/* Save list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
+ add x3, x3, #VGIC_V2_CPU_LR
1: ldr w5, [x2], #4
CPU_BE( rev w5, w5 )
str w5, [x3], #4
@@ -452,9 +452,9 @@ CPU_BE( rev w5, w5 )
add x3, x0, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
- ldr w4, [x3, #VGIC_CPU_HCR]
- ldr w5, [x3, #VGIC_CPU_VMCR]
- ldr w6, [x3, #VGIC_CPU_APR]
+ ldr w4, [x3, #VGIC_V2_CPU_HCR]
+ ldr w5, [x3, #VGIC_V2_CPU_VMCR]
+ ldr w6, [x3, #VGIC_V2_CPU_APR]
CPU_BE( rev w4, w4 )
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
@@ -466,7 +466,7 @@ CPU_BE( rev w6, w6 )
/* Restore list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
+ add x3, x3, #VGIC_V2_CPU_LR
1: ldr w5, [x3], #4
CPU_BE( rev w5, w5 )
str w5, [x2], #4
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f27000f..f738e5a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -110,6 +110,16 @@ struct vgic_dist {
#endif
};
+struct vgic_v2_cpu_if {
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr[2]; /* Saved only */
+ u32 vgic_elrsr[2]; /* Saved only */
+ u32 vgic_apr;
+ u32 vgic_lr[VGIC_MAX_LRS];
+};
+
struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
@@ -126,13 +136,9 @@ struct vgic_cpu {
int nr_lr;
/* CPU vif control registers for world switch */
- u32 vgic_hcr;
- u32 vgic_vmcr;
- u32 vgic_misr; /* Saved only */
- u32 vgic_eisr[2]; /* Saved only */
- u32 vgic_elrsr[2]; /* Saved only */
- u32 vgic_apr;
- u32 vgic_lr[VGIC_MAX_LRS];
+ union {
+ struct vgic_v2_cpu_if vgic_v2;
+ };
#endif
};
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8ca405c..6bc6c7a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -602,7 +602,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
{
clear_bit(lr_nr, vgic_cpu->lr_used);
- vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
+ vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
@@ -627,7 +627,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
u32 *lr;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
- lr = &vgic_cpu->vgic_lr[i];
+ lr = &vgic_cpu->vgic_v2.vgic_lr[i];
irq = LR_IRQID(*lr);
source_cpu = LR_CPUID(*lr);
@@ -1007,7 +1007,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
int lr;
for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
- int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+ int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
if (!vgic_irq_is_enabled(vcpu, irq)) {
vgic_retire_lr(lr, irq, vgic_cpu);
@@ -1037,11 +1037,11 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
/* Do we have an active interrupt for the same CPUID? */
if (lr != LR_EMPTY &&
- (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
+ (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
kvm_debug("LR%d piggyback for IRQ%d %x\n",
- lr, irq, vgic_cpu->vgic_lr[lr]);
+ lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
- vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
+ vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
return true;
}
@@ -1052,12 +1052,12 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+ vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
vgic_cpu->vgic_irq_lr_map[irq] = lr;
set_bit(lr, vgic_cpu->lr_used);
if (!vgic_irq_is_edge(vcpu, irq))
- vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+ vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
return true;
}
@@ -1155,9 +1155,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
epilog:
if (overflow) {
- vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+ vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
} else {
- vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+ vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
/*
* We're about to run this VCPU, and we've consumed
* everything the distributor had in store for
@@ -1173,21 +1173,21 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
bool level_pending = false;
- kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+ kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);
- if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+ if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
/*
* Some level interrupts have been EOIed. Clear their
* active bit.
*/
int lr, irq;
- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
+ for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
vgic_cpu->nr_lr) {
- irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+ irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
vgic_irq_clear_active(vcpu, irq);
- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+ vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
/* Any additional pending interrupt? */
if (vgic_dist_irq_is_pending(vcpu, irq)) {
@@ -1201,13 +1201,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
* Despite being EOIed, the LR may not have
* been marked as empty.
*/
- set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
+ set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
+ vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
}
}
- if (vgic_cpu->vgic_misr & GICH_MISR_U)
- vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+ if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
+ vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
return level_pending;
}
@@ -1226,21 +1226,21 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
level_pending = vgic_process_maintenance(vcpu);
/* Clear mappings for empty LRs */
- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
+ for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
vgic_cpu->nr_lr) {
int irq;
if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
continue;
- irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+ irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
BUG_ON(irq >= VGIC_NR_IRQS);
vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/* Check if we still have something up our sleeve... */
- pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
+ pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
vgic_cpu->nr_lr);
if (level_pending || pending < vgic_cpu->nr_lr)
set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
@@ -1436,10 +1436,10 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* points to their reset values. Anything else resets to zero
* anyway.
*/
- vgic_cpu->vgic_vmcr = 0;
+ vgic_cpu->vgic_v2.vgic_vmcr = 0;
vgic_cpu->nr_lr = vgic_nr_lr;
- vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+ vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
return 0;
}
@@ -1745,15 +1745,15 @@ static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
}
if (!mmio->is_write) {
- reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+ reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
mmio_data_write(mmio, ~0, reg);
} else {
reg = mmio_data_read(mmio, ~0);
reg = (reg << shift) & mask;
- if (reg != (vgic_cpu->vgic_vmcr & mask))
+ if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
updated = true;
- vgic_cpu->vgic_vmcr &= ~mask;
- vgic_cpu->vgic_vmcr |= reg;
+ vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
+ vgic_cpu->vgic_v2.vgic_vmcr |= reg;
}
return updated;
}
--
1.8.3.4
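The anonymous union is the hook for the rest of the series: a later patch can
drop a v3-specific block next to vgic_v2 without the common code changing. A
purely hypothetical sketch of how that might look (field names and sizes are
illustrative, not the actual later patch):

struct vgic_v3_cpu_if {
	u32 vgic_hcr;
	u32 vgic_vmcr;
	u32 vgic_misr;			/* Saved only */
	u32 vgic_eisr;			/* Saved only */
	u32 vgic_elrsr;			/* Saved only */
	u64 vgic_lr[VGIC_MAX_LRS];	/* GICv3 LRs are 64 bit */
};

/* ... and the union in struct vgic_cpu simply grows a member: */
	union {
		struct vgic_v2_cpu_if vgic_v2;
		struct vgic_v3_cpu_if vgic_v3;
	};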
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (4 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 05/19] KVM: arm/arm64: vgic: move GICv2 registers to their own structure Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:05 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap Marc Zyngier
` (12 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
In order to split the various register manipulation from the main vgic
code, introduce a vgic_ops structure, and start by abstracting the
LR manipulation code with a couple of accessors.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 18 ++++++
virt/kvm/arm/vgic.c | 170 +++++++++++++++++++++++++++++++++----------------
2 files changed, 132 insertions(+), 56 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f738e5a..17bbe51 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -68,6 +68,24 @@ struct vgic_bytemap {
u32 shared[VGIC_NR_SHARED_IRQS / 4];
};
+struct kvm_vcpu;
+
+#define LR_STATE_PENDING (1 << 0)
+#define LR_STATE_ACTIVE (1 << 1)
+#define LR_STATE_MASK (3 << 0)
+#define LR_EOI_INT (1 << 2)
+
+struct vgic_lr {
+ u16 irq;
+ u8 source;
+ u8 state;
+};
+
+struct vgic_ops {
+ struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
+ void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+};
+
struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6bc6c7a..bac37b8 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -94,9 +94,12 @@ static struct device_node *vgic_node;
#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
+static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static u32 vgic_nr_lr;
static unsigned int vgic_maint_irq;
@@ -594,18 +597,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
return false;
}
-#define LR_CPUID(lr) \
- (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
-#define LR_IRQID(lr) \
- ((lr) & GICH_LR_VIRTUALID)
-
-static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
-{
- clear_bit(lr_nr, vgic_cpu->lr_used);
- vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-}
-
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -623,13 +614,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int vcpu_id = vcpu->vcpu_id;
- int i, irq, source_cpu;
- u32 *lr;
+ int i;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
- lr = &vgic_cpu->vgic_v2.vgic_lr[i];
- irq = LR_IRQID(*lr);
- source_cpu = LR_CPUID(*lr);
+ struct vgic_lr lr = vgic_get_lr(vcpu, i);
/*
* There are three options for the state bits:
@@ -641,7 +629,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* If the LR holds only an active interrupt (not pending) then
* just leave it alone.
*/
- if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
+ if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
continue;
/*
@@ -650,18 +638,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* is fine, then we are only setting a few bits that were
* already set.
*/
- vgic_dist_irq_set(vcpu, irq);
- if (irq < VGIC_NR_SGIS)
- dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
- *lr &= ~GICH_LR_PENDING_BIT;
+ vgic_dist_irq_set(vcpu, lr.irq);
+ if (lr.irq < VGIC_NR_SGIS)
+ dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
+ lr.state &= ~LR_STATE_PENDING;
+ vgic_set_lr(vcpu, i, lr);
/*
* If there's no state left on the LR (it could still be
* active), then the LR does not hold any useful info and can
* be marked as free for other use.
*/
- if (!(*lr & GICH_LR_STATE))
- vgic_retire_lr(i, irq, vgic_cpu);
+ if (!(lr.state & LR_STATE_MASK))
+ vgic_retire_lr(i, lr.irq, vcpu);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
@@ -989,9 +978,77 @@ static void vgic_update_state(struct kvm *kvm)
}
}
+static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+ struct vgic_lr lr_desc;
+ u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
+
+ lr_desc.irq = val & GICH_LR_VIRTUALID;
+ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
+ lr_desc.state = 0;
+
+ if (val & GICH_LR_PENDING_BIT)
+ lr_desc.state |= LR_STATE_PENDING;
+ if (val & GICH_LR_ACTIVE_BIT)
+ lr_desc.state |= LR_STATE_ACTIVE;
+ if (val & GICH_LR_EOI)
+ lr_desc.state |= LR_EOI_INT;
+
+ return lr_desc;
+}
+
#define MK_LR_PEND(src, irq) \
(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
+static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
+{
+ u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
+
+ if (lr_desc.state & LR_STATE_PENDING)
+ lr_val |= GICH_LR_PENDING_BIT;
+ if (lr_desc.state & LR_STATE_ACTIVE)
+ lr_val |= GICH_LR_ACTIVE_BIT;
+ if (lr_desc.state & LR_EOI_INT)
+ lr_val |= GICH_LR_EOI;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
+
+ /*
+ * Despite being EOIed, the LR may not have been marked as
+ * empty.
+ */
+ if (!(lr_val & GICH_LR_STATE))
+ set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+}
+
+static const struct vgic_ops vgic_ops = {
+ .get_lr = vgic_v2_get_lr,
+ .set_lr = vgic_v2_set_lr,
+};
+
+static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+ return vgic_ops.get_lr(vcpu, lr);
+}
+
+static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr vlr)
+{
+ vgic_ops.set_lr(vcpu, lr, vlr);
+}
+
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+
+ vlr.state = 0;
+ vgic_set_lr(vcpu, lr_nr, vlr);
+ clear_bit(lr_nr, vgic_cpu->lr_used);
+ vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+}
+
/*
* An interrupt may have been disabled after being made pending on the
* CPU interface (the classic case is a timer running while we're
@@ -1007,12 +1064,12 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
int lr;
for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
- int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- if (!vgic_irq_is_enabled(vcpu, irq)) {
- vgic_retire_lr(lr, irq, vgic_cpu);
- if (vgic_irq_is_active(vcpu, irq))
- vgic_irq_clear_active(vcpu, irq);
+ if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
+ vgic_retire_lr(lr, vlr.irq, vcpu);
+ if (vgic_irq_is_active(vcpu, vlr.irq))
+ vgic_irq_clear_active(vcpu, vlr.irq);
}
}
}
@@ -1024,6 +1081,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_lr vlr;
int lr;
/* Sanitize the input... */
@@ -1036,13 +1094,15 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
lr = vgic_cpu->vgic_irq_lr_map[irq];
/* Do we have an active interrupt for the same CPUID? */
- if (lr != LR_EMPTY &&
- (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
- kvm_debug("LR%d piggyback for IRQ%d %x\n",
- lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
- BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
- vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
- return true;
+ if (lr != LR_EMPTY) {
+ vlr = vgic_get_lr(vcpu, lr);
+ if (vlr.source == sgi_source_id) {
+ kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
+ BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+ vlr.state |= LR_STATE_PENDING;
+ vgic_set_lr(vcpu, lr, vlr);
+ return true;
+ }
}
/* Try to use another LR for this interrupt */
@@ -1052,12 +1112,16 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
- vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
vgic_cpu->vgic_irq_lr_map[irq] = lr;
set_bit(lr, vgic_cpu->lr_used);
+ vlr.irq = irq;
+ vlr.source = sgi_source_id;
+ vlr.state = LR_STATE_PENDING;
if (!vgic_irq_is_edge(vcpu, irq))
- vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
+ vlr.state |= LR_EOI_INT;
+
+ vgic_set_lr(vcpu, lr, vlr);
return true;
}
@@ -1180,29 +1244,23 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
* Some level interrupts have been EOIed. Clear their
* active bit.
*/
- int lr, irq;
+ int lr;
for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
vgic_cpu->nr_lr) {
- irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
+ struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
- vgic_irq_clear_active(vcpu, irq);
- vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
+ vgic_irq_clear_active(vcpu, vlr.irq);
+ vlr.state = 0;
+ vgic_set_lr(vcpu, lr, vlr);
/* Any additional pending interrupt? */
- if (vgic_dist_irq_is_pending(vcpu, irq)) {
- vgic_cpu_irq_set(vcpu, irq);
+ if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
+ vgic_cpu_irq_set(vcpu, vlr.irq);
level_pending = true;
} else {
- vgic_cpu_irq_clear(vcpu, irq);
+ vgic_cpu_irq_clear(vcpu, vlr.irq);
}
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
- vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
}
}
@@ -1228,15 +1286,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
/* Clear mappings for empty LRs */
for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
vgic_cpu->nr_lr) {
- int irq;
+ struct vgic_lr vlr;
if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
continue;
- irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
+ vlr = vgic_get_lr(vcpu, lr);
- BUG_ON(irq >= VGIC_NR_IRQS);
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+ BUG_ON(vlr.irq >= VGIC_NR_IRQS);
+ vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
}
/* Check if we still have something up our sleeve... */
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives
2014-04-16 13:39 ` [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives Marc Zyngier
@ 2014-05-09 14:05 ` Christoffer Dall
2014-05-12 17:28 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:05 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:38PM +0100, Marc Zyngier wrote:
> In order to split the various register manipulation from the main vgic
> code, introduce a vgic_ops structure, and start by abstracting the
> LR manipulation code with a couple of accessors.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 18 ++++++
> virt/kvm/arm/vgic.c | 170 +++++++++++++++++++++++++++++++++----------------
> 2 files changed, 132 insertions(+), 56 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index f738e5a..17bbe51 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -68,6 +68,24 @@ struct vgic_bytemap {
> u32 shared[VGIC_NR_SHARED_IRQS / 4];
> };
>
> +struct kvm_vcpu;
> +
> +#define LR_STATE_PENDING (1 << 0)
> +#define LR_STATE_ACTIVE (1 << 1)
> +#define LR_STATE_MASK (3 << 0)
> +#define LR_EOI_INT (1 << 2)
> +
> +struct vgic_lr {
> + u16 irq;
> + u8 source;
> + u8 state;
> +};
> +
> +struct vgic_ops {
> + struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
> + void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
> +};
> +
> struct vgic_dist {
> #ifdef CONFIG_KVM_ARM_VGIC
> spinlock_t lock;
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 6bc6c7a..bac37b8 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -94,9 +94,12 @@ static struct device_node *vgic_node;
> #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
>
> static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
> +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
> static void vgic_update_state(struct kvm *kvm);
> static void vgic_kick_vcpus(struct kvm *kvm);
> static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
> +static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
> +static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
> static u32 vgic_nr_lr;
>
> static unsigned int vgic_maint_irq;
> @@ -594,18 +597,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
> return false;
> }
>
> -#define LR_CPUID(lr) \
> - (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
> -#define LR_IRQID(lr) \
> - ((lr) & GICH_LR_VIRTUALID)
> -
> -static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
> -{
> - clear_bit(lr_nr, vgic_cpu->lr_used);
> - vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
> - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
> -}
> -
> /**
> * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
> * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
> @@ -623,13 +614,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> int vcpu_id = vcpu->vcpu_id;
> - int i, irq, source_cpu;
> - u32 *lr;
> + int i;
>
> for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
> - lr = &vgic_cpu->vgic_v2.vgic_lr[i];
> - irq = LR_IRQID(*lr);
> - source_cpu = LR_CPUID(*lr);
> + struct vgic_lr lr = vgic_get_lr(vcpu, i);
>
> /*
> * There are three options for the state bits:
> @@ -641,7 +629,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> * If the LR holds only an active interrupt (not pending) then
> * just leave it alone.
> */
> - if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
> + if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
> continue;
>
> /*
> @@ -650,18 +638,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
> * is fine, then we are only setting a few bits that were
> * already set.
> */
> - vgic_dist_irq_set(vcpu, irq);
> - if (irq < VGIC_NR_SGIS)
> - dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
> - *lr &= ~GICH_LR_PENDING_BIT;
> + vgic_dist_irq_set(vcpu, lr.irq);
> + if (lr.irq < VGIC_NR_SGIS)
> + dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
> + lr.state &= ~LR_STATE_PENDING;
> + vgic_set_lr(vcpu, i, lr);
>
> /*
> * If there's no state left on the LR (it could still be
> * active), then the LR does not hold any useful info and can
> * be marked as free for other use.
> */
> - if (!(*lr & GICH_LR_STATE))
> - vgic_retire_lr(i, irq, vgic_cpu);
> + if (!(lr.state & LR_STATE_MASK))
> + vgic_retire_lr(i, lr.irq, vcpu);
>
> /* Finally update the VGIC state. */
> vgic_update_state(vcpu->kvm);
> @@ -989,9 +978,77 @@ static void vgic_update_state(struct kvm *kvm)
> }
> }
>
> +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
> +{
> + struct vgic_lr lr_desc;
> + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
> +
> + lr_desc.irq = val & GICH_LR_VIRTUALID;
> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
shouldn't the mask here be 0xf according to the GICv2 spec?
> + lr_desc.state = 0;
> +
> + if (val & GICH_LR_PENDING_BIT)
> + lr_desc.state |= LR_STATE_PENDING;
> + if (val & GICH_LR_ACTIVE_BIT)
> + lr_desc.state |= LR_STATE_ACTIVE;
> + if (val & GICH_LR_EOI)
> + lr_desc.state |= LR_EOI_INT;
> +
> + return lr_desc;
> +}
> +
> #define MK_LR_PEND(src, irq) \
> (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>
> +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr lr_desc)
> +{
> + u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
this looks a bit weird, the check just below suggests that you can
convert a struct vgic_lr into an lr register for, for example, an lr
which is just active and not pending.
> +
> + if (lr_desc.state & LR_STATE_PENDING)
> + lr_val |= GICH_LR_PENDING_BIT;
> + if (lr_desc.state & LR_STATE_ACTIVE)
> + lr_val |= GICH_LR_ACTIVE_BIT;
> + if (lr_desc.state & LR_EOI_INT)
> + lr_val |= GICH_LR_EOI;
> +
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
> +
> + /*
> + * Despite being EOIed, the LR may not have been marked as
> + * empty.
> + */
> + if (!(lr_val & GICH_LR_STATE))
> + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
This feels weird in vgic_v2_set_lr, the name/style suggests that these
get/set functions are just converters from a struct vgic_lr (that
presumably common code can deal with) and architecture-specific LR
register formats.
If these functions have richer semantics than that (like maintaining the
elrsr register), please comment that on the function.
> +}
> +
> +static const struct vgic_ops vgic_ops = {
> + .get_lr = vgic_v2_get_lr,
> + .set_lr = vgic_v2_set_lr,
> +};
> +
> +static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> +{
> + return vgic_ops.get_lr(vcpu, lr);
> +}
> +
> +static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr vlr)
> +{
> + vgic_ops.set_lr(vcpu, lr, vlr);
> +}
inline statics in a C file? Surely the compiler is smart enough to
inline this without any hints.
> +
> +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> +{
> + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
seems like we're doing a lot of copying back and forward between the
struct vgic_lr and the LRs on the vgic_cpu struct, I wonder if it makes
more sense to only deal with it during the sync/flush functions, or
maybe we end up messing with more state than necessary then?
> +
> + vlr.state = 0;
> + vgic_set_lr(vcpu, lr_nr, vlr);
> + clear_bit(lr_nr, vgic_cpu->lr_used);
> + vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
> +}
> +
> /*
> * An interrupt may have been disabled after being made pending on the
> * CPU interface (the classic case is a timer running while we're
> @@ -1007,12 +1064,12 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
> int lr;
>
> for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
> - int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> - if (!vgic_irq_is_enabled(vcpu, irq)) {
> - vgic_retire_lr(lr, irq, vgic_cpu);
> - if (vgic_irq_is_active(vcpu, irq))
> - vgic_irq_clear_active(vcpu, irq);
> + if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
> + vgic_retire_lr(lr, vlr.irq, vcpu);
> + if (vgic_irq_is_active(vcpu, vlr.irq))
> + vgic_irq_clear_active(vcpu, vlr.irq);
> }
> }
> }
> @@ -1024,6 +1081,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
> static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> + struct vgic_lr vlr;
> int lr;
>
> /* Sanitize the input... */
> @@ -1036,13 +1094,15 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> lr = vgic_cpu->vgic_irq_lr_map[irq];
>
> /* Do we have an active interrupt for the same CPUID? */
> - if (lr != LR_EMPTY &&
> - (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
> - kvm_debug("LR%d piggyback for IRQ%d %x\n",
> - lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
> - BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
> - return true;
> + if (lr != LR_EMPTY) {
> + vlr = vgic_get_lr(vcpu, lr);
> + if (vlr.source == sgi_source_id) {
> + kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
> + BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
> + vlr.state |= LR_STATE_PENDING;
> + vgic_set_lr(vcpu, lr, vlr);
> + return true;
> + }
> }
>
> /* Try to use another LR for this interrupt */
> @@ -1052,12 +1112,16 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
> return false;
>
> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
> - vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
> vgic_cpu->vgic_irq_lr_map[irq] = lr;
> set_bit(lr, vgic_cpu->lr_used);
>
> + vlr.irq = irq;
> + vlr.source = sgi_source_id;
> + vlr.state = LR_STATE_PENDING;
> if (!vgic_irq_is_edge(vcpu, irq))
> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
> + vlr.state |= LR_EOI_INT;
> +
> + vgic_set_lr(vcpu, lr, vlr);
>
> return true;
> }
> @@ -1180,29 +1244,23 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> * Some level interrupts have been EOIed. Clear their
> * active bit.
> */
> - int lr, irq;
> + int lr;
>
> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
> vgic_cpu->nr_lr) {
> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> - vgic_irq_clear_active(vcpu, irq);
> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
> + vgic_irq_clear_active(vcpu, vlr.irq);
> + vlr.state = 0;
slight change of semantics here. It is still correct, but only because
we never set the pending bit on an already active level interrupt, but I
guess this could technically be closer to real hardware by allowing
world-switching VMs that are processing active interrupts to pick up an
additional pending state, in which case just setting state = 0 would be
incorrect here, and you should instead lower the EOI mask like you did
before.
That being said, it is a pseudo-theoretical point, and you can make me
more happy by adding:
BUG_ON(vlr.state & LR_STATE_MASK);
before clearing the state completely.
> + vgic_set_lr(vcpu, lr, vlr);
>
> /* Any additional pending interrupt? */
> - if (vgic_dist_irq_is_pending(vcpu, irq)) {
> - vgic_cpu_irq_set(vcpu, irq);
> + if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
> + vgic_cpu_irq_set(vcpu, vlr.irq);
> level_pending = true;
> } else {
> - vgic_cpu_irq_clear(vcpu, irq);
> + vgic_cpu_irq_clear(vcpu, vlr.irq);
> }
> -
> - /*
> - * Despite being EOIed, the LR may not have
> - * been marked as empty.
> - */
> - set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
> }
> }
>
> @@ -1228,15 +1286,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> /* Clear mappings for empty LRs */
> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
> vgic_cpu->nr_lr) {
> - int irq;
> + struct vgic_lr vlr;
>
> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
> continue;
>
> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
> + vlr = vgic_get_lr(vcpu, lr);
>
> - BUG_ON(irq >= VGIC_NR_IRQS);
> - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
> + BUG_ON(vlr.irq >= VGIC_NR_IRQS);
> + vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
> }
>
> /* Check if we still have something up our sleeve... */
> --
> 1.8.3.4
>
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives
2014-05-09 14:05 ` Christoffer Dall
@ 2014-05-12 17:28 ` Marc Zyngier
2014-05-14 16:17 ` Christoffer Dall
0 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 17:28 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:05:54 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:38PM +0100, Marc Zyngier wrote:
>> In order to split the various register manipulation from the main vgic
>> code, introduce a vgic_ops structure, and start by abstracting the
>> LR manipulation code with a couple of accessors.
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 18 ++++++
>> virt/kvm/arm/vgic.c | 170 +++++++++++++++++++++++++++++++++----------------
>> 2 files changed, 132 insertions(+), 56 deletions(-)
>>
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index f738e5a..17bbe51 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -68,6 +68,24 @@ struct vgic_bytemap {
>> u32 shared[VGIC_NR_SHARED_IRQS / 4];
>> };
>>
>> +struct kvm_vcpu;
>> +
>> +#define LR_STATE_PENDING (1 << 0)
>> +#define LR_STATE_ACTIVE (1 << 1)
>> +#define LR_STATE_MASK (3 << 0)
>> +#define LR_EOI_INT (1 << 2)
>> +
>> +struct vgic_lr {
>> + u16 irq;
>> + u8 source;
>> + u8 state;
>> +};
>> +
>> +struct vgic_ops {
>> + struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
>> + void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
>> +};
>> +
>> struct vgic_dist {
>> #ifdef CONFIG_KVM_ARM_VGIC
>> spinlock_t lock;
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index 6bc6c7a..bac37b8 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -94,9 +94,12 @@ static struct device_node *vgic_node;
>> #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
>>
>> static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
>> +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
>> static void vgic_update_state(struct kvm *kvm);
>> static void vgic_kick_vcpus(struct kvm *kvm);
>> static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
>> +static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
>> +static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
>> static u32 vgic_nr_lr;
>>
>> static unsigned int vgic_maint_irq;
>> @@ -594,18 +597,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
>> return false;
>> }
>>
>> -#define LR_CPUID(lr) \
>> - (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
>> -#define LR_IRQID(lr) \
>> - ((lr) & GICH_LR_VIRTUALID)
>> -
>> -static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
>> -{
>> - clear_bit(lr_nr, vgic_cpu->lr_used);
>> - vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
>> - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>> -}
>> -
>> /**
>> * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
>> * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
>> @@ -623,13 +614,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
>> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> int vcpu_id = vcpu->vcpu_id;
>> - int i, irq, source_cpu;
>> - u32 *lr;
>> + int i;
>>
>> for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
>> - lr = &vgic_cpu->vgic_v2.vgic_lr[i];
>> - irq = LR_IRQID(*lr);
>> - source_cpu = LR_CPUID(*lr);
>> + struct vgic_lr lr = vgic_get_lr(vcpu, i);
>>
>> /*
>> * There are three options for the state bits:
>> @@ -641,7 +629,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
>> * If the LR holds only an active interrupt (not pending) then
>> * just leave it alone.
>> */
>> - if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
>> + if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
>> continue;
>>
>> /*
>> @@ -650,18 +638,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
>> * is fine, then we are only setting a few bits that were
>> * already set.
>> */
>> - vgic_dist_irq_set(vcpu, irq);
>> - if (irq < VGIC_NR_SGIS)
>> - dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
>> - *lr &= ~GICH_LR_PENDING_BIT;
>> + vgic_dist_irq_set(vcpu, lr.irq);
>> + if (lr.irq < VGIC_NR_SGIS)
>> + dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
>> + lr.state &= ~LR_STATE_PENDING;
>> + vgic_set_lr(vcpu, i, lr);
>>
>> /*
>> * If there's no state left on the LR (it could still be
>> * active), then the LR does not hold any useful info and can
>> * be marked as free for other use.
>> */
>> - if (!(*lr & GICH_LR_STATE))
>> - vgic_retire_lr(i, irq, vgic_cpu);
>> + if (!(lr.state & LR_STATE_MASK))
>> + vgic_retire_lr(i, lr.irq, vcpu);
>>
>> /* Finally update the VGIC state. */
>> vgic_update_state(vcpu->kvm);
>> @@ -989,9 +978,77 @@ static void vgic_update_state(struct kvm *kvm)
>> }
>> }
>>
>> +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> +{
>> + struct vgic_lr lr_desc;
>> + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
>> +
>> + lr_desc.irq = val & GICH_LR_VIRTUALID;
>> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
>
> shouldn't the mask here be 0xf according to the GICv2 spec?
Actually, looks like it should be 7 instead (bits [12:10], and only when
lr_desc.irq is within 0..15). Well spotted anyway.
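A minimal sketch of that fix (only SGIs carry a source CPU, in bits [12:10]
of the LR):

	lr_desc.source = 0;
	if (lr_desc.irq < VGIC_NR_SGIS)
		lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;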
>
>> + lr_desc.state = 0;
>> +
>> + if (val & GICH_LR_PENDING_BIT)
>> + lr_desc.state |= LR_STATE_PENDING;
>> + if (val & GICH_LR_ACTIVE_BIT)
>> + lr_desc.state |= LR_STATE_ACTIVE;
>> + if (val & GICH_LR_EOI)
>> + lr_desc.state |= LR_EOI_INT;
>> +
>> + return lr_desc;
>> +}
>> +
>> #define MK_LR_PEND(src, irq) \
>> (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>>
>> +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
>> + struct vgic_lr lr_desc)
>> +{
>> + u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
>
> this looks a bit weird, the check just below suggests that you can
> convert a struct vgic_lr into an lr register for, for example, an lr
> which is just active and not pending.
Ah, this is probably a leftover from some previous implementation. I'll
get rid of MK_LR_PEND altogether, and rely solely on lr_desc.state.
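i.e. build the initial value straight from the descriptor, something
like (sketch only, the state bits handled just below stay as they are):

	u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;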
>> +
>> + if (lr_desc.state & LR_STATE_PENDING)
>> + lr_val |= GICH_LR_PENDING_BIT;
>> + if (lr_desc.state & LR_STATE_ACTIVE)
>> + lr_val |= GICH_LR_ACTIVE_BIT;
>> + if (lr_desc.state & LR_EOI_INT)
>> + lr_val |= GICH_LR_EOI;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
>> +
>> + /*
>> + * Despite being EOIed, the LR may not have been marked as
>> + * empty.
>> + */
>> + if (!(lr_val & GICH_LR_STATE))
>> + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
>
> This feels weird in vgic_v2_set_lr, the name/style suggests that these
> get/set functions are just converters from a struct vgic_lr (that
> presumably common code can deal with) and architecture-specific LR
> register formats.
>
> If these functions have richer semantics than that (like maintaining the
> elrsr register), please comment that on the function.
Yeah, this is one of the corners I really dislike, but making this a
separate vector makes things even more ugly than they already are.
I'll add some comments, but feel free to suggest an alternative approach.
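As a first step, maybe a comment along these lines on top of
vgic_v2_set_lr (wording purely illustrative):

/*
 * Note: this is not a pure descriptor-to-register conversion. When the
 * resulting LR carries no state, we also mark it as empty in the ELRSR
 * shadow so that it can be recycled on the next sync.
 */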
>> +}
>> +
>> +static const struct vgic_ops vgic_ops = {
>> + .get_lr = vgic_v2_get_lr,
>> + .set_lr = vgic_v2_set_lr,
>> +};
>> +
>> +static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> +{
>> + return vgic_ops.get_lr(vcpu, lr);
>> +}
>> +
>> +static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
>> + struct vgic_lr vlr)
>> +{
>> + vgic_ops.set_lr(vcpu, lr, vlr);
>> +}
>
> inline statics in a C file? Surely the compiler is smart enough to
> inline this without any hints.
Yup, brain fart here.
>> +
>> +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
>> +{
>> + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
>
> seems like we're doing a lot of copying back and forward between the
> struct vgic_lr and the LRs on the vgic_cpu struct, I wonder if it makes
> more sense to only deal with it during the sync/flush functions, or
> maybe we end up messing with more state than necessary then?
We're doing a lot of them, but we actually don't have much copying. The
structure is small enough to fit in a single register (even on AArch32),
and we're passing it by value. So the whole state computation actually
occurs in registers.
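For reference, the descriptor is expected to look roughly like this
(layout quoted from memory, so treat it as illustrative):

struct vgic_lr {
	u16	irq;
	u8	source;
	u8	state;
};

i.e. 4 bytes in total, so passing it around by value is essentially free.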
But overall yes, I agree that we should probably try to do things in a
more staged way. I'll have a look.
>> +
>> + vlr.state = 0;
>> + vgic_set_lr(vcpu, lr_nr, vlr);
>> + clear_bit(lr_nr, vgic_cpu->lr_used);
>> + vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>> +}
>> +
>> /*
>> * An interrupt may have been disabled after being made pending on the
>> * CPU interface (the classic case is a timer running while we're
>> @@ -1007,12 +1064,12 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>> int lr;
>>
>> for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
>> - int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> - if (!vgic_irq_is_enabled(vcpu, irq)) {
>> - vgic_retire_lr(lr, irq, vgic_cpu);
>> - if (vgic_irq_is_active(vcpu, irq))
>> - vgic_irq_clear_active(vcpu, irq);
>> + if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
>> + vgic_retire_lr(lr, vlr.irq, vcpu);
>> + if (vgic_irq_is_active(vcpu, vlr.irq))
>> + vgic_irq_clear_active(vcpu, vlr.irq);
>> }
>> }
>> }
>> @@ -1024,6 +1081,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>> static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>> {
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> + struct vgic_lr vlr;
>> int lr;
>>
>> /* Sanitize the input... */
>> @@ -1036,13 +1094,15 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>> lr = vgic_cpu->vgic_irq_lr_map[irq];
>>
>> /* Do we have an active interrupt for the same CPUID? */
>> - if (lr != LR_EMPTY &&
>> - (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
>> - kvm_debug("LR%d piggyback for IRQ%d %x\n",
>> - lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
>> - BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
>> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
>> - return true;
>> + if (lr != LR_EMPTY) {
>> + vlr = vgic_get_lr(vcpu, lr);
>> + if (vlr.source == sgi_source_id) {
>> + kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
>> + BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
>> + vlr.state |= LR_STATE_PENDING;
>> + vgic_set_lr(vcpu, lr, vlr);
>> + return true;
>> + }
>> }
>>
>> /* Try to use another LR for this interrupt */
>> @@ -1052,12 +1112,16 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>> return false;
>>
>> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
>> - vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
>> vgic_cpu->vgic_irq_lr_map[irq] = lr;
>> set_bit(lr, vgic_cpu->lr_used);
>>
>> + vlr.irq = irq;
>> + vlr.source = sgi_source_id;
>> + vlr.state = LR_STATE_PENDING;
>> if (!vgic_irq_is_edge(vcpu, irq))
>> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
>> + vlr.state |= LR_EOI_INT;
>> +
>> + vgic_set_lr(vcpu, lr, vlr);
>>
>> return true;
>> }
>> @@ -1180,29 +1244,23 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
>> * Some level interrupts have been EOIed. Clear their
>> * active bit.
>> */
>> - int lr, irq;
>> + int lr;
>>
>> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
>> vgic_cpu->nr_lr) {
>> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> - vgic_irq_clear_active(vcpu, irq);
>> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
>> + vgic_irq_clear_active(vcpu, vlr.irq);
>> + vlr.state = 0;
>
> slight change of semantics here. It is still correct, but only because
> we never set the pending bit on an already active level interrupt, but I
> guess this could technically be closer to real hardware by allowing
> world-switching VMs that are processing active interrupts to pick up an
> additional pending state, in which case just setting state = 0 would be
> incorrect here, and you should instead lower the EOI mask like you did
> before.
>
> That being said, it is a pseudo-theoretical point, and you can make me
> more happy by adding:
>
> BUG_ON(vlr.state & LR_STATE_MASK);
>
> before clearing the state completely.
Putting a BUG_ON() seems a bit harsh. WARN_ON()?
>> + vgic_set_lr(vcpu, lr, vlr);
>>
>> /* Any additional pending interrupt? */
>> - if (vgic_dist_irq_is_pending(vcpu, irq)) {
>> - vgic_cpu_irq_set(vcpu, irq);
>> + if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
>> + vgic_cpu_irq_set(vcpu, vlr.irq);
>> level_pending = true;
>> } else {
>> - vgic_cpu_irq_clear(vcpu, irq);
>> + vgic_cpu_irq_clear(vcpu, vlr.irq);
>> }
>> -
>> - /*
>> - * Despite being EOIed, the LR may not have
>> - * been marked as empty.
>> - */
>> - set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
>> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
>> }
>> }
>>
>> @@ -1228,15 +1286,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> /* Clear mappings for empty LRs */
>> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
>> vgic_cpu->nr_lr) {
>> - int irq;
>> + struct vgic_lr vlr;
>>
>> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>> continue;
>>
>> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>> + vlr = vgic_get_lr(vcpu, lr);
>>
>> - BUG_ON(irq >= VGIC_NR_IRQS);
>> - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>> + BUG_ON(vlr.irq >= VGIC_NR_IRQS);
>> + vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
>> }
>>
>> /* Check if we still have something up our sleeve... */
>> --
>> 1.8.3.4
>>
> -Christoffer
>
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives
2014-05-12 17:28 ` Marc Zyngier
@ 2014-05-14 16:17 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-14 16:17 UTC (permalink / raw)
To: linux-arm-kernel
On 12 May 2014 18:28, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On Fri, May 09 2014 at 3:05:54 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>> On Wed, Apr 16, 2014 at 02:39:38PM +0100, Marc Zyngier wrote:
[...]
>>> +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
>>> +{
>>> + struct vgic_lr lr_desc;
>>> + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
>>> +
>>> + lr_desc.irq = val & GICH_LR_VIRTUALID;
>>> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
>>
>> shouldn't the mask here be 0xf according to the GICv2 spec?
>
> Actually, looks like it should be 7 instead (bits [12:10], and only when
> lr_desc.irq is within 0..15). Well spotted anyway.
>
yes, 7, duh :)
>>
>>> + lr_desc.state = 0;
>>> +
>>> + if (val & GICH_LR_PENDING_BIT)
>>> + lr_desc.state |= LR_STATE_PENDING;
>>> + if (val & GICH_LR_ACTIVE_BIT)
>>> + lr_desc.state |= LR_STATE_ACTIVE;
>>> + if (val & GICH_LR_EOI)
>>> + lr_desc.state |= LR_EOI_INT;
>>> +
>>> + return lr_desc;
>>> +}
>>> +
>>> #define MK_LR_PEND(src, irq) \
>>> (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>>>
>>> +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
>>> + struct vgic_lr lr_desc)
>>> +{
>>> + u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
>>
>> this looks a bit weird, the check just below suggests that you can
>> convert a struct vgic_lr into an lr register for, for example, an lr
>> which is just active and not pending.
>
> Ah, this is probably a leftover from some previous implementation. I'll
> get rid of MK_LR_PEND altogether, and rely solely on lr_desc.state.
>
thanks
>>> +
>>> + if (lr_desc.state & LR_STATE_PENDING)
>>> + lr_val |= GICH_LR_PENDING_BIT;
>>> + if (lr_desc.state & LR_STATE_ACTIVE)
>>> + lr_val |= GICH_LR_ACTIVE_BIT;
>>> + if (lr_desc.state & LR_EOI_INT)
>>> + lr_val |= GICH_LR_EOI;
>>> +
>>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
>>> +
>>> + /*
>>> + * Despite being EOIed, the LR may not have been marked as
>>> + * empty.
>>> + */
>>> + if (!(lr_val & GICH_LR_STATE))
>>> + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
>>
>> This feels weird in vgic_v2_set_lr, the name/style suggests that these
>> get/set functions are just converters from a struct vgic_lr (that
>> presumably common code can deal with) and architecture-specific LR
>> register formats.
>>
>> If these functions have richer semantics than that (like maintaining the
>> elrsr register), please comment that on the function.
>
> Yeah, this is one of the corners I really dislike, but making this a
> separate vector makes things even more ugly than they already are.
>
> I'll add some comments, but feel free to suggest an alternative approach.
>
I think adding an API function called something like
vgic_sync_lr_elrsr() and calling that from vgic_process_maintenance()
- and moving the comment about EOIed there - would be much nicer.
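Something like this, perhaps (name and shape purely illustrative - it
would presumably be routed through vgic_ops like the other accessors):

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
	/*
	 * Despite being EOIed, the LR may not have been marked as empty:
	 * reflect an LR that carries no state in the ELRSR shadow here,
	 * instead of doing it as a side effect of set_lr().
	 */
	if (!(vlr.state & LR_STATE_MASK))
		set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
}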
>>> +}
>>> +
>>> +static const struct vgic_ops vgic_ops = {
>>> + .get_lr = vgic_v2_get_lr,
>>> + .set_lr = vgic_v2_set_lr,
>>> +};
>>> +
>>> +static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
>>> +{
>>> + return vgic_ops.get_lr(vcpu, lr);
>>> +}
>>> +
>>> +static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
>>> + struct vgic_lr vlr)
>>> +{
>>> + vgic_ops.set_lr(vcpu, lr, vlr);
>>> +}
>>
>> inline statics in a C file? Surely the compiler is smart enough to
>> inline this without any hints.
>
> Yup, brain fart here.
>
>>> +
>>> +static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
>>> +{
>>> + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
>>
>> seems like we're doing a lot of copying back and forward between the
>> struct vgic_lr and the LRs on the vgic_cpu struct, I wonder if it makes
>> more sense to only deal with it during the sync/flush functions, or
>> maybe we end up messing with more state than necessary then?
>
> We're doing a lot of them, but we actually don't have much copying. The
> structure is small enough to fit in a single register (even on AArch32),
> and we're passing it by value. So the whole state computation actually
> occurs in registers.
>
> But overall yes, I agree that we should probably try to do things in a
> more staged way. I'll have a look.
>
not too important, we can always clean it up, measure it, etc. later.
>>> +
>>> + vlr.state = 0;
>>> + vgic_set_lr(vcpu, lr_nr, vlr);
>>> + clear_bit(lr_nr, vgic_cpu->lr_used);
>>> + vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>>> +}
>>> +
>>> /*
>>> * An interrupt may have been disabled after being made pending on the
>>> * CPU interface (the classic case is a timer running while we're
>>> @@ -1007,12 +1064,12 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>>> int lr;
>>>
>>> for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
>>> - int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>>
>>> - if (!vgic_irq_is_enabled(vcpu, irq)) {
>>> - vgic_retire_lr(lr, irq, vgic_cpu);
>>> - if (vgic_irq_is_active(vcpu, irq))
>>> - vgic_irq_clear_active(vcpu, irq);
>>> + if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
>>> + vgic_retire_lr(lr, vlr.irq, vcpu);
>>> + if (vgic_irq_is_active(vcpu, vlr.irq))
>>> + vgic_irq_clear_active(vcpu, vlr.irq);
>>> }
>>> }
>>> }
>>> @@ -1024,6 +1081,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>>> static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>>> {
>>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>>> + struct vgic_lr vlr;
>>> int lr;
>>>
>>> /* Sanitize the input... */
>>> @@ -1036,13 +1094,15 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>>> lr = vgic_cpu->vgic_irq_lr_map[irq];
>>>
>>> /* Do we have an active interrupt for the same CPUID? */
>>> - if (lr != LR_EMPTY &&
>>> - (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
>>> - kvm_debug("LR%d piggyback for IRQ%d %x\n",
>>> - lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
>>> - BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
>>> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
>>> - return true;
>>> + if (lr != LR_EMPTY) {
>>> + vlr = vgic_get_lr(vcpu, lr);
>>> + if (vlr.source == sgi_source_id) {
>>> + kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
>>> + BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
>>> + vlr.state |= LR_STATE_PENDING;
>>> + vgic_set_lr(vcpu, lr, vlr);
>>> + return true;
>>> + }
>>> }
>>>
>>> /* Try to use another LR for this interrupt */
>>> @@ -1052,12 +1112,16 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>>> return false;
>>>
>>> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
>>> - vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
>>> vgic_cpu->vgic_irq_lr_map[irq] = lr;
>>> set_bit(lr, vgic_cpu->lr_used);
>>>
>>> + vlr.irq = irq;
>>> + vlr.source = sgi_source_id;
>>> + vlr.state = LR_STATE_PENDING;
>>> if (!vgic_irq_is_edge(vcpu, irq))
>>> - vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
>>> + vlr.state |= LR_EOI_INT;
>>> +
>>> + vgic_set_lr(vcpu, lr, vlr);
>>>
>>> return true;
>>> }
>>> @@ -1180,29 +1244,23 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
>>> * Some level interrupts have been EOIed. Clear their
>>> * active bit.
>>> */
>>> - int lr, irq;
>>> + int lr;
>>>
>>> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
>>> vgic_cpu->nr_lr) {
>>> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>>> + struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>>
>>> - vgic_irq_clear_active(vcpu, irq);
>>> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
>>> + vgic_irq_clear_active(vcpu, vlr.irq);
>>> + vlr.state = 0;
>>
>> slight change of semantics here. It is still correct, but only because
>> we never set the pending bit on an already active level interrupt, but I
>> guess this could technically be closer to real hardware by allowing
>> world-switching VMs that are processing active interrupts to pick up an
>> additional pending state, in which case just setting state = 0 would be
>> incorrect here, and you should instead lower the EOI mask like you did
>> before.
>>
>> That being said, it is a pseudo-theoretical point, and you can make me
>> more happy by adding:
>>
>> BUG_ON(vlr.state & LR_STATE_MASK);
>>
>> before clearing the state completely.
>
> Putting a BUG_ON() seems a bit harsh. WARN_ON()?
>
yeah, let's take it easy.
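So the loop would end up with something like (sketch):

	vgic_irq_clear_active(vcpu, vlr.irq);
	WARN_ON(vlr.state & LR_STATE_MASK);
	vlr.state = 0;
	vgic_set_lr(vcpu, lr, vlr);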
>>> + vgic_set_lr(vcpu, lr, vlr);
>>>
>>> /* Any additional pending interrupt? */
>>> - if (vgic_dist_irq_is_pending(vcpu, irq)) {
>>> - vgic_cpu_irq_set(vcpu, irq);
>>> + if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
>>> + vgic_cpu_irq_set(vcpu, vlr.irq);
>>> level_pending = true;
>>> } else {
>>> - vgic_cpu_irq_clear(vcpu, irq);
>>> + vgic_cpu_irq_clear(vcpu, vlr.irq);
>>> }
>>> -
>>> - /*
>>> - * Despite being EOIed, the LR may not have
>>> - * been marked as empty.
>>> - */
>>> - set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
>>> - vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
>>> }
>>> }
>>>
>>> @@ -1228,15 +1286,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>>> /* Clear mappings for empty LRs */
>>> for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
>>> vgic_cpu->nr_lr) {
>>> - int irq;
>>> + struct vgic_lr vlr;
>>>
>>> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>>> continue;
>>>
>>> - irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
>>> + vlr = vgic_get_lr(vcpu, lr);
>>>
>>> - BUG_ON(irq >= VGIC_NR_IRQS);
>>> - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>>> + BUG_ON(vlr.irq >= VGIC_NR_IRQS);
>>> + vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
>>> }
>>>
>>> /* Check if we still have something up our sleeve... */
>>> --
>>> 1.8.3.4
>>>
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (5 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 06/19] KVM: ARM: vgic: introduce vgic_ops and LR manipulation primitives Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 08/19] KVM: ARM: vgic: abstract EISR bitmap access Marc Zyngier
` (11 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move the GICH_ELRSR access to its own function, and add it to the
vgic_ops structure.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 1 +
virt/kvm/arm/vgic.c | 20 ++++++++++++++++----
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 17bbe51..01013ec 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -84,6 +84,7 @@ struct vgic_lr {
struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
+ u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
};
struct vgic_dist {
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index bac37b8..04206a8 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1022,9 +1022,16 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
}
+static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+ const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
+ return *(u64 *)elrsr;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
+ .get_elrsr = vgic_v2_get_elrsr,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1038,6 +1045,11 @@ static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
vgic_ops.set_lr(vcpu, lr, vlr);
}
+static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
+{
+ return vgic_ops.get_elrsr(vcpu);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1278,14 +1290,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u64 elrsr = vgic_get_elrsr(vcpu);
+ unsigned long *elrsr_ptr = (unsigned long *)&elrsr;
int lr, pending;
bool level_pending;
level_pending = vgic_process_maintenance(vcpu);
/* Clear mappings for empty LRs */
- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
- vgic_cpu->nr_lr) {
+ for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
struct vgic_lr vlr;
if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
@@ -1298,8 +1311,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
}
/* Check if we still have something up our sleeve... */
- pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
- vgic_cpu->nr_lr);
+ pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
if (level_pending || pending < vgic_cpu->nr_lr)
set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap
2014-04-16 13:39 ` [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
2014-05-12 17:41 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:39PM +0100, Marc Zyngier wrote:
> Move the GICH_ELRSR access to its own function, and add it to the
> vgic_ops structure.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 1 +
> virt/kvm/arm/vgic.c | 20 ++++++++++++++++----
> 2 files changed, 17 insertions(+), 4 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 17bbe51..01013ec 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -84,6 +84,7 @@ struct vgic_lr {
> struct vgic_ops {
> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
> + u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
> };
>
> struct vgic_dist {
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index bac37b8..04206a8 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1022,9 +1022,16 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
> set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
> }
>
> +static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
> +{
> + const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
> + return *(u64 *)elrsr;
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> + .get_elrsr = vgic_v2_get_elrsr,
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1038,6 +1045,11 @@ static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
> vgic_ops.set_lr(vcpu, lr, vlr);
> }
>
> +static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
> +{
> + return vgic_ops.get_elrsr(vcpu);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1278,14 +1290,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> + u64 elrsr = vgic_get_elrsr(vcpu);
> + unsigned long *elrsr_ptr = (unsigned long *)&elrsr;
looks to me like you're changing functionality here:
vgic_process_maintenance() called below can modify the elrsr bitmap, but
you're copying it into elrsr (local variable) here. Why do you even
need the temporary elrsr variable here, and not just declare elrsr_ptr, btw?
> int lr, pending;
> bool level_pending;
>
> level_pending = vgic_process_maintenance(vcpu);
>
> /* Clear mappings for empty LRs */
> - for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
> - vgic_cpu->nr_lr) {
> + for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
> struct vgic_lr vlr;
>
> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
> @@ -1298,8 +1311,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> }
>
> /* Check if we still have something up our sleeve... */
> - pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
> - vgic_cpu->nr_lr);
> + pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
> if (level_pending || pending < vgic_cpu->nr_lr)
> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
> }
> --
> 1.8.3.4
>
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread* [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap
2014-05-09 14:06 ` Christoffer Dall
@ 2014-05-12 17:41 ` Marc Zyngier
0 siblings, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 17:41 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:06:03 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:39PM +0100, Marc Zyngier wrote:
>> Move the GICH_ELRSR access to its own function, and add it to the
>> vgic_ops structure.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 1 +
>> virt/kvm/arm/vgic.c | 20 ++++++++++++++++----
>> 2 files changed, 17 insertions(+), 4 deletions(-)
>>
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index 17bbe51..01013ec 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -84,6 +84,7 @@ struct vgic_lr {
>> struct vgic_ops {
>> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
>> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
>> + u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
>> };
>>
>> struct vgic_dist {
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index bac37b8..04206a8 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -1022,9 +1022,16 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
>> set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
>> }
>>
>> +static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
>> +{
>> + const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
>> + return *(u64 *)elrsr;
>> +}
>> +
>> static const struct vgic_ops vgic_ops = {
>> .get_lr = vgic_v2_get_lr,
>> .set_lr = vgic_v2_set_lr,
>> + .get_elrsr = vgic_v2_get_elrsr,
>> };
>>
>> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> @@ -1038,6 +1045,11 @@ static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
>> vgic_ops.set_lr(vcpu, lr, vlr);
>> }
>>
>> +static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
>> +{
>> + return vgic_ops.get_elrsr(vcpu);
>> +}
>> +
>> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
>> {
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> @@ -1278,14 +1290,15 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> {
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> + u64 elrsr = vgic_get_elrsr(vcpu);
>> + unsigned long *elrsr_ptr = (unsigned long *)&elrsr;
>
> looks to me like you're changing functionality here:
> vgic_process_maintenance() called below can modify the elrsr bitmap, but
> you're copying it into elrsr (local variable) here. Why do you even
> need the temporary elrsr variable here, and not just declare elrsr_ptr, btw?
Duh. Nice one. The whole reason for elrsr_ptr is to have a pointer to an
unsigned long which the bitmap ops can use... Don't say a word, I know.
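The fix is presumably just to take the snapshot after the maintenance
pass, something like (sketch):

	level_pending = vgic_process_maintenance(vcpu);

	/* only snapshot the ELRSR once maintenance has updated the LRs */
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;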
>> int lr, pending;
>> bool level_pending;
>>
>> level_pending = vgic_process_maintenance(vcpu);
>>
>> /* Clear mappings for empty LRs */
>> - for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
>> - vgic_cpu->nr_lr) {
>> + for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
>> struct vgic_lr vlr;
>>
>> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>> @@ -1298,8 +1311,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> }
>>
>> /* Check if we still have something up our sleeve... */
>> - pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
>> - vgic_cpu->nr_lr);
>> + pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
>> if (level_pending || pending < vgic_cpu->nr_lr)
>> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
>> }
>> --
>> 1.8.3.4
>>
> -Christoffer
>
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 08/19] KVM: ARM: vgic: abstract EISR bitmap access
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (6 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 07/19] KVM: ARM: vgic: abstract access to the ELRSR bitmap Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 09/19] KVM: ARM: vgic: abstract MISR decoding Marc Zyngier
` (10 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move the GICH_EISR access to its own function.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 1 +
virt/kvm/arm/vgic.c | 16 +++++++++++++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 01013ec..5dbc8ee 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -85,6 +85,7 @@ struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
+ u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
};
struct vgic_dist {
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 04206a8..896359f 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1028,10 +1028,17 @@ static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
return *(u64 *)elrsr;
}
+static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
+{
+ const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
+ return *(u64 *)eisr;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
.get_elrsr = vgic_v2_get_elrsr,
+ .get_eisr = vgic_v2_get_eisr,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1050,6 +1057,11 @@ static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
return vgic_ops.get_elrsr(vcpu);
}
+static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
+{
+ return vgic_ops.get_eisr(vcpu);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1256,9 +1268,11 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
* Some level interrupts have been EOIed. Clear their
* active bit.
*/
+ u64 eisr = vgic_get_eisr(vcpu);
+ unsigned long *eisr_ptr = (unsigned long *)&eisr;
int lr;
- for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
+ for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
vgic_cpu->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 08/19] KVM: ARM: vgic: abstract EISR bitmap access
2014-04-16 13:39 ` [PATCH v3 08/19] KVM: ARM: vgic: abstract EISR bitmap access Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:40PM +0100, Marc Zyngier wrote:
> Move the GICH_EISR access to its own function.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 1 +
> virt/kvm/arm/vgic.c | 16 +++++++++++++++-
> 2 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 01013ec..5dbc8ee 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -85,6 +85,7 @@ struct vgic_ops {
> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
> u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
> + u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
> };
>
> struct vgic_dist {
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 04206a8..896359f 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1028,10 +1028,17 @@ static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
> return *(u64 *)elrsr;
> }
>
> +static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
> +{
> + const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
> + return *(u64 *)eisr;
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> .get_elrsr = vgic_v2_get_elrsr,
> + .get_eisr = vgic_v2_get_eisr,
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1050,6 +1057,11 @@ static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
> return vgic_ops.get_elrsr(vcpu);
> }
>
> +static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
> +{
> + return vgic_ops.get_eisr(vcpu);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1256,9 +1268,11 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> * Some level interrupts have been EOIed. Clear their
> * active bit.
> */
> + u64 eisr = vgic_get_eisr(vcpu);
> + unsigned long *eisr_ptr = (unsigned long *)&eisr;
> int lr;
>
> - for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
> + for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
> vgic_cpu->nr_lr) {
looks like you missed a line here.
> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> --
> 1.8.3.4
>
Otherwise,
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 09/19] KVM: ARM: vgic: abstract MISR decoding
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (7 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 08/19] KVM: ARM: vgic: abstract EISR bitmap access Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 10/19] KVM: ARM: vgic: move underflow handling to vgic_ops Marc Zyngier
` (9 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Instead of directly dealing with the GICH_MISR bits, move the code to
its own function and use a couple of public flags to represent the
actual state.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 4 ++++
virt/kvm/arm/vgic.c | 26 +++++++++++++++++++++++---
2 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 5dbc8ee..98208e0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -86,6 +86,7 @@ struct vgic_ops {
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
+ u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
};
struct vgic_dist {
@@ -164,6 +165,9 @@ struct vgic_cpu {
#define LR_EMPTY 0xff
+#define INT_STATUS_EOI (1 << 0)
+#define INT_STATUS_UNDERFLOW (1 << 1)
+
struct kvm;
struct kvm_vcpu;
struct kvm_run;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 896359f..9e100f6 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1034,11 +1034,25 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
return *(u64 *)eisr;
}
+static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+ u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
+ u32 ret = 0;
+
+ if (misr & GICH_MISR_EOI)
+ ret |= INT_STATUS_EOI;
+ if (misr & GICH_MISR_U)
+ ret |= INT_STATUS_UNDERFLOW;
+
+ return ret;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
+ .get_interrupt_status = vgic_v2_get_interrupt_status,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1062,6 +1076,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
return vgic_ops.get_eisr(vcpu);
}
+static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
+{
+ return vgic_ops.get_interrupt_status(vcpu);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1259,11 +1278,12 @@ epilog:
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u32 status = vgic_get_interrupt_status(vcpu);
bool level_pending = false;
- kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);
+ kvm_debug("STATUS = %08x\n", status);
- if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
+ if (status & INT_STATUS_EOI) {
/*
* Some level interrupts have been EOIed. Clear their
* active bit.
@@ -1290,7 +1310,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
}
- if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
+ if (status & INT_STATUS_UNDERFLOW)
vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
return level_pending;
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 09/19] KVM: ARM: vgic: abstract MISR decoding
2014-04-16 13:39 ` [PATCH v3 09/19] KVM: ARM: vgic: abstract MISR decoding Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:41PM +0100, Marc Zyngier wrote:
> Instead of directly dealing with the GICH_MISR bits, move the code to
> its own function and use a couple of public flags to represent the
> actual state.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 4 ++++
> virt/kvm/arm/vgic.c | 26 +++++++++++++++++++++++---
> 2 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 5dbc8ee..98208e0 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -86,6 +86,7 @@ struct vgic_ops {
> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
> u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
> u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
> + u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
> };
>
> struct vgic_dist {
> @@ -164,6 +165,9 @@ struct vgic_cpu {
>
> #define LR_EMPTY 0xff
>
> +#define INT_STATUS_EOI (1 << 0)
> +#define INT_STATUS_UNDERFLOW (1 << 1)
> +
> struct kvm;
> struct kvm_vcpu;
> struct kvm_run;
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 896359f..9e100f6 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1034,11 +1034,25 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
> return *(u64 *)eisr;
> }
>
> +static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
> +{
> + u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
> + u32 ret = 0;
> +
> + if (misr & GICH_MISR_EOI)
> + ret |= INT_STATUS_EOI;
> + if (misr & GICH_MISR_U)
> + ret |= INT_STATUS_UNDERFLOW;
> +
> + return ret;
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> .get_elrsr = vgic_v2_get_elrsr,
> .get_eisr = vgic_v2_get_eisr,
> + .get_interrupt_status = vgic_v2_get_interrupt_status,
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1062,6 +1076,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
> return vgic_ops.get_eisr(vcpu);
> }
>
> +static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
> +{
> + return vgic_ops.get_interrupt_status(vcpu);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1259,11 +1278,12 @@ epilog:
> static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> + u32 status = vgic_get_interrupt_status(vcpu);
> bool level_pending = false;
>
> - kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);
> + kvm_debug("STATUS = %08x\n", status);
>
> - if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
> + if (status & INT_STATUS_EOI) {
> /*
> * Some level interrupts have been EOIed. Clear their
> * active bit.
> @@ -1290,7 +1310,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> }
> }
>
> - if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
> + if (status & INT_STATUS_UNDERFLOW)
> vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
>
> return level_pending;
> --
> 1.8.3.4
>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 10/19] KVM: ARM: vgic: move underflow handling to vgic_ops
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (8 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 09/19] KVM: ARM: vgic: abstract MISR decoding Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access Marc Zyngier
` (8 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move the code dealing with LR underflow handling to its own functions,
and make them accessible through vgic_ops.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 2 ++
virt/kvm/arm/vgic.c | 28 +++++++++++++++++++++++++---
2 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 98208e0..831b9f5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -87,6 +87,8 @@ struct vgic_ops {
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
+ void (*set_underflow)(struct kvm_vcpu *vcpu);
+ void (*clear_underflow)(struct kvm_vcpu *vcpu);
};
struct vgic_dist {
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 9e100f6..13dad1f 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1047,12 +1047,24 @@ static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
return ret;
}
+static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
+}
+
+static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
.get_elrsr = vgic_v2_get_elrsr,
.get_eisr = vgic_v2_get_eisr,
.get_interrupt_status = vgic_v2_get_interrupt_status,
+ .set_underflow = vgic_v2_set_underflow,
+ .clear_underflow = vgic_v2_clear_underflow,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1081,6 +1093,16 @@ static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
return vgic_ops.get_interrupt_status(vcpu);
}
+static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
+{
+ vgic_ops.set_underflow(vcpu);
+}
+
+static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
+{
+ vgic_ops.clear_underflow(vcpu);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1262,9 +1284,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
epilog:
if (overflow) {
- vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
+ vgic_set_underflow(vcpu);
} else {
- vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+ vgic_clear_underflow(vcpu);
/*
* We're about to run this VCPU, and we've consumed
* everything the distributor had in store for
@@ -1311,7 +1333,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
if (status & INT_STATUS_UNDERFLOW)
- vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+ vgic_clear_underflow(vcpu);
return level_pending;
}
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 10/19] KVM: ARM: vgic: move underflow handling to vgic_ops
2014-04-16 13:39 ` [PATCH v3 10/19] KVM: ARM: vgic: move underflow handling to vgic_ops Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:42PM +0100, Marc Zyngier wrote:
> Move the code dealing with LR underflow handling to its own functions,
> and make them accessible through vgic_ops.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 2 ++
> virt/kvm/arm/vgic.c | 28 +++++++++++++++++++++++++---
> 2 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 98208e0..831b9f5 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -87,6 +87,8 @@ struct vgic_ops {
> u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
> u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
> u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
> + void (*set_underflow)(struct kvm_vcpu *vcpu);
> + void (*clear_underflow)(struct kvm_vcpu *vcpu);
> };
>
> struct vgic_dist {
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 9e100f6..13dad1f 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1047,12 +1047,24 @@ static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
> return ret;
> }
>
> +static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
> +}
> +
> +static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> .get_elrsr = vgic_v2_get_elrsr,
> .get_eisr = vgic_v2_get_eisr,
> .get_interrupt_status = vgic_v2_get_interrupt_status,
> + .set_underflow = vgic_v2_set_underflow,
> + .clear_underflow = vgic_v2_clear_underflow,
hmm, not crazy about the naming, we're not setting/clearing underflow
status but enabling/disabling underflow interrupts...
Perhaps vgic_v2_enable_underflow_int? Too long?
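i.e. something along these lines in the ops (names purely illustrative):

	.enable_underflow = vgic_v2_enable_underflow,
	.disable_underflow = vgic_v2_disable_underflow,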
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1081,6 +1093,16 @@ static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
> return vgic_ops.get_interrupt_status(vcpu);
> }
>
> +static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
> +{
> + vgic_ops.set_underflow(vcpu);
> +}
> +
> +static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
> +{
> + vgic_ops.clear_underflow(vcpu);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1262,9 +1284,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
>
> epilog:
> if (overflow) {
> - vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
> + vgic_set_underflow(vcpu);
> } else {
> - vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> + vgic_clear_underflow(vcpu);
> /*
> * We're about to run this VCPU, and we've consumed
> * everything the distributor had in store for
> @@ -1311,7 +1333,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> }
>
> if (status & INT_STATUS_UNDERFLOW)
> - vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> + vgic_clear_underflow(vcpu);
>
> return level_pending;
> }
> --
> 1.8.3.4
>
Otherwise,
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (9 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 10/19] KVM: ARM: vgic: move underflow handling to vgic_ops Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 12/19] KVM: ARM: vgic: introduce vgic_enable Marc Zyngier
` (7 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Instead of directly messing with the GICH_VMCR bits for the CPU
interface save/restore code, add accessors that encode/decode the
entire set of registers exposed by VMCR.
Not the most efficient thing, but given that this code is only used
by the save/restore code, performance is far from being critical.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 9 +++++++
virt/kvm/arm/vgic.c | 69 ++++++++++++++++++++++++++++++++++++++------------
2 files changed, 62 insertions(+), 16 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 831b9f5..0017253 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -81,6 +81,13 @@ struct vgic_lr {
u8 state;
};
+struct vgic_vmcr {
+ u32 ctlr;
+ u32 abpr;
+ u32 bpr;
+ u32 pmr;
+};
+
struct vgic_ops {
struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
@@ -89,6 +96,8 @@ struct vgic_ops {
u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
void (*set_underflow)(struct kvm_vcpu *vcpu);
void (*clear_underflow)(struct kvm_vcpu *vcpu);
+ void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
};
struct vgic_dist {
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 13dad1f..574ca47 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -100,8 +100,10 @@ static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static u32 vgic_nr_lr;
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+static u32 vgic_nr_lr;
static unsigned int vgic_maint_irq;
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
@@ -1057,6 +1059,28 @@ static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
}
+static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+ vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
+ vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+ vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
+ vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
+}
+
+static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr;
+
+ vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+ vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
+ vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
+ vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
@@ -1065,6 +1089,8 @@ static const struct vgic_ops vgic_ops = {
.get_interrupt_status = vgic_v2_get_interrupt_status,
.set_underflow = vgic_v2_set_underflow,
.clear_underflow = vgic_v2_clear_underflow,
+ .get_vmcr = vgic_v2_get_vmcr,
+ .set_vmcr = vgic_v2_set_vmcr,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1103,6 +1129,16 @@ static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
vgic_ops.clear_underflow(vcpu);
}
+static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+ vgic_ops.get_vmcr(vcpu, vmcr);
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+ vgic_ops.set_vmcr(vcpu, vmcr);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1847,39 +1883,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- u32 reg, mask = 0, shift = 0;
bool updated = false;
+ struct vgic_vmcr vmcr;
+ u32 *vmcr_field;
+ u32 reg;
+
+ vgic_get_vmcr(vcpu, &vmcr);
switch (offset & ~0x3) {
case GIC_CPU_CTRL:
- mask = GICH_VMCR_CTRL_MASK;
- shift = GICH_VMCR_CTRL_SHIFT;
+ vmcr_field = &vmcr.ctlr;
break;
case GIC_CPU_PRIMASK:
- mask = GICH_VMCR_PRIMASK_MASK;
- shift = GICH_VMCR_PRIMASK_SHIFT;
+ vmcr_field = &vmcr.pmr;
break;
case GIC_CPU_BINPOINT:
- mask = GICH_VMCR_BINPOINT_MASK;
- shift = GICH_VMCR_BINPOINT_SHIFT;
+ vmcr_field = &vmcr.bpr;
break;
case GIC_CPU_ALIAS_BINPOINT:
- mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
- shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+ vmcr_field = &vmcr.abpr;
break;
+ default:
+ BUG();
}
if (!mmio->is_write) {
- reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
+ reg = *vmcr_field;
mmio_data_write(mmio, ~0, reg);
} else {
reg = mmio_data_read(mmio, ~0);
- reg = (reg << shift) & mask;
- if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
+ if (reg != *vmcr_field) {
+ *vmcr_field = reg;
+ vgic_set_vmcr(vcpu, &vmcr);
updated = true;
- vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
- vgic_cpu->vgic_v2.vgic_vmcr |= reg;
+ }
}
return updated;
}
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access
2014-04-16 13:39 ` [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
2014-05-13 17:43 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:43PM +0100, Marc Zyngier wrote:
> Instead of directly messing with the GICH_VMCR bits for the CPU
> interface save/restore code, add accessors that encode/decode the
> entire set of registers exposed by VMCR.
>
> Not the most efficient thing, but given that this code is only used
> by the save/restore code, performance is far from being critical.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 9 +++++++
> virt/kvm/arm/vgic.c | 69 ++++++++++++++++++++++++++++++++++++++------------
> 2 files changed, 62 insertions(+), 16 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 831b9f5..0017253 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -81,6 +81,13 @@ struct vgic_lr {
> u8 state;
> };
>
> +struct vgic_vmcr {
> + u32 ctlr;
> + u32 abpr;
> + u32 bpr;
> + u32 pmr;
> +};
> +
> struct vgic_ops {
> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
> @@ -89,6 +96,8 @@ struct vgic_ops {
> u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
> void (*set_underflow)(struct kvm_vcpu *vcpu);
> void (*clear_underflow)(struct kvm_vcpu *vcpu);
> + void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> + void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> };
>
> struct vgic_dist {
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 13dad1f..574ca47 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -100,8 +100,10 @@ static void vgic_kick_vcpus(struct kvm *kvm);
> static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
> static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
> static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
> -static u32 vgic_nr_lr;
> +static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> +static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>
> +static u32 vgic_nr_lr;
> static unsigned int vgic_maint_irq;
>
> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
> @@ -1057,6 +1059,28 @@ static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
> vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> }
>
> +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
> +
> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
> + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
> + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
> + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
> +}
> +
> +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr;
> +
> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
> + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
> + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
> + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
> +
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
did you forget to add this field to the vgic_v2 struct as part of this patch?
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> @@ -1065,6 +1089,8 @@ static const struct vgic_ops vgic_ops = {
> .get_interrupt_status = vgic_v2_get_interrupt_status,
> .set_underflow = vgic_v2_set_underflow,
> .clear_underflow = vgic_v2_clear_underflow,
> + .get_vmcr = vgic_v2_get_vmcr,
> + .set_vmcr = vgic_v2_set_vmcr,
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1103,6 +1129,16 @@ static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
> vgic_ops.clear_underflow(vcpu);
> }
>
> +static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> +{
> + vgic_ops.get_vmcr(vcpu, vmcr);
> +}
> +
> +static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> +{
> + vgic_ops.set_vmcr(vcpu, vmcr);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1847,39 +1883,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
> static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
> struct kvm_exit_mmio *mmio, phys_addr_t offset)
> {
> - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> - u32 reg, mask = 0, shift = 0;
> bool updated = false;
> + struct vgic_vmcr vmcr;
> + u32 *vmcr_field;
> + u32 reg;
> +
> + vgic_get_vmcr(vcpu, &vmcr);
>
> switch (offset & ~0x3) {
> case GIC_CPU_CTRL:
> - mask = GICH_VMCR_CTRL_MASK;
> - shift = GICH_VMCR_CTRL_SHIFT;
> + vmcr_field = &vmcr.ctlr;
> break;
> case GIC_CPU_PRIMASK:
> - mask = GICH_VMCR_PRIMASK_MASK;
> - shift = GICH_VMCR_PRIMASK_SHIFT;
> + vmcr_field = &vmcr.pmr;
> break;
> case GIC_CPU_BINPOINT:
> - mask = GICH_VMCR_BINPOINT_MASK;
> - shift = GICH_VMCR_BINPOINT_SHIFT;
> + vmcr_field = &vmcr.bpr;
> break;
> case GIC_CPU_ALIAS_BINPOINT:
> - mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
> - shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
> + vmcr_field = &vmcr.abpr;
> break;
> + default:
> + BUG();
> }
>
> if (!mmio->is_write) {
> - reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
> + reg = *vmcr_field;
> mmio_data_write(mmio, ~0, reg);
> } else {
> reg = mmio_data_read(mmio, ~0);
> - reg = (reg << shift) & mask;
> - if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
> + if (reg != *vmcr_field) {
> + *vmcr_field = reg;
> + vgic_set_vmcr(vcpu, &vmcr);
> updated = true;
> - vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
> - vgic_cpu->vgic_v2.vgic_vmcr |= reg;
> + }
> }
> return updated;
> }
> --
> 1.8.3.4
>
I'd like to actually try and compile this patch before giving a
reviewed-by tag, but it looks good otherwise.
-Christoffer
* [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access
2014-05-09 14:06 ` Christoffer Dall
@ 2014-05-13 17:43 ` Marc Zyngier
2014-05-14 16:28 ` Christoffer Dall
0 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-13 17:43 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:06:33 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:43PM +0100, Marc Zyngier wrote:
>> Instead of directly messing with the GICH_VMCR bits for the CPU
>> interface save/restore code, add accessors that encode/decode the
>> entire set of registers exposed by VMCR.
>>
>> Not the most efficient thing, but given that this code is only used
>> by the save/restore code, performance is far from being critical.
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 9 +++++++
>> virt/kvm/arm/vgic.c | 69 ++++++++++++++++++++++++++++++++++++++------------
>> 2 files changed, 62 insertions(+), 16 deletions(-)
>>
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index 831b9f5..0017253 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -81,6 +81,13 @@ struct vgic_lr {
>> u8 state;
>> };
>>
>> +struct vgic_vmcr {
>> + u32 ctlr;
>> + u32 abpr;
>> + u32 bpr;
>> + u32 pmr;
>> +};
>> +
>> struct vgic_ops {
>> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
>> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
>> @@ -89,6 +96,8 @@ struct vgic_ops {
>> u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
>> void (*set_underflow)(struct kvm_vcpu *vcpu);
>> void (*clear_underflow)(struct kvm_vcpu *vcpu);
>> + void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>> + void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>> };
>>
>> struct vgic_dist {
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index 13dad1f..574ca47 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -100,8 +100,10 @@ static void vgic_kick_vcpus(struct kvm *kvm);
>> static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
>> static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
>> static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
>> -static u32 vgic_nr_lr;
>> +static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>> +static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>
>> +static u32 vgic_nr_lr;
>> static unsigned int vgic_maint_irq;
>>
>> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
>> @@ -1057,6 +1059,28 @@ static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
>> vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
>> }
>>
>> +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
>> +
>> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
>> + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
>> + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
>> + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
>> +}
>> +
>> +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr;
>> +
>> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
>> + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
>> + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
>> + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
>
> did you forget to add this field to the vgic_v2 struct as part of this patch?
I don't think so, I believe it appears in patch #5.
M.
--
Jazz is not dead. It just smells funny.
* [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access
2014-05-13 17:43 ` Marc Zyngier
@ 2014-05-14 16:28 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-14 16:28 UTC (permalink / raw)
To: linux-arm-kernel
On 13 May 2014 18:43, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On Fri, May 09 2014 at 3:06:33 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>> On Wed, Apr 16, 2014 at 02:39:43PM +0100, Marc Zyngier wrote:
>>> Instead of directly messing with the GICH_VMCR bits for the CPU
>>> interface save/restore code, add accessors that encode/decode the
>>> entire set of registers exposed by VMCR.
>>>
>>> Not the most efficient thing, but given that this code is only used
>>> by the save/restore code, performance is far from being critical.
>>>
>>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>>> ---
>>> include/kvm/arm_vgic.h | 9 +++++++
>>> virt/kvm/arm/vgic.c | 69 ++++++++++++++++++++++++++++++++++++++------------
>>> 2 files changed, 62 insertions(+), 16 deletions(-)
>>>
>>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>>> index 831b9f5..0017253 100644
>>> --- a/include/kvm/arm_vgic.h
>>> +++ b/include/kvm/arm_vgic.h
>>> @@ -81,6 +81,13 @@ struct vgic_lr {
>>> u8 state;
>>> };
>>>
>>> +struct vgic_vmcr {
>>> + u32 ctlr;
>>> + u32 abpr;
>>> + u32 bpr;
>>> + u32 pmr;
>>> +};
>>> +
>>> struct vgic_ops {
>>> struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
>>> void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
>>> @@ -89,6 +96,8 @@ struct vgic_ops {
>>> u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
>>> void (*set_underflow)(struct kvm_vcpu *vcpu);
>>> void (*clear_underflow)(struct kvm_vcpu *vcpu);
>>> + void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>> + void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>> };
>>>
>>> struct vgic_dist {
>>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>>> index 13dad1f..574ca47 100644
>>> --- a/virt/kvm/arm/vgic.c
>>> +++ b/virt/kvm/arm/vgic.c
>>> @@ -100,8 +100,10 @@ static void vgic_kick_vcpus(struct kvm *kvm);
>>> static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
>>> static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
>>> static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
>>> -static u32 vgic_nr_lr;
>>> +static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>> +static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>>
>>> +static u32 vgic_nr_lr;
>>> static unsigned int vgic_maint_irq;
>>>
>>> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
>>> @@ -1057,6 +1059,28 @@ static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
>>> vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
>>> }
>>>
>>> +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>>> +{
>>> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
>>> +
>>> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
>>> + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
>>> + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
>>> + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
>>> +}
>>> +
>>> +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>>> +{
>>> + u32 vmcr;
>>> +
>>> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
>>> + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
>>> + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
>>> + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
>>> +
>>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
>>
>> did you forget to add this field to the vgic_v2 struct as part of this patch?
>
> I don't think so, I believe it appears in patch #5.
>
right, not sure what happened here.
* [PATCH v3 12/19] KVM: ARM: vgic: introduce vgic_enable
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (10 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 11/19] KVM: ARM: vgic: abstract VMCR access Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure Marc Zyngier
` (6 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move the code dealing with kicking the VGIC on to vgic_ops.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 1 +
virt/kvm/arm/vgic.c | 29 +++++++++++++++++++++--------
2 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 0017253..58a938f 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -98,6 +98,7 @@ struct vgic_ops {
void (*clear_underflow)(struct kvm_vcpu *vcpu);
void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void (*enable)(struct kvm_vcpu *vcpu);
};
struct vgic_dist {
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 574ca47..a6d70fc 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1081,6 +1081,19 @@ static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}
+static void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+ /*
+ * By forcing VMCR to zero, the GIC will restore the binary
+ * points to their reset values. Anything else resets to zero
+ * anyway.
+ */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+
+ /* Get the show on the road... */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
static const struct vgic_ops vgic_ops = {
.get_lr = vgic_v2_get_lr,
.set_lr = vgic_v2_set_lr,
@@ -1091,6 +1104,7 @@ static const struct vgic_ops vgic_ops = {
.clear_underflow = vgic_v2_clear_underflow,
.get_vmcr = vgic_v2_get_vmcr,
.set_vmcr = vgic_v2_set_vmcr,
+ .enable = vgic_v2_enable,
};
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1139,6 +1153,11 @@ static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
vgic_ops.set_vmcr(vcpu, vmcr);
}
+static inline void vgic_enable(struct kvm_vcpu *vcpu)
+{
+ vgic_ops.enable(vcpu);
+}
+
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1593,15 +1612,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
}
- /*
- * By forcing VMCR to zero, the GIC will restore the binary
- * points to their reset values. Anything else resets to zero
- * anyway.
- */
- vgic_cpu->vgic_v2.vgic_vmcr = 0;
-
vgic_cpu->nr_lr = vgic_nr_lr;
- vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+
+ vgic_enable(vcpu);
return 0;
}
--
1.8.3.4
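For readers skimming the thread, the enable hook follows the same function-pointer indirection the rest of the series builds on. A standalone sketch of the pattern, with invented names and values rather than the kernel's types:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the per-vcpu GICv2 state. */
struct fake_vcpu {
        uint32_t vmcr;
        uint32_t hcr;
};

struct fake_vgic_ops {
        void (*enable)(struct fake_vcpu *vcpu);
};

/* "v2" backend: reset VMCR, then switch the interface on. */
static void fake_v2_enable(struct fake_vcpu *vcpu)
{
        vcpu->vmcr = 0;         /* binary points back to their reset values */
        vcpu->hcr = 1;          /* stand-in for GICH_HCR_EN */
}

static const struct fake_vgic_ops fake_ops = {
        .enable = fake_v2_enable,
};

/* Common code only ever goes through the ops table. */
static void fake_vgic_enable(struct fake_vcpu *vcpu)
{
        fake_ops.enable(vcpu);
}

int main(void)
{
        struct fake_vcpu vcpu = { .vmcr = 0xdeadbeef, .hcr = 0 };

        fake_vgic_enable(&vcpu);
        printf("vmcr=%#x hcr=%#x\n", (unsigned int)vcpu.vmcr,
               (unsigned int)vcpu.hcr);
        return 0;
}

The point of the indirection is that a later GICv3 backend only has to supply its own enable implementation; kvm_vgic_vcpu_init() keeps calling vgic_enable() unchanged.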
* [PATCH v3 12/19] KVM: ARM: vgic: introduce vgic_enable
2014-04-16 13:39 ` [PATCH v3 12/19] KVM: ARM: vgic: introduce vgic_enable Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:44PM +0100, Marc Zyngier wrote:
> Move the code dealing with kicking the VGIC on to vgic_ops.
Kicking the VGIC? Enabling it?
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 1 +
> virt/kvm/arm/vgic.c | 29 +++++++++++++++++++++--------
> 2 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 0017253..58a938f 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -98,6 +98,7 @@ struct vgic_ops {
> void (*clear_underflow)(struct kvm_vcpu *vcpu);
> void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> + void (*enable)(struct kvm_vcpu *vcpu);
> };
>
> struct vgic_dist {
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 574ca47..a6d70fc 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1081,6 +1081,19 @@ static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
> }
>
> +static void vgic_v2_enable(struct kvm_vcpu *vcpu)
> +{
> + /*
> + * By forcing VMCR to zero, the GIC will restore the binary
> + * points to their reset values. Anything else resets to zero
> + * anyway.
> + */
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
> +
> + /* Get the show on the road... */
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
> +}
> +
> static const struct vgic_ops vgic_ops = {
> .get_lr = vgic_v2_get_lr,
> .set_lr = vgic_v2_set_lr,
> @@ -1091,6 +1104,7 @@ static const struct vgic_ops vgic_ops = {
> .clear_underflow = vgic_v2_clear_underflow,
> .get_vmcr = vgic_v2_get_vmcr,
> .set_vmcr = vgic_v2_set_vmcr,
> + .enable = vgic_v2_enable,
> };
>
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> @@ -1139,6 +1153,11 @@ static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> vgic_ops.set_vmcr(vcpu, vmcr);
> }
>
> +static inline void vgic_enable(struct kvm_vcpu *vcpu)
> +{
> + vgic_ops.enable(vcpu);
> +}
> +
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> @@ -1593,15 +1612,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
> vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
> }
>
> - /*
> - * By forcing VMCR to zero, the GIC will restore the binary
> - * points to their reset values. Anything else resets to zero
> - * anyway.
> - */
> - vgic_cpu->vgic_v2.vgic_vmcr = 0;
> -
> vgic_cpu->nr_lr = vgic_nr_lr;
> - vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
> +
> + vgic_enable(vcpu);
>
> return 0;
> }
> --
> 1.8.3.4
>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
* [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (11 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 12/19] KVM: ARM: vgic: introduce vgic_enable Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:06 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code Marc Zyngier
` (5 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move all the data specific to a given GIC implementation into its own
little structure.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 11 +++++++++
virt/kvm/arm/vgic.c | 66 +++++++++++++++++++++-----------------------------
2 files changed, 39 insertions(+), 38 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 58a938f..23922b9 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -101,6 +101,17 @@ struct vgic_ops {
void (*enable)(struct kvm_vcpu *vcpu);
};
+struct vgic_params {
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
+ /* Number of list registers */
+ u32 nr_lr;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* Virtual control interface base address */
+ void __iomem *vctrl_base;
+};
+
struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index a6d70fc..c22afce 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -76,14 +76,6 @@
#define IMPLEMENTER_ARM 0x43b
#define GICC_ARCH_VERSION_V2 0x2
-/* Physical address of vgic virtual cpu interface */
-static phys_addr_t vgic_vcpu_base;
-
-/* Virtual control interface base address */
-static void __iomem *vgic_vctrl_base;
-
-static struct device_node *vgic_node;
-
#define ACCESS_READ_VALUE (1 << 0)
#define ACCESS_READ_RAZ (0 << 0)
#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
@@ -103,8 +95,7 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static u32 vgic_nr_lr;
-static unsigned int vgic_maint_irq;
+static struct vgic_params vgic;
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
int cpuid, u32 offset)
@@ -1183,7 +1174,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int lr;
- for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+ for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
@@ -1227,8 +1218,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
/* Try to use another LR for this interrupt */
lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
- vgic_cpu->nr_lr);
- if (lr >= vgic_cpu->nr_lr)
+ vgic.nr_lr);
+ if (lr >= vgic.nr_lr)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
@@ -1354,7 +1345,6 @@ epilog:
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u32 status = vgic_get_interrupt_status(vcpu);
bool level_pending = false;
@@ -1369,8 +1359,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
unsigned long *eisr_ptr = (unsigned long *)&eisr;
int lr;
- for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
- vgic_cpu->nr_lr) {
+ for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
vgic_irq_clear_active(vcpu, vlr.irq);
@@ -1409,7 +1398,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
level_pending = vgic_process_maintenance(vcpu);
/* Clear mappings for empty LRs */
- for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
+ for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
struct vgic_lr vlr;
if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
@@ -1422,8 +1411,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
}
/* Check if we still have something up our sleeve... */
- pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
- if (level_pending || pending < vgic_cpu->nr_lr)
+ pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
+ if (level_pending || pending < vgic.nr_lr)
set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
@@ -1612,7 +1601,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
}
- vgic_cpu->nr_lr = vgic_nr_lr;
+ vgic_cpu->nr_lr = vgic.nr_lr;
vgic_enable(vcpu);
@@ -1621,7 +1610,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
static void vgic_init_maintenance_interrupt(void *info)
{
- enable_percpu_irq(vgic_maint_irq, 0);
+ enable_percpu_irq(vgic.maint_irq, 0);
}
static int vgic_cpu_notify(struct notifier_block *self,
@@ -1634,7 +1623,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
- disable_percpu_irq(vgic_maint_irq);
+ disable_percpu_irq(vgic.maint_irq);
break;
}
@@ -1650,6 +1639,7 @@ int kvm_vgic_hyp_init(void)
int ret;
struct resource vctrl_res;
struct resource vcpu_res;
+ struct device_node *vgic_node;
vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
if (!vgic_node) {
@@ -1657,17 +1647,17 @@ int kvm_vgic_hyp_init(void)
return -ENODEV;
}
- vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
- if (!vgic_maint_irq) {
+ vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
+ if (!vgic.maint_irq) {
kvm_err("error getting vgic maintenance irq from DT\n");
ret = -ENXIO;
goto out;
}
- ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
+ ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
if (ret) {
- kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
+ kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
goto out;
}
@@ -1683,18 +1673,18 @@ int kvm_vgic_hyp_init(void)
goto out_free_irq;
}
- vgic_vctrl_base = of_iomap(vgic_node, 2);
- if (!vgic_vctrl_base) {
+ vgic.vctrl_base = of_iomap(vgic_node, 2);
+ if (!vgic.vctrl_base) {
kvm_err("Cannot ioremap VCTRL\n");
ret = -ENOMEM;
goto out_free_irq;
}
- vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
- vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
+ vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
+ vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
- ret = create_hyp_io_mappings(vgic_vctrl_base,
- vgic_vctrl_base + resource_size(&vctrl_res),
+ ret = create_hyp_io_mappings(vgic.vctrl_base,
+ vgic.vctrl_base + resource_size(&vctrl_res),
vctrl_res.start);
if (ret) {
kvm_err("Cannot map VCTRL into hyp\n");
@@ -1702,7 +1692,7 @@ int kvm_vgic_hyp_init(void)
}
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
- vctrl_res.start, vgic_maint_irq);
+ vctrl_res.start, vgic.maint_irq);
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
@@ -1710,14 +1700,14 @@ int kvm_vgic_hyp_init(void)
ret = -ENXIO;
goto out_unmap;
}
- vgic_vcpu_base = vcpu_res.start;
+ vgic.vcpu_base = vcpu_res.start;
goto out;
out_unmap:
- iounmap(vgic_vctrl_base);
+ iounmap(vgic.vctrl_base);
out_free_irq:
- free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
+ free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
out:
of_node_put(vgic_node);
return ret;
@@ -1752,7 +1742,7 @@ int kvm_vgic_init(struct kvm *kvm)
}
ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
- vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+ vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
goto out;
@@ -1798,7 +1788,7 @@ int kvm_vgic_create(struct kvm *kvm)
}
spin_lock_init(&kvm->arch.vgic.lock);
- kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
+ kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
--
1.8.3.4
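To picture the shape of the change: a handful of file-scope globals become one structure describing the GIC implementation. A minimal standalone sketch, with invented example values:

#include <stdio.h>

/* One structure describing a given GIC implementation. */
struct fake_vgic_params {
        unsigned long vcpu_base;        /* physical address of the virtual CPU interface */
        unsigned int nr_lr;             /* number of list registers */
        unsigned int maint_irq;         /* maintenance interrupt number */
};

/* In the real code these are filled in at probe time from DT and GICH_VTR;
 * the values here are made up. */
static struct fake_vgic_params vgic = {
        .vcpu_base = 0x2c006000UL,
        .nr_lr = 4,
        .maint_irq = 25,
};

int main(void)
{
        /* Consumers read vgic.<field> instead of a loose global each. */
        printf("nr_lr=%u maint_irq=%u vcpu_base=%#lx\n",
               vgic.nr_lr, vgic.maint_irq, vgic.vcpu_base);
        return 0;
}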
* [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure
2014-04-16 13:39 ` [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure Marc Zyngier
@ 2014-05-09 14:06 ` Christoffer Dall
2014-05-12 17:50 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:06 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:45PM +0100, Marc Zyngier wrote:
> Move all the data specific to a given GIC implementation into its own
> little structure.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 11 +++++++++
> virt/kvm/arm/vgic.c | 66 +++++++++++++++++++++-----------------------------
> 2 files changed, 39 insertions(+), 38 deletions(-)
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 58a938f..23922b9 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -101,6 +101,17 @@ struct vgic_ops {
> void (*enable)(struct kvm_vcpu *vcpu);
> };
>
> +struct vgic_params {
> + /* Physical address of vgic virtual cpu interface */
> + phys_addr_t vcpu_base;
> + /* Number of list registers */
> + u32 nr_lr;
> + /* Interrupt number */
> + unsigned int maint_irq;
> + /* Virtual control interface base address */
> + void __iomem *vctrl_base;
> +};
> +
> struct vgic_dist {
> #ifdef CONFIG_KVM_ARM_VGIC
> spinlock_t lock;
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index a6d70fc..c22afce 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -76,14 +76,6 @@
> #define IMPLEMENTER_ARM 0x43b
> #define GICC_ARCH_VERSION_V2 0x2
>
> -/* Physical address of vgic virtual cpu interface */
> -static phys_addr_t vgic_vcpu_base;
> -
> -/* Virtual control interface base address */
> -static void __iomem *vgic_vctrl_base;
> -
> -static struct device_node *vgic_node;
> -
> #define ACCESS_READ_VALUE (1 << 0)
> #define ACCESS_READ_RAZ (0 << 0)
> #define ACCESS_READ_MASK(x) ((x) & (1 << 0))
> @@ -103,8 +95,7 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
> static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>
> -static u32 vgic_nr_lr;
> -static unsigned int vgic_maint_irq;
> +static struct vgic_params vgic;
>
> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
> int cpuid, u32 offset)
> @@ -1183,7 +1174,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> int lr;
>
> - for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
> + for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
Does the architecture mandate the same number of list registers per CPU
interface? Couldn't quickly find this in the spec.
> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
> @@ -1227,8 +1218,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>
> /* Try to use another LR for this interrupt */
> lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
> - vgic_cpu->nr_lr);
> - if (lr >= vgic_cpu->nr_lr)
> + vgic.nr_lr);
> + if (lr >= vgic.nr_lr)
> return false;
>
> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
> @@ -1354,7 +1345,6 @@ epilog:
>
> static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> {
> - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> u32 status = vgic_get_interrupt_status(vcpu);
> bool level_pending = false;
>
> @@ -1369,8 +1359,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> unsigned long *eisr_ptr = (unsigned long *)&eisr;
> int lr;
>
> - for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
> - vgic_cpu->nr_lr) {
> + for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
ah, compile is happy again
> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> vgic_irq_clear_active(vcpu, vlr.irq);
> @@ -1409,7 +1398,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> level_pending = vgic_process_maintenance(vcpu);
>
> /* Clear mappings for empty LRs */
> - for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
> + for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
> struct vgic_lr vlr;
>
> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
> @@ -1422,8 +1411,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> }
>
> /* Check if we still have something up our sleeve... */
> - pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
> - if (level_pending || pending < vgic_cpu->nr_lr)
> + pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
> + if (level_pending || pending < vgic.nr_lr)
> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
> }
>
> @@ -1612,7 +1601,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
> vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
> }
>
> - vgic_cpu->nr_lr = vgic_nr_lr;
> + vgic_cpu->nr_lr = vgic.nr_lr;
why are we setting this if we're keeping this globally and stopped
referring to this all over the code? assembly code that only has a
vgic_cpu pointer? If so, comment so new code knows which value to use?
>
> vgic_enable(vcpu);
>
> @@ -1621,7 +1610,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>
> static void vgic_init_maintenance_interrupt(void *info)
> {
> - enable_percpu_irq(vgic_maint_irq, 0);
> + enable_percpu_irq(vgic.maint_irq, 0);
> }
>
> static int vgic_cpu_notify(struct notifier_block *self,
> @@ -1634,7 +1623,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
> break;
> case CPU_DYING:
> case CPU_DYING_FROZEN:
> - disable_percpu_irq(vgic_maint_irq);
> + disable_percpu_irq(vgic.maint_irq);
> break;
> }
>
> @@ -1650,6 +1639,7 @@ int kvm_vgic_hyp_init(void)
> int ret;
> struct resource vctrl_res;
> struct resource vcpu_res;
> + struct device_node *vgic_node;
>
> vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
> if (!vgic_node) {
> @@ -1657,17 +1647,17 @@ int kvm_vgic_hyp_init(void)
> return -ENODEV;
> }
>
> - vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
> - if (!vgic_maint_irq) {
> + vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
> + if (!vgic.maint_irq) {
> kvm_err("error getting vgic maintenance irq from DT\n");
> ret = -ENXIO;
> goto out;
> }
>
> - ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
> + ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
> "vgic", kvm_get_running_vcpus());
> if (ret) {
> - kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
> + kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
> goto out;
> }
>
> @@ -1683,18 +1673,18 @@ int kvm_vgic_hyp_init(void)
> goto out_free_irq;
> }
>
> - vgic_vctrl_base = of_iomap(vgic_node, 2);
> - if (!vgic_vctrl_base) {
> + vgic.vctrl_base = of_iomap(vgic_node, 2);
> + if (!vgic.vctrl_base) {
> kvm_err("Cannot ioremap VCTRL\n");
> ret = -ENOMEM;
> goto out_free_irq;
> }
>
> - vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
> - vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
> + vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
> + vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
>
> - ret = create_hyp_io_mappings(vgic_vctrl_base,
> - vgic_vctrl_base + resource_size(&vctrl_res),
> + ret = create_hyp_io_mappings(vgic.vctrl_base,
> + vgic.vctrl_base + resource_size(&vctrl_res),
> vctrl_res.start);
> if (ret) {
> kvm_err("Cannot map VCTRL into hyp\n");
> @@ -1702,7 +1692,7 @@ int kvm_vgic_hyp_init(void)
> }
>
> kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
> - vctrl_res.start, vgic_maint_irq);
> + vctrl_res.start, vgic.maint_irq);
> on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
>
> if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
> @@ -1710,14 +1700,14 @@ int kvm_vgic_hyp_init(void)
> ret = -ENXIO;
> goto out_unmap;
> }
> - vgic_vcpu_base = vcpu_res.start;
> + vgic.vcpu_base = vcpu_res.start;
>
> goto out;
>
> out_unmap:
> - iounmap(vgic_vctrl_base);
> + iounmap(vgic.vctrl_base);
> out_free_irq:
> - free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
> + free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
> out:
> of_node_put(vgic_node);
> return ret;
> @@ -1752,7 +1742,7 @@ int kvm_vgic_init(struct kvm *kvm)
> }
>
> ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
> - vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
> + vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
> if (ret) {
> kvm_err("Unable to remap VGIC CPU to VCPU\n");
> goto out;
> @@ -1798,7 +1788,7 @@ int kvm_vgic_create(struct kvm *kvm)
> }
>
> spin_lock_init(&kvm->arch.vgic.lock);
> - kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
> + kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
> kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
> kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
>
> --
> 1.8.3.4
>
Besides the small question,
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
* [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure
2014-05-09 14:06 ` Christoffer Dall
@ 2014-05-12 17:50 ` Marc Zyngier
0 siblings, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 17:50 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:06:52 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:45PM +0100, Marc Zyngier wrote:
>> Move all the data specific to a given GIC implementation into its own
>> little structure.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 11 +++++++++
>> virt/kvm/arm/vgic.c | 66 +++++++++++++++++++++-----------------------------
>> 2 files changed, 39 insertions(+), 38 deletions(-)
>>
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index 58a938f..23922b9 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -101,6 +101,17 @@ struct vgic_ops {
>> void (*enable)(struct kvm_vcpu *vcpu);
>> };
>>
>> +struct vgic_params {
>> + /* Physical address of vgic virtual cpu interface */
>> + phys_addr_t vcpu_base;
>> + /* Number of list registers */
>> + u32 nr_lr;
>> + /* Interrupt number */
>> + unsigned int maint_irq;
>> + /* Virtual control interface base address */
>> + void __iomem *vctrl_base;
>> +};
>> +
>> struct vgic_dist {
>> #ifdef CONFIG_KVM_ARM_VGIC
>> spinlock_t lock;
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index a6d70fc..c22afce 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -76,14 +76,6 @@
>> #define IMPLEMENTER_ARM 0x43b
>> #define GICC_ARCH_VERSION_V2 0x2
>>
>> -/* Physical address of vgic virtual cpu interface */
>> -static phys_addr_t vgic_vcpu_base;
>> -
>> -/* Virtual control interface base address */
>> -static void __iomem *vgic_vctrl_base;
>> -
>> -static struct device_node *vgic_node;
>> -
>> #define ACCESS_READ_VALUE (1 << 0)
>> #define ACCESS_READ_RAZ (0 << 0)
>> #define ACCESS_READ_MASK(x) ((x) & (1 << 0))
>> @@ -103,8 +95,7 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
>> static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>
>> -static u32 vgic_nr_lr;
>> -static unsigned int vgic_maint_irq;
>> +static struct vgic_params vgic;
>>
>> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
>> int cpuid, u32 offset)
>> @@ -1183,7 +1174,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> int lr;
>>
>> - for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
>> + for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
>
> Does the architecture mandate the same number of list registers per CPU
> interface? Couldn't quickly find this in the spec.
If the counts could differ, we wouldn't be able to reliably migrate vcpus
(unable to restore all the list registers, for example). We'd probably
have to check that at boot time and rely on the minimum value across the
whole system. I'll have a word with whoever bakes such a thing in an
actual chip...
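A standalone sketch of that boot-time fallback, with an invented CPU count and made-up per-CPU VTR values:

#include <stdint.h>
#include <stdio.h>

#define NR_FAKE_CPUS 4

/* Pretend per-CPU GICH_VTR reads; the low 6 bits encode (nr_lr - 1). */
static const uint32_t fake_vtr[NR_FAKE_CPUS] = { 0x3, 0x3, 0x2, 0x3 };

static uint32_t probe_min_nr_lr(void)
{
        uint32_t min_lr = 64;   /* the most a 6-bit field can describe */
        int cpu;

        for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
                uint32_t nr_lr = (fake_vtr[cpu] & 0x3f) + 1;

                if (nr_lr < min_lr)
                        min_lr = nr_lr;
        }
        return min_lr;
}

int main(void)
{
        printf("usable LRs across the system: %u\n",
               (unsigned int)probe_min_nr_lr());
        return 0;
}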
>> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
>> @@ -1227,8 +1218,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>>
>> /* Try to use another LR for this interrupt */
>> lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
>> - vgic_cpu->nr_lr);
>> - if (lr >= vgic_cpu->nr_lr)
>> + vgic.nr_lr);
>> + if (lr >= vgic.nr_lr)
>> return false;
>>
>> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
>> @@ -1354,7 +1345,6 @@ epilog:
>>
>> static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
>> {
>> - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> u32 status = vgic_get_interrupt_status(vcpu);
>> bool level_pending = false;
>>
>> @@ -1369,8 +1359,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
>> unsigned long *eisr_ptr = (unsigned long *)&eisr;
>> int lr;
>>
>> - for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
>> - vgic_cpu->nr_lr) {
>> + for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
>
> ah, compile is happy again
Yup, some serious cleanup required here. I'll respin my checkout/compile script...
>> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> vgic_irq_clear_active(vcpu, vlr.irq);
>> @@ -1409,7 +1398,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> level_pending = vgic_process_maintenance(vcpu);
>>
>> /* Clear mappings for empty LRs */
>> - for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
>> + for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
>> struct vgic_lr vlr;
>>
>> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>> @@ -1422,8 +1411,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> }
>>
>> /* Check if we still have something up our sleeve... */
>> - pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
>> - if (level_pending || pending < vgic_cpu->nr_lr)
>> + pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
>> + if (level_pending || pending < vgic.nr_lr)
>> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
>> }
>>
>> @@ -1612,7 +1601,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>> vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
>> }
>>
>> - vgic_cpu->nr_lr = vgic_nr_lr;
>> + vgic_cpu->nr_lr = vgic.nr_lr;
>
> why are we setting this if we're keeping this globally and stopped
> referring to this all over the code? assembly code that only has a
> vgic_cpu pointer? If so, comment so new code knows which value to use?
Yes, I'd very much like the switch code to avoid going all the way up to
the distributor. I'll add some comments.
>>
>> vgic_enable(vcpu);
>>
>> @@ -1621,7 +1610,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>>
>> static void vgic_init_maintenance_interrupt(void *info)
>> {
>> - enable_percpu_irq(vgic_maint_irq, 0);
>> + enable_percpu_irq(vgic.maint_irq, 0);
>> }
>>
>> static int vgic_cpu_notify(struct notifier_block *self,
>> @@ -1634,7 +1623,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
>> break;
>> case CPU_DYING:
>> case CPU_DYING_FROZEN:
>> - disable_percpu_irq(vgic_maint_irq);
>> + disable_percpu_irq(vgic.maint_irq);
>> break;
>> }
>>
>> @@ -1650,6 +1639,7 @@ int kvm_vgic_hyp_init(void)
>> int ret;
>> struct resource vctrl_res;
>> struct resource vcpu_res;
>> + struct device_node *vgic_node;
>>
>> vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
>> if (!vgic_node) {
>> @@ -1657,17 +1647,17 @@ int kvm_vgic_hyp_init(void)
>> return -ENODEV;
>> }
>>
>> - vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
>> - if (!vgic_maint_irq) {
>> + vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
>> + if (!vgic.maint_irq) {
>> kvm_err("error getting vgic maintenance irq from DT\n");
>> ret = -ENXIO;
>> goto out;
>> }
>>
>> - ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
>> + ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
>> "vgic", kvm_get_running_vcpus());
>> if (ret) {
>> - kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
>> + kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
>> goto out;
>> }
>>
>> @@ -1683,18 +1673,18 @@ int kvm_vgic_hyp_init(void)
>> goto out_free_irq;
>> }
>>
>> - vgic_vctrl_base = of_iomap(vgic_node, 2);
>> - if (!vgic_vctrl_base) {
>> + vgic.vctrl_base = of_iomap(vgic_node, 2);
>> + if (!vgic.vctrl_base) {
>> kvm_err("Cannot ioremap VCTRL\n");
>> ret = -ENOMEM;
>> goto out_free_irq;
>> }
>>
>> - vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
>> - vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
>> + vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
>> + vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
>>
>> - ret = create_hyp_io_mappings(vgic_vctrl_base,
>> - vgic_vctrl_base + resource_size(&vctrl_res),
>> + ret = create_hyp_io_mappings(vgic.vctrl_base,
>> + vgic.vctrl_base + resource_size(&vctrl_res),
>> vctrl_res.start);
>> if (ret) {
>> kvm_err("Cannot map VCTRL into hyp\n");
>> @@ -1702,7 +1692,7 @@ int kvm_vgic_hyp_init(void)
>> }
>>
>> kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
>> - vctrl_res.start, vgic_maint_irq);
>> + vctrl_res.start, vgic.maint_irq);
>> on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
>>
>> if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
>> @@ -1710,14 +1700,14 @@ int kvm_vgic_hyp_init(void)
>> ret = -ENXIO;
>> goto out_unmap;
>> }
>> - vgic_vcpu_base = vcpu_res.start;
>> + vgic.vcpu_base = vcpu_res.start;
>>
>> goto out;
>>
>> out_unmap:
>> - iounmap(vgic_vctrl_base);
>> + iounmap(vgic.vctrl_base);
>> out_free_irq:
>> - free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
>> + free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
>> out:
>> of_node_put(vgic_node);
>> return ret;
>> @@ -1752,7 +1742,7 @@ int kvm_vgic_init(struct kvm *kvm)
>> }
>>
>> ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
>> - vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
>> + vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
>> if (ret) {
>> kvm_err("Unable to remap VGIC CPU to VCPU\n");
>> goto out;
>> @@ -1798,7 +1788,7 @@ int kvm_vgic_create(struct kvm *kvm)
>> }
>>
>> spin_lock_init(&kvm->arch.vgic.lock);
>> - kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
>> + kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
>> kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
>> kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
>>
>> --
>> 1.8.3.4
>>
>
> Besides the small question,
>
> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
>
--
Jazz is not dead. It just smells funny.
* [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (12 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 13/19] KVM: ARM: introduce vgic_params structure Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start, end} from hyp.S Marc Zyngier
` (4 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Brutally hack the innocent vgic code, and move the GICv2 specific code
to its own file, using vgic_ops and vgic_params as a way to pass
information between the two blocks.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm/kvm/Makefile | 1 +
arch/arm64/kvm/Makefile | 2 +-
include/kvm/arm_vgic.h | 3 +
virt/kvm/arm/vgic-v2.c | 229 ++++++++++++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 239 +++++++-----------------------------------------
5 files changed, 266 insertions(+), 208 deletions(-)
create mode 100644 virt/kvm/arm/vgic-v2.c
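The hand-off between the two blocks boils down to the backend publishing two const pointers at probe time. A standalone sketch of that pattern, with invented names (not the kernel code):

#include <stdio.h>

struct fake_ops {
        void (*enable)(void);
};

struct fake_params {
        unsigned int nr_lr;
};

static void fake_v2_enable(void)
{
}

static const struct fake_ops fake_v2_ops = {
        .enable = fake_v2_enable,
};

static struct fake_params fake_v2_params;

/* Backend probe: fill in the private parameters, then hand both blocks
 * back to the common code as const pointers. */
static int fake_v2_probe(const struct fake_ops **ops,
                         const struct fake_params **params)
{
        fake_v2_params.nr_lr = 4;       /* pretend hardware discovery */
        *ops = &fake_v2_ops;
        *params = &fake_v2_params;
        return 0;
}

/* The common code keeps only what the probe handed back. */
static const struct fake_ops *ops;
static const struct fake_params *params;

int main(void)
{
        if (fake_v2_probe(&ops, &params))
                return 1;
        ops->enable();
        printf("nr_lr=%u\n", params->nr_lr);
        return 0;
}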
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 789bca9..f7057ed 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 72a9fd5..7e92952 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -19,5 +19,5 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 23922b9..d8ec2eb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -213,6 +213,9 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
#define vgic_initialized(k) ((k)->arch.vgic.ready)
+int vgic_v2_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params);
+
#else
static inline int kvm_vgic_hyp_init(void)
{
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
new file mode 100644
index 0000000..52f438f
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+ struct vgic_lr lr_desc;
+ u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
+
+ lr_desc.irq = val & GICH_LR_VIRTUALID;
+ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
+ lr_desc.state = 0;
+
+ if (val & GICH_LR_PENDING_BIT)
+ lr_desc.state |= LR_STATE_PENDING;
+ if (val & GICH_LR_ACTIVE_BIT)
+ lr_desc.state |= LR_STATE_ACTIVE;
+ if (val & GICH_LR_EOI)
+ lr_desc.state |= LR_EOI_INT;
+
+ return lr_desc;
+}
+
+#define MK_LR_PEND(src, irq) \
+ (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
+
+static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
+{
+ u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
+
+ if (lr_desc.state & LR_STATE_PENDING)
+ lr_val |= GICH_LR_PENDING_BIT;
+ if (lr_desc.state & LR_STATE_ACTIVE)
+ lr_val |= GICH_LR_ACTIVE_BIT;
+ if (lr_desc.state & LR_EOI_INT)
+ lr_val |= GICH_LR_EOI;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
+
+ /*
+ * Despite being EOIed, the LR may not have been marked as
+ * empty.
+ */
+ if (!(lr_val & GICH_LR_STATE))
+ set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+}
+
+static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+ const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
+ return *(u64 *)elrsr;
+}
+
+static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
+{
+ const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
+ return *(u64 *)eisr;
+}
+
+static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+ u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
+ u32 ret = 0;
+
+ if (misr & GICH_MISR_EOI)
+ ret |= INT_STATUS_EOI;
+ if (misr & GICH_MISR_U)
+ ret |= INT_STATUS_UNDERFLOW;
+
+ return ret;
+}
+
+static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
+}
+
+static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+}
+
+static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+ vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
+ vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+ vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
+ vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
+}
+
+static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr;
+
+ vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+ vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
+ vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
+ vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+static void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+ /*
+ * By forcing VMCR to zero, the GIC will restore the binary
+ * points to their reset values. Anything else resets to zero
+ * anyway.
+ */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+
+ /* Get the show on the road... */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v2_ops = {
+ .get_lr = vgic_v2_get_lr,
+ .set_lr = vgic_v2_set_lr,
+ .get_elrsr = vgic_v2_get_elrsr,
+ .get_eisr = vgic_v2_get_eisr,
+ .get_interrupt_status = vgic_v2_get_interrupt_status,
+ .set_underflow = vgic_v2_set_underflow,
+ .clear_underflow = vgic_v2_clear_underflow,
+ .get_vmcr = vgic_v2_get_vmcr,
+ .set_vmcr = vgic_v2_set_vmcr,
+ .enable = vgic_v2_enable,
+};
+
+static struct vgic_params vgic_v2_params;
+
+int vgic_v2_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params)
+{
+ int ret;
+ struct resource vctrl_res;
+ struct resource vcpu_res;
+ struct device_node *vgic_node;
+ struct vgic_params *vgic = &vgic_v2_params;
+
+ vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
+ if (!vgic_node) {
+ kvm_err("error: no compatible vgic node in DT\n");
+ return -ENODEV;
+ }
+
+ vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+ if (!vgic->maint_irq) {
+ kvm_err("error getting vgic maintenance irq from DT\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
+ if (ret) {
+ kvm_err("Cannot obtain VCTRL resource\n");
+ goto out_free_irq;
+ }
+
+ vgic->vctrl_base = of_iomap(vgic_node, 2);
+ if (!vgic->vctrl_base) {
+ kvm_err("Cannot ioremap VCTRL\n");
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
+ vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
+
+ ret = create_hyp_io_mappings(vgic->vctrl_base,
+ vgic->vctrl_base + resource_size(&vctrl_res),
+ vctrl_res.start);
+ if (ret) {
+ kvm_err("Cannot map VCTRL into hyp\n");
+ goto out_unmap;
+ }
+
+ if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+ kvm_err("Cannot obtain VCPU resource\n");
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+ vgic->vcpu_base = vcpu_res.start;
+
+ kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+ vctrl_res.start, vgic->maint_irq);
+
+ *ops = &vgic_v2_ops;
+ *params = vgic;
+ goto out;
+
+out_unmap:
+ iounmap(vgic->vctrl_base);
+out_free_irq:
+ free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
+out:
+ of_node_put(vgic_node);
+ return ret;
+}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c22afce..613b492 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -95,7 +95,8 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-static struct vgic_params vgic;
+static const struct vgic_ops *vgic_ops;
+static const struct vgic_params *vgic;
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
int cpuid, u32 offset)
@@ -971,182 +972,55 @@ static void vgic_update_state(struct kvm *kvm)
}
}
-static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
-{
- struct vgic_lr lr_desc;
- u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
-
- lr_desc.irq = val & GICH_LR_VIRTUALID;
- lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
- lr_desc.state = 0;
-
- if (val & GICH_LR_PENDING_BIT)
- lr_desc.state |= LR_STATE_PENDING;
- if (val & GICH_LR_ACTIVE_BIT)
- lr_desc.state |= LR_STATE_ACTIVE;
- if (val & GICH_LR_EOI)
- lr_desc.state |= LR_EOI_INT;
-
- return lr_desc;
-}
-
-#define MK_LR_PEND(src, irq) \
- (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
-
-static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
- u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
-
- if (lr_desc.state & LR_STATE_PENDING)
- lr_val |= GICH_LR_PENDING_BIT;
- if (lr_desc.state & LR_STATE_ACTIVE)
- lr_val |= GICH_LR_ACTIVE_BIT;
- if (lr_desc.state & LR_EOI_INT)
- lr_val |= GICH_LR_EOI;
-
- vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-
- /*
- * Despite being EOIed, the LR may not have been marked as
- * empty.
- */
- if (!(lr_val & GICH_LR_STATE))
- set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
-}
-
-static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
-{
- const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
- return *(u64 *)elrsr;
-}
-
-static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
-{
- const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
- return *(u64 *)eisr;
-}
-
-static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
-{
- u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
- u32 ret = 0;
-
- if (misr & GICH_MISR_EOI)
- ret |= INT_STATUS_EOI;
- if (misr & GICH_MISR_U)
- ret |= INT_STATUS_UNDERFLOW;
-
- return ret;
-}
-
-static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
-}
-
-static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
-}
-
-static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
-
- vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
- vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
- vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
- vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
-}
-
-static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr;
-
- vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
- vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
- vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
- vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
-
- vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
-}
-
-static void vgic_v2_enable(struct kvm_vcpu *vcpu)
-{
- /*
- * By forcing VMCR to zero, the GIC will restore the binary
- * points to their reset values. Anything else resets to zero
- * anyway.
- */
- vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
-
- /* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
-}
-
-static const struct vgic_ops vgic_ops = {
- .get_lr = vgic_v2_get_lr,
- .set_lr = vgic_v2_set_lr,
- .get_elrsr = vgic_v2_get_elrsr,
- .get_eisr = vgic_v2_get_eisr,
- .get_interrupt_status = vgic_v2_get_interrupt_status,
- .set_underflow = vgic_v2_set_underflow,
- .clear_underflow = vgic_v2_clear_underflow,
- .get_vmcr = vgic_v2_get_vmcr,
- .set_vmcr = vgic_v2_set_vmcr,
- .enable = vgic_v2_enable,
-};
-
static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
- return vgic_ops.get_lr(vcpu, lr);
+ return vgic_ops->get_lr(vcpu, lr);
}
static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
struct vgic_lr vlr)
{
- vgic_ops.set_lr(vcpu, lr, vlr);
+ vgic_ops->set_lr(vcpu, lr, vlr);
}
static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
- return vgic_ops.get_elrsr(vcpu);
+ return vgic_ops->get_elrsr(vcpu);
}
static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
- return vgic_ops.get_eisr(vcpu);
+ return vgic_ops->get_eisr(vcpu);
}
static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
- return vgic_ops.get_interrupt_status(vcpu);
+ return vgic_ops->get_interrupt_status(vcpu);
}
static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
- vgic_ops.set_underflow(vcpu);
+ vgic_ops->set_underflow(vcpu);
}
static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
{
- vgic_ops.clear_underflow(vcpu);
+ vgic_ops->clear_underflow(vcpu);
}
static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
- vgic_ops.get_vmcr(vcpu, vmcr);
+ vgic_ops->get_vmcr(vcpu, vmcr);
}
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
- vgic_ops.set_vmcr(vcpu, vmcr);
+ vgic_ops->set_vmcr(vcpu, vmcr);
}
static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
- vgic_ops.enable(vcpu);
+ vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
@@ -1174,7 +1048,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int lr;
- for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
+ for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
@@ -1218,8 +1092,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
/* Try to use another LR for this interrupt */
lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
- vgic.nr_lr);
- if (lr >= vgic.nr_lr)
+ vgic->nr_lr);
+ if (lr >= vgic->nr_lr)
return false;
kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
@@ -1359,7 +1233,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
unsigned long *eisr_ptr = (unsigned long *)&eisr;
int lr;
- for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
+ for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
vgic_irq_clear_active(vcpu, vlr.irq);
@@ -1398,7 +1272,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
level_pending = vgic_process_maintenance(vcpu);
/* Clear mappings for empty LRs */
- for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
+ for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr vlr;
if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
@@ -1411,8 +1285,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
}
/* Check if we still have something up our sleeve... */
- pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
- if (level_pending || pending < vgic.nr_lr)
+ pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
+ if (level_pending || pending < vgic->nr_lr)
set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
@@ -1601,7 +1475,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
}
- vgic_cpu->nr_lr = vgic.nr_lr;
+ vgic_cpu->nr_lr = vgic->nr_lr;
vgic_enable(vcpu);
@@ -1610,7 +1484,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
static void vgic_init_maintenance_interrupt(void *info)
{
- enable_percpu_irq(vgic.maint_irq, 0);
+ enable_percpu_irq(vgic->maint_irq, 0);
}
static int vgic_cpu_notify(struct notifier_block *self,
@@ -1623,7 +1497,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
- disable_percpu_irq(vgic.maint_irq);
+ disable_percpu_irq(vgic->maint_irq);
break;
}
@@ -1637,28 +1511,16 @@ static struct notifier_block vgic_cpu_nb = {
int kvm_vgic_hyp_init(void)
{
int ret;
- struct resource vctrl_res;
- struct resource vcpu_res;
- struct device_node *vgic_node;
- vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
- if (!vgic_node) {
- kvm_err("error: no compatible vgic node in DT\n");
- return -ENODEV;
- }
-
- vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
- if (!vgic.maint_irq) {
- kvm_err("error getting vgic maintenance irq from DT\n");
- ret = -ENXIO;
- goto out;
- }
+ ret = vgic_v2_probe(&vgic_ops, &vgic);
+ if (ret)
+ return ret;
- ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
+ ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
if (ret) {
- kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
- goto out;
+ kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+ return ret;
}
ret = register_cpu_notifier(&vgic_cpu_nb);
@@ -1667,49 +1529,12 @@ int kvm_vgic_hyp_init(void)
goto out_free_irq;
}
- ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
- if (ret) {
- kvm_err("Cannot obtain VCTRL resource\n");
- goto out_free_irq;
- }
-
- vgic.vctrl_base = of_iomap(vgic_node, 2);
- if (!vgic.vctrl_base) {
- kvm_err("Cannot ioremap VCTRL\n");
- ret = -ENOMEM;
- goto out_free_irq;
- }
-
- vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
- vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
-
- ret = create_hyp_io_mappings(vgic.vctrl_base,
- vgic.vctrl_base + resource_size(&vctrl_res),
- vctrl_res.start);
- if (ret) {
- kvm_err("Cannot map VCTRL into hyp\n");
- goto out_unmap;
- }
-
- kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
- vctrl_res.start, vgic.maint_irq);
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
- if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
- kvm_err("Cannot obtain VCPU resource\n");
- ret = -ENXIO;
- goto out_unmap;
- }
- vgic.vcpu_base = vcpu_res.start;
-
- goto out;
+ return 0;
-out_unmap:
- iounmap(vgic.vctrl_base);
out_free_irq:
- free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
-out:
- of_node_put(vgic_node);
+ free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
return ret;
}
@@ -1742,7 +1567,7 @@ int kvm_vgic_init(struct kvm *kvm)
}
ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
- vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+ vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
goto out;
@@ -1788,7 +1613,7 @@ int kvm_vgic_create(struct kvm *kvm)
}
spin_lock_init(&kvm->arch.vgic.lock);
- kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
+ kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code
2014-04-16 13:39 ` [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
2014-05-12 17:54 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:46PM +0100, Marc Zyngier wrote:
> Brutally hack the innocent vgic code, and move the GICv2 specific code
> to its own file, using vgic_ops and vgic_params as a way to pass
> information between the two blocks.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm/kvm/Makefile | 1 +
> arch/arm64/kvm/Makefile | 2 +-
> include/kvm/arm_vgic.h | 3 +
> virt/kvm/arm/vgic-v2.c | 229 ++++++++++++++++++++++++++++++++++++++++++++++
> virt/kvm/arm/vgic.c | 239 +++++++-----------------------------------------
> 5 files changed, 266 insertions(+), 208 deletions(-)
> create mode 100644 virt/kvm/arm/vgic-v2.c
>
> diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
> index 789bca9..f7057ed 100644
> --- a/arch/arm/kvm/Makefile
> +++ b/arch/arm/kvm/Makefile
> @@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
> obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
> obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
> obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> +obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
> obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index 72a9fd5..7e92952 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -19,5 +19,5 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
> kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
> kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
>
> -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o $(KVM)/arm/vgic-v2.o
> kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index 23922b9..d8ec2eb 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -213,6 +213,9 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
> #define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
> #define vgic_initialized(k) ((k)->arch.vgic.ready)
>
> +int vgic_v2_probe(const struct vgic_ops **ops,
> + const struct vgic_params **params);
> +
> #else
> static inline int kvm_vgic_hyp_init(void)
> {
> diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
> new file mode 100644
> index 0000000..52f438f
> --- /dev/null
> +++ b/virt/kvm/arm/vgic-v2.c
> @@ -0,0 +1,229 @@
> +/*
> + * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/cpu.h>
> +#include <linux/kvm.h>
> +#include <linux/kvm_host.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/of.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +
> +#include <linux/irqchip/arm-gic.h>
> +
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_arm.h>
> +#include <asm/kvm_mmu.h>
> +
> +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
> +{
> + struct vgic_lr lr_desc;
> + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
> +
> + lr_desc.irq = val & GICH_LR_VIRTUALID;
> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
> + lr_desc.state = 0;
> +
> + if (val & GICH_LR_PENDING_BIT)
> + lr_desc.state |= LR_STATE_PENDING;
> + if (val & GICH_LR_ACTIVE_BIT)
> + lr_desc.state |= LR_STATE_ACTIVE;
> + if (val & GICH_LR_EOI)
> + lr_desc.state |= LR_EOI_INT;
> +
> + return lr_desc;
> +}
> +
> +#define MK_LR_PEND(src, irq) \
> + (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
> +
> +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr lr_desc)
> +{
> + u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
> +
> + if (lr_desc.state & LR_STATE_PENDING)
> + lr_val |= GICH_LR_PENDING_BIT;
> + if (lr_desc.state & LR_STATE_ACTIVE)
> + lr_val |= GICH_LR_ACTIVE_BIT;
> + if (lr_desc.state & LR_EOI_INT)
> + lr_val |= GICH_LR_EOI;
> +
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
> +
> + /*
> + * Despite being EOIed, the LR may not have been marked as
> + * empty.
> + */
> + if (!(lr_val & GICH_LR_STATE))
> + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
> +}
> +
> +static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
> +{
> + const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
> + return *(u64 *)elrsr;
> +}
> +
> +static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
> +{
> + const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
> + return *(u64 *)eisr;
> +}
> +
> +static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
> +{
> + u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
> + u32 ret = 0;
> +
> + if (misr & GICH_MISR_EOI)
> + ret |= INT_STATUS_EOI;
> + if (misr & GICH_MISR_U)
> + ret |= INT_STATUS_UNDERFLOW;
> +
> + return ret;
> +}
> +
> +static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
> +}
> +
> +static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> +}
> +
> +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
> +
> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
> + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
> + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
> + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
> +}
> +
> +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr;
> +
> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
> + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
> + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
> + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
> +
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
> +}
> +
> +static void vgic_v2_enable(struct kvm_vcpu *vcpu)
> +{
> + /*
> + * By forcing VMCR to zero, the GIC will restore the binary
> + * points to their reset values. Anything else resets to zero
> + * anyway.
> + */
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
> +
> + /* Get the show on the road... */
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
> +}
> +
> +static const struct vgic_ops vgic_v2_ops = {
> + .get_lr = vgic_v2_get_lr,
> + .set_lr = vgic_v2_set_lr,
> + .get_elrsr = vgic_v2_get_elrsr,
> + .get_eisr = vgic_v2_get_eisr,
> + .get_interrupt_status = vgic_v2_get_interrupt_status,
> + .set_underflow = vgic_v2_set_underflow,
> + .clear_underflow = vgic_v2_clear_underflow,
> + .get_vmcr = vgic_v2_get_vmcr,
> + .set_vmcr = vgic_v2_set_vmcr,
> + .enable = vgic_v2_enable,
> +};
relying on all these being copied verbatim from the other file...
> +
> +static struct vgic_params vgic_v2_params;
> +
> +int vgic_v2_probe(const struct vgic_ops **ops,
> + const struct vgic_params **params)
> +{
Could you add some kdocs to this function, please?
> + int ret;
> + struct resource vctrl_res;
> + struct resource vcpu_res;
> + struct device_node *vgic_node;
> + struct vgic_params *vgic = &vgic_v2_params;
> +
> + vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
> + if (!vgic_node) {
> + kvm_err("error: no compatible vgic node in DT\n");
> + return -ENODEV;
> + }
> +
> + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
> + if (!vgic->maint_irq) {
> + kvm_err("error getting vgic maintenance irq from DT\n");
> + ret = -ENXIO;
> + goto out;
> + }
> +
> + ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
> + if (ret) {
> + kvm_err("Cannot obtain VCTRL resource\n");
> + goto out_free_irq;
> + }
> +
> + vgic->vctrl_base = of_iomap(vgic_node, 2);
> + if (!vgic->vctrl_base) {
> + kvm_err("Cannot ioremap VCTRL\n");
> + ret = -ENOMEM;
> + goto out_free_irq;
> + }
> +
> + vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
> + vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
> +
> + ret = create_hyp_io_mappings(vgic->vctrl_base,
> + vgic->vctrl_base + resource_size(&vctrl_res),
> + vctrl_res.start);
> + if (ret) {
> + kvm_err("Cannot map VCTRL into hyp\n");
> + goto out_unmap;
> + }
> +
> + if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
> + kvm_err("Cannot obtain VCPU resource\n");
> + ret = -ENXIO;
> + goto out_unmap;
> + }
> + vgic->vcpu_base = vcpu_res.start;
> +
> + kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
> + vctrl_res.start, vgic->maint_irq);
> +
> + *ops = &vgic_v2_ops;
> + *params = vgic;
> + goto out;
> +
> +out_unmap:
> + iounmap(vgic->vctrl_base);
> +out_free_irq:
> + free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
shouldn't you only free this if you actually request it?
> +out:
> + of_node_put(vgic_node);
> + return ret;
> +}
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index c22afce..613b492 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -95,7 +95,8 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
> static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>
> -static struct vgic_params vgic;
> +static const struct vgic_ops *vgic_ops;
> +static const struct vgic_params *vgic;
>
> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
> int cpuid, u32 offset)
> @@ -971,182 +972,55 @@ static void vgic_update_state(struct kvm *kvm)
> }
> }
>
> -static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
> -{
> - struct vgic_lr lr_desc;
> - u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
> -
> - lr_desc.irq = val & GICH_LR_VIRTUALID;
> - lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
> - lr_desc.state = 0;
> -
> - if (val & GICH_LR_PENDING_BIT)
> - lr_desc.state |= LR_STATE_PENDING;
> - if (val & GICH_LR_ACTIVE_BIT)
> - lr_desc.state |= LR_STATE_ACTIVE;
> - if (val & GICH_LR_EOI)
> - lr_desc.state |= LR_EOI_INT;
> -
> - return lr_desc;
> -}
> -
> -#define MK_LR_PEND(src, irq) \
> - (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
> -
> -static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
> - struct vgic_lr lr_desc)
> -{
> - u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
> -
> - if (lr_desc.state & LR_STATE_PENDING)
> - lr_val |= GICH_LR_PENDING_BIT;
> - if (lr_desc.state & LR_STATE_ACTIVE)
> - lr_val |= GICH_LR_ACTIVE_BIT;
> - if (lr_desc.state & LR_EOI_INT)
> - lr_val |= GICH_LR_EOI;
> -
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
> -
> - /*
> - * Despite being EOIed, the LR may not have been marked as
> - * empty.
> - */
> - if (!(lr_val & GICH_LR_STATE))
> - set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
> -}
> -
> -static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
> -{
> - const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
> - return *(u64 *)elrsr;
> -}
> -
> -static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
> -{
> - const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
> - return *(u64 *)eisr;
> -}
> -
> -static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
> -{
> - u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
> - u32 ret = 0;
> -
> - if (misr & GICH_MISR_EOI)
> - ret |= INT_STATUS_EOI;
> - if (misr & GICH_MISR_U)
> - ret |= INT_STATUS_UNDERFLOW;
> -
> - return ret;
> -}
> -
> -static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
> -{
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
> -}
> -
> -static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
> -{
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
> -}
> -
> -static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> -{
> - u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
> -
> - vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
> - vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
> - vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
> - vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
> -}
> -
> -static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> -{
> - u32 vmcr;
> -
> - vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
> - vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
> - vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
> - vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
> -
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
> -}
> -
> -static void vgic_v2_enable(struct kvm_vcpu *vcpu)
> -{
> - /*
> - * By forcing VMCR to zero, the GIC will restore the binary
> - * points to their reset values. Anything else resets to zero
> - * anyway.
> - */
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
> -
> - /* Get the show on the road... */
> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
> -}
> -
> -static const struct vgic_ops vgic_ops = {
> - .get_lr = vgic_v2_get_lr,
> - .set_lr = vgic_v2_set_lr,
> - .get_elrsr = vgic_v2_get_elrsr,
> - .get_eisr = vgic_v2_get_eisr,
> - .get_interrupt_status = vgic_v2_get_interrupt_status,
> - .set_underflow = vgic_v2_set_underflow,
> - .clear_underflow = vgic_v2_clear_underflow,
> - .get_vmcr = vgic_v2_get_vmcr,
> - .set_vmcr = vgic_v2_set_vmcr,
> - .enable = vgic_v2_enable,
> -};
> -
> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
> {
> - return vgic_ops.get_lr(vcpu, lr);
> + return vgic_ops->get_lr(vcpu, lr);
> }
>
> static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
> struct vgic_lr vlr)
> {
> - vgic_ops.set_lr(vcpu, lr, vlr);
> + vgic_ops->set_lr(vcpu, lr, vlr);
> }
>
> static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
> {
> - return vgic_ops.get_elrsr(vcpu);
> + return vgic_ops->get_elrsr(vcpu);
> }
>
> static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
> {
> - return vgic_ops.get_eisr(vcpu);
> + return vgic_ops->get_eisr(vcpu);
> }
>
> static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
> {
> - return vgic_ops.get_interrupt_status(vcpu);
> + return vgic_ops->get_interrupt_status(vcpu);
> }
>
> static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
> {
> - vgic_ops.set_underflow(vcpu);
> + vgic_ops->set_underflow(vcpu);
> }
>
> static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
> {
> - vgic_ops.clear_underflow(vcpu);
> + vgic_ops->clear_underflow(vcpu);
> }
>
> static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> {
> - vgic_ops.get_vmcr(vcpu, vmcr);
> + vgic_ops->get_vmcr(vcpu, vmcr);
> }
>
> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
> {
> - vgic_ops.set_vmcr(vcpu, vmcr);
> + vgic_ops->set_vmcr(vcpu, vmcr);
> }
>
> static inline void vgic_enable(struct kvm_vcpu *vcpu)
> {
> - vgic_ops.enable(vcpu);
> + vgic_ops->enable(vcpu);
> }
>
> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
> @@ -1174,7 +1048,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> int lr;
>
> - for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
> + for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
> @@ -1218,8 +1092,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>
> /* Try to use another LR for this interrupt */
> lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
> - vgic.nr_lr);
> - if (lr >= vgic.nr_lr)
> + vgic->nr_lr);
> + if (lr >= vgic->nr_lr)
> return false;
>
> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
> @@ -1359,7 +1233,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
> unsigned long *eisr_ptr = (unsigned long *)&eisr;
> int lr;
>
> - for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
> + for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>
> vgic_irq_clear_active(vcpu, vlr.irq);
> @@ -1398,7 +1272,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> level_pending = vgic_process_maintenance(vcpu);
>
> /* Clear mappings for empty LRs */
> - for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
> + for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
> struct vgic_lr vlr;
>
> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
> @@ -1411,8 +1285,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> }
>
> /* Check if we still have something up our sleeve... */
> - pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
> - if (level_pending || pending < vgic.nr_lr)
> + pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
> + if (level_pending || pending < vgic->nr_lr)
> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
> }
>
> @@ -1601,7 +1475,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
> vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
> }
>
> - vgic_cpu->nr_lr = vgic.nr_lr;
> + vgic_cpu->nr_lr = vgic->nr_lr;
>
> vgic_enable(vcpu);
>
> @@ -1610,7 +1484,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>
> static void vgic_init_maintenance_interrupt(void *info)
> {
> - enable_percpu_irq(vgic.maint_irq, 0);
> + enable_percpu_irq(vgic->maint_irq, 0);
> }
>
> static int vgic_cpu_notify(struct notifier_block *self,
> @@ -1623,7 +1497,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
> break;
> case CPU_DYING:
> case CPU_DYING_FROZEN:
> - disable_percpu_irq(vgic.maint_irq);
> + disable_percpu_irq(vgic->maint_irq);
> break;
> }
>
> @@ -1637,28 +1511,16 @@ static struct notifier_block vgic_cpu_nb = {
> int kvm_vgic_hyp_init(void)
> {
> int ret;
> - struct resource vctrl_res;
> - struct resource vcpu_res;
> - struct device_node *vgic_node;
>
> - vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
> - if (!vgic_node) {
> - kvm_err("error: no compatible vgic node in DT\n");
> - return -ENODEV;
> - }
> -
> - vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
> - if (!vgic.maint_irq) {
> - kvm_err("error getting vgic maintenance irq from DT\n");
> - ret = -ENXIO;
> - goto out;
> - }
> + ret = vgic_v2_probe(&vgic_ops, &vgic);
> + if (ret)
> + return ret;
>
> - ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
> + ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
> "vgic", kvm_get_running_vcpus());
> if (ret) {
> - kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
> - goto out;
> + kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
> + return ret;
> }
>
> ret = register_cpu_notifier(&vgic_cpu_nb);
> @@ -1667,49 +1529,12 @@ int kvm_vgic_hyp_init(void)
> goto out_free_irq;
> }
>
> - ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
> - if (ret) {
> - kvm_err("Cannot obtain VCTRL resource\n");
> - goto out_free_irq;
> - }
> -
> - vgic.vctrl_base = of_iomap(vgic_node, 2);
> - if (!vgic.vctrl_base) {
> - kvm_err("Cannot ioremap VCTRL\n");
> - ret = -ENOMEM;
> - goto out_free_irq;
> - }
> -
> - vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
> - vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
> -
> - ret = create_hyp_io_mappings(vgic.vctrl_base,
> - vgic.vctrl_base + resource_size(&vctrl_res),
> - vctrl_res.start);
> - if (ret) {
> - kvm_err("Cannot map VCTRL into hyp\n");
> - goto out_unmap;
> - }
> -
> - kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
> - vctrl_res.start, vgic.maint_irq);
> on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
>
> - if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
> - kvm_err("Cannot obtain VCPU resource\n");
> - ret = -ENXIO;
> - goto out_unmap;
> - }
> - vgic.vcpu_base = vcpu_res.start;
> -
> - goto out;
> + return 0;
>
> -out_unmap:
> - iounmap(vgic.vctrl_base);
> out_free_irq:
> - free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
> -out:
> - of_node_put(vgic_node);
> + free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
> return ret;
> }
>
> @@ -1742,7 +1567,7 @@ int kvm_vgic_init(struct kvm *kvm)
> }
>
> ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
> - vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
> + vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
Shouldn't this be factored out to the v2-specific file then?
> if (ret) {
> kvm_err("Unable to remap VGIC CPU to VCPU\n");
> goto out;
> @@ -1788,7 +1613,7 @@ int kvm_vgic_create(struct kvm *kvm)
> }
>
> spin_lock_init(&kvm->arch.vgic.lock);
> - kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
> + kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
> kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
> kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
>
> --
> 1.8.3.4
>
Otherwise,
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code
2014-05-09 14:07 ` Christoffer Dall
@ 2014-05-12 17:54 ` Marc Zyngier
0 siblings, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-12 17:54 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:07:01 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:46PM +0100, Marc Zyngier wrote:
>> Brutally hack the innocent vgic code, and move the GICv2 specific code
>> to its own file, using vgic_ops and vgic_params as a way to pass
>> information between the two blocks.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> arch/arm/kvm/Makefile | 1 +
>> arch/arm64/kvm/Makefile | 2 +-
>> include/kvm/arm_vgic.h | 3 +
>> virt/kvm/arm/vgic-v2.c | 229 ++++++++++++++++++++++++++++++++++++++++++++++
>> virt/kvm/arm/vgic.c | 239 +++++++-----------------------------------------
>> 5 files changed, 266 insertions(+), 208 deletions(-)
>> create mode 100644 virt/kvm/arm/vgic-v2.c
>>
>> diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
>> index 789bca9..f7057ed 100644
>> --- a/arch/arm/kvm/Makefile
>> +++ b/arch/arm/kvm/Makefile
>> @@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
>> obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
>> obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
>> obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
>> +obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
>> obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
>> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
>> index 72a9fd5..7e92952 100644
>> --- a/arch/arm64/kvm/Makefile
>> +++ b/arch/arm64/kvm/Makefile
>> @@ -19,5 +19,5 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
>> kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
>> kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
>>
>> -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
>> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o $(KVM)/arm/vgic-v2.o
>> kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index 23922b9..d8ec2eb 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -213,6 +213,9 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
>> #define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
>> #define vgic_initialized(k) ((k)->arch.vgic.ready)
>>
>> +int vgic_v2_probe(const struct vgic_ops **ops,
>> + const struct vgic_params **params);
>> +
>> #else
>> static inline int kvm_vgic_hyp_init(void)
>> {
>> diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
>> new file mode 100644
>> index 0000000..52f438f
>> --- /dev/null
>> +++ b/virt/kvm/arm/vgic-v2.c
>> @@ -0,0 +1,229 @@
>> +/*
>> + * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/cpu.h>
>> +#include <linux/kvm.h>
>> +#include <linux/kvm_host.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/of.h>
>> +#include <linux/of_address.h>
>> +#include <linux/of_irq.h>
>> +
>> +#include <linux/irqchip/arm-gic.h>
>> +
>> +#include <asm/kvm_emulate.h>
>> +#include <asm/kvm_arm.h>
>> +#include <asm/kvm_mmu.h>
>> +
>> +static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> +{
>> + struct vgic_lr lr_desc;
>> + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
>> +
>> + lr_desc.irq = val & GICH_LR_VIRTUALID;
>> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
>> + lr_desc.state = 0;
>> +
>> + if (val & GICH_LR_PENDING_BIT)
>> + lr_desc.state |= LR_STATE_PENDING;
>> + if (val & GICH_LR_ACTIVE_BIT)
>> + lr_desc.state |= LR_STATE_ACTIVE;
>> + if (val & GICH_LR_EOI)
>> + lr_desc.state |= LR_EOI_INT;
>> +
>> + return lr_desc;
>> +}
>> +
>> +#define MK_LR_PEND(src, irq) \
>> + (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>> +
>> +static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
>> + struct vgic_lr lr_desc)
>> +{
>> + u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
>> +
>> + if (lr_desc.state & LR_STATE_PENDING)
>> + lr_val |= GICH_LR_PENDING_BIT;
>> + if (lr_desc.state & LR_STATE_ACTIVE)
>> + lr_val |= GICH_LR_ACTIVE_BIT;
>> + if (lr_desc.state & LR_EOI_INT)
>> + lr_val |= GICH_LR_EOI;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
>> +
>> + /*
>> + * Despite being EOIed, the LR may not have been marked as
>> + * empty.
>> + */
>> + if (!(lr_val & GICH_LR_STATE))
>> + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
>> +}
>> +
>> +static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
>> +{
>> + const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
>> + return *(u64 *)elrsr;
>> +}
>> +
>> +static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
>> +{
>> + const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
>> + return *(u64 *)eisr;
>> +}
>> +
>> +static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
>> +{
>> + u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
>> + u32 ret = 0;
>> +
>> + if (misr & GICH_MISR_EOI)
>> + ret |= INT_STATUS_EOI;
>> + if (misr & GICH_MISR_U)
>> + ret |= INT_STATUS_UNDERFLOW;
>> +
>> + return ret;
>> +}
>> +
>> +static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
>> +{
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
>> +}
>> +
>> +static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
>> +{
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
>> +}
>> +
>> +static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
>> +
>> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
>> + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
>> + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
>> + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
>> +}
>> +
>> +static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr;
>> +
>> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
>> + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
>> + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
>> + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
>> +}
>> +
>> +static void vgic_v2_enable(struct kvm_vcpu *vcpu)
>> +{
>> + /*
>> + * By forcing VMCR to zero, the GIC will restore the binary
>> + * points to their reset values. Anything else resets to zero
>> + * anyway.
>> + */
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
>> +
>> + /* Get the show on the road... */
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
>> +}
>> +
>> +static const struct vgic_ops vgic_v2_ops = {
>> + .get_lr = vgic_v2_get_lr,
>> + .set_lr = vgic_v2_set_lr,
>> + .get_elrsr = vgic_v2_get_elrsr,
>> + .get_eisr = vgic_v2_get_eisr,
>> + .get_interrupt_status = vgic_v2_get_interrupt_status,
>> + .set_underflow = vgic_v2_set_underflow,
>> + .clear_underflow = vgic_v2_clear_underflow,
>> + .get_vmcr = vgic_v2_get_vmcr,
>> + .set_vmcr = vgic_v2_set_vmcr,
>> + .enable = vgic_v2_enable,
>> +};
>
> relying on all these being copied verbatim from the other file...
>
>> +
>> +static struct vgic_params vgic_v2_params;
>> +
>> +int vgic_v2_probe(const struct vgic_ops **ops,
>> + const struct vgic_params **params)
>> +{
>
> Could you add some kdocs to this function, please?
Sure can.
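
As an illustration only, a kernel-doc header for vgic_v2_probe might look
like the sketch below; the exact wording is an assumption, not the text
that was eventually written for this series:

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
 * @ops:	address of the vgic_ops pointer filled in on success
 * @params:	address of the vgic_params pointer filled in on success
 *
 * Returns 0 if a GICv2 was found and its parameters (maintenance IRQ,
 * number of list registers, GICH/GICV base addresses) were probed,
 * or a negative error code otherwise.
 */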
>
>> + int ret;
>> + struct resource vctrl_res;
>> + struct resource vcpu_res;
>> + struct device_node *vgic_node;
>> + struct vgic_params *vgic = &vgic_v2_params;
>> +
>> + vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
>> + if (!vgic_node) {
>> + kvm_err("error: no compatible vgic node in DT\n");
>> + return -ENODEV;
>> + }
>> +
>> + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
>> + if (!vgic->maint_irq) {
>> + kvm_err("error getting vgic maintenance irq from DT\n");
>> + ret = -ENXIO;
>> + goto out;
>> + }
>> +
>> + ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
>> + if (ret) {
>> + kvm_err("Cannot obtain VCTRL resource\n");
>> + goto out_free_irq;
>> + }
>> +
>> + vgic->vctrl_base = of_iomap(vgic_node, 2);
>> + if (!vgic->vctrl_base) {
>> + kvm_err("Cannot ioremap VCTRL\n");
>> + ret = -ENOMEM;
>> + goto out_free_irq;
>> + }
>> +
>> + vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
>> + vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
>> +
>> + ret = create_hyp_io_mappings(vgic->vctrl_base,
>> + vgic->vctrl_base + resource_size(&vctrl_res),
>> + vctrl_res.start);
>> + if (ret) {
>> + kvm_err("Cannot map VCTRL into hyp\n");
>> + goto out_unmap;
>> + }
>> +
>> + if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
>> + kvm_err("Cannot obtain VCPU resource\n");
>> + ret = -ENXIO;
>> + goto out_unmap;
>> + }
>> + vgic->vcpu_base = vcpu_res.start;
>> +
>> + kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
>> + vctrl_res.start, vgic->maint_irq);
>> +
>> + *ops = &vgic_v2_ops;
>> + *params = vgic;
>> + goto out;
>> +
>> +out_unmap:
>> + iounmap(vgic->vctrl_base);
>> +out_free_irq:
>> + free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
>
> shouldn't you only free this if you actually request it?
Yes, and that code is now in a different location. I'll nuke that bit too.
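
As a rough sketch, with the percpu IRQ only requested later in
kvm_vgic_hyp_init(), the unwinding in vgic_v2_probe() would shrink to
something like the following (an assumption about the follow-up, not the
final code):

out_unmap:
	iounmap(vgic->vctrl_base);
out:
	of_node_put(vgic_node);
	return ret;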
>> +out:
>> + of_node_put(vgic_node);
>> + return ret;
>> +}
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index c22afce..613b492 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -95,7 +95,8 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
>> static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
>>
>> -static struct vgic_params vgic;
>> +static const struct vgic_ops *vgic_ops;
>> +static const struct vgic_params *vgic;
>>
>> static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
>> int cpuid, u32 offset)
>> @@ -971,182 +972,55 @@ static void vgic_update_state(struct kvm *kvm)
>> }
>> }
>>
>> -static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> -{
>> - struct vgic_lr lr_desc;
>> - u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
>> -
>> - lr_desc.irq = val & GICH_LR_VIRTUALID;
>> - lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
>> - lr_desc.state = 0;
>> -
>> - if (val & GICH_LR_PENDING_BIT)
>> - lr_desc.state |= LR_STATE_PENDING;
>> - if (val & GICH_LR_ACTIVE_BIT)
>> - lr_desc.state |= LR_STATE_ACTIVE;
>> - if (val & GICH_LR_EOI)
>> - lr_desc.state |= LR_EOI_INT;
>> -
>> - return lr_desc;
>> -}
>> -
>> -#define MK_LR_PEND(src, irq) \
>> - (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>> -
>> -static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
>> - struct vgic_lr lr_desc)
>> -{
>> - u32 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
>> -
>> - if (lr_desc.state & LR_STATE_PENDING)
>> - lr_val |= GICH_LR_PENDING_BIT;
>> - if (lr_desc.state & LR_STATE_ACTIVE)
>> - lr_val |= GICH_LR_ACTIVE_BIT;
>> - if (lr_desc.state & LR_EOI_INT)
>> - lr_val |= GICH_LR_EOI;
>> -
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
>> -
>> - /*
>> - * Despite being EOIed, the LR may not have been marked as
>> - * empty.
>> - */
>> - if (!(lr_val & GICH_LR_STATE))
>> - set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
>> -}
>> -
>> -static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
>> -{
>> - const u32 *elrsr = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
>> - return *(u64 *)elrsr;
>> -}
>> -
>> -static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
>> -{
>> - const u32 *eisr = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
>> - return *(u64 *)eisr;
>> -}
>> -
>> -static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
>> -{
>> - u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
>> - u32 ret = 0;
>> -
>> - if (misr & GICH_MISR_EOI)
>> - ret |= INT_STATUS_EOI;
>> - if (misr & GICH_MISR_U)
>> - ret |= INT_STATUS_UNDERFLOW;
>> -
>> - return ret;
>> -}
>> -
>> -static void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
>> -{
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
>> -}
>> -
>> -static void vgic_v2_clear_underflow(struct kvm_vcpu *vcpu)
>> -{
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
>> -}
>> -
>> -static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> -{
>> - u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
>> -
>> - vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
>> - vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
>> - vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
>> - vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
>> -}
>> -
>> -static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> -{
>> - u32 vmcr;
>> -
>> - vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
>> - vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
>> - vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
>> - vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
>> -
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
>> -}
>> -
>> -static void vgic_v2_enable(struct kvm_vcpu *vcpu)
>> -{
>> - /*
>> - * By forcing VMCR to zero, the GIC will restore the binary
>> - * points to their reset values. Anything else resets to zero
>> - * anyway.
>> - */
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
>> -
>> - /* Get the show on the road... */
>> - vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
>> -}
>> -
>> -static const struct vgic_ops vgic_ops = {
>> - .get_lr = vgic_v2_get_lr,
>> - .set_lr = vgic_v2_set_lr,
>> - .get_elrsr = vgic_v2_get_elrsr,
>> - .get_eisr = vgic_v2_get_eisr,
>> - .get_interrupt_status = vgic_v2_get_interrupt_status,
>> - .set_underflow = vgic_v2_set_underflow,
>> - .clear_underflow = vgic_v2_clear_underflow,
>> - .get_vmcr = vgic_v2_get_vmcr,
>> - .set_vmcr = vgic_v2_set_vmcr,
>> - .enable = vgic_v2_enable,
>> -};
>> -
>> static inline struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> {
>> - return vgic_ops.get_lr(vcpu, lr);
>> + return vgic_ops->get_lr(vcpu, lr);
>> }
>>
>> static inline void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
>> struct vgic_lr vlr)
>> {
>> - vgic_ops.set_lr(vcpu, lr, vlr);
>> + vgic_ops->set_lr(vcpu, lr, vlr);
>> }
>>
>> static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
>> {
>> - return vgic_ops.get_elrsr(vcpu);
>> + return vgic_ops->get_elrsr(vcpu);
>> }
>>
>> static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
>> {
>> - return vgic_ops.get_eisr(vcpu);
>> + return vgic_ops->get_eisr(vcpu);
>> }
>>
>> static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
>> {
>> - return vgic_ops.get_interrupt_status(vcpu);
>> + return vgic_ops->get_interrupt_status(vcpu);
>> }
>>
>> static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
>> {
>> - vgic_ops.set_underflow(vcpu);
>> + vgic_ops->set_underflow(vcpu);
>> }
>>
>> static inline void vgic_clear_underflow(struct kvm_vcpu *vcpu)
>> {
>> - vgic_ops.clear_underflow(vcpu);
>> + vgic_ops->clear_underflow(vcpu);
>> }
>>
>> static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
>> {
>> - vgic_ops.get_vmcr(vcpu, vmcr);
>> + vgic_ops->get_vmcr(vcpu, vmcr);
>> }
>>
>> static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
>> {
>> - vgic_ops.set_vmcr(vcpu, vmcr);
>> + vgic_ops->set_vmcr(vcpu, vmcr);
>> }
>>
>> static inline void vgic_enable(struct kvm_vcpu *vcpu)
>> {
>> - vgic_ops.enable(vcpu);
>> + vgic_ops->enable(vcpu);
>> }
>>
>> static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
>> @@ -1174,7 +1048,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
>> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> int lr;
>>
>> - for_each_set_bit(lr, vgic_cpu->lr_used, vgic.nr_lr) {
>> + for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
>> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
>> @@ -1218,8 +1092,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>>
>> /* Try to use another LR for this interrupt */
>> lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
>> - vgic.nr_lr);
>> - if (lr >= vgic.nr_lr)
>> + vgic->nr_lr);
>> + if (lr >= vgic->nr_lr)
>> return false;
>>
>> kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
>> @@ -1359,7 +1233,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
>> unsigned long *eisr_ptr = (unsigned long *)&eisr;
>> int lr;
>>
>> - for_each_set_bit(lr, eisr_ptr, vgic.nr_lr) {
>> + for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
>> struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
>>
>> vgic_irq_clear_active(vcpu, vlr.irq);
>> @@ -1398,7 +1272,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> level_pending = vgic_process_maintenance(vcpu);
>>
>> /* Clear mappings for empty LRs */
>> - for_each_set_bit(lr, elrsr_ptr, vgic.nr_lr) {
>> + for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
>> struct vgic_lr vlr;
>>
>> if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>> @@ -1411,8 +1285,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
>> }
>>
>> /* Check if we still have something up our sleeve... */
>> - pending = find_first_zero_bit(elrsr_ptr, vgic.nr_lr);
>> - if (level_pending || pending < vgic.nr_lr)
>> + pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
>> + if (level_pending || pending < vgic->nr_lr)
>> set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
>> }
>>
>> @@ -1601,7 +1475,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>> vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
>> }
>>
>> - vgic_cpu->nr_lr = vgic.nr_lr;
>> + vgic_cpu->nr_lr = vgic->nr_lr;
>>
>> vgic_enable(vcpu);
>>
>> @@ -1610,7 +1484,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
>>
>> static void vgic_init_maintenance_interrupt(void *info)
>> {
>> - enable_percpu_irq(vgic.maint_irq, 0);
>> + enable_percpu_irq(vgic->maint_irq, 0);
>> }
>>
>> static int vgic_cpu_notify(struct notifier_block *self,
>> @@ -1623,7 +1497,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
>> break;
>> case CPU_DYING:
>> case CPU_DYING_FROZEN:
>> - disable_percpu_irq(vgic.maint_irq);
>> + disable_percpu_irq(vgic->maint_irq);
>> break;
>> }
>>
>> @@ -1637,28 +1511,16 @@ static struct notifier_block vgic_cpu_nb = {
>> int kvm_vgic_hyp_init(void)
>> {
>> int ret;
>> - struct resource vctrl_res;
>> - struct resource vcpu_res;
>> - struct device_node *vgic_node;
>>
>> - vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
>> - if (!vgic_node) {
>> - kvm_err("error: no compatible vgic node in DT\n");
>> - return -ENODEV;
>> - }
>> -
>> - vgic.maint_irq = irq_of_parse_and_map(vgic_node, 0);
>> - if (!vgic.maint_irq) {
>> - kvm_err("error getting vgic maintenance irq from DT\n");
>> - ret = -ENXIO;
>> - goto out;
>> - }
>> + ret = vgic_v2_probe(&vgic_ops, &vgic);
>> + if (ret)
>> + return ret;
>>
>> - ret = request_percpu_irq(vgic.maint_irq, vgic_maintenance_handler,
>> + ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
>> "vgic", kvm_get_running_vcpus());
>> if (ret) {
>> - kvm_err("Cannot register interrupt %d\n", vgic.maint_irq);
>> - goto out;
>> + kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
>> + return ret;
>> }
>>
>> ret = register_cpu_notifier(&vgic_cpu_nb);
>> @@ -1667,49 +1529,12 @@ int kvm_vgic_hyp_init(void)
>> goto out_free_irq;
>> }
>>
>> - ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
>> - if (ret) {
>> - kvm_err("Cannot obtain VCTRL resource\n");
>> - goto out_free_irq;
>> - }
>> -
>> - vgic.vctrl_base = of_iomap(vgic_node, 2);
>> - if (!vgic.vctrl_base) {
>> - kvm_err("Cannot ioremap VCTRL\n");
>> - ret = -ENOMEM;
>> - goto out_free_irq;
>> - }
>> -
>> - vgic.nr_lr = readl_relaxed(vgic.vctrl_base + GICH_VTR);
>> - vgic.nr_lr = (vgic.nr_lr & 0x3f) + 1;
>> -
>> - ret = create_hyp_io_mappings(vgic.vctrl_base,
>> - vgic.vctrl_base + resource_size(&vctrl_res),
>> - vctrl_res.start);
>> - if (ret) {
>> - kvm_err("Cannot map VCTRL into hyp\n");
>> - goto out_unmap;
>> - }
>> -
>> - kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
>> - vctrl_res.start, vgic.maint_irq);
>> on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
>>
>> - if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
>> - kvm_err("Cannot obtain VCPU resource\n");
>> - ret = -ENXIO;
>> - goto out_unmap;
>> - }
>> - vgic.vcpu_base = vcpu_res.start;
>> -
>> - goto out;
>> + return 0;
>>
>> -out_unmap:
>> - iounmap(vgic.vctrl_base);
>> out_free_irq:
>> - free_percpu_irq(vgic.maint_irq, kvm_get_running_vcpus());
>> -out:
>> - of_node_put(vgic_node);
>> + free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
>> return ret;
>> }
>>
>> @@ -1742,7 +1567,7 @@ int kvm_vgic_init(struct kvm *kvm)
>> }
>>
>> ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
>> - vgic.vcpu_base, KVM_VGIC_V2_CPU_SIZE);
>> + vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
>
> Shouldn't this be factored out to the v2-specific file then?
Fair point. I'll have a look.
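
One possible shape for such a split (the helper name and signature below
are assumptions for illustration, not something proposed in this series)
would be a v2-only helper living in vgic-v2.c, reachable through an extra
vgic_ops callback, so the common code no longer hardcodes the GICV layout:

static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	/* Map the GICV cpu interface region into the guest at the
	 * address userspace selected for vgic_cpu_base. */
	return kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				     params->vcpu_base,
				     KVM_VGIC_V2_CPU_SIZE);
}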
>> if (ret) {
>> kvm_err("Unable to remap VGIC CPU to VCPU\n");
>> goto out;
>> @@ -1788,7 +1613,7 @@ int kvm_vgic_create(struct kvm *kvm)
>> }
>>
>> spin_lock_init(&kvm->arch.vgic.lock);
>> - kvm->arch.vgic.vctrl_base = vgic.vctrl_base;
>> + kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
>> kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
>> kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
>>
>> --
>> 1.8.3.4
>>
>
> Otherwise,
>
> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
>
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start, end} from hyp.S
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (13 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 14/19] KVM: ARM: vgic: split GICv2 backend from the main vgic code Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start,end} " Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 16/19] arm64: KVM: split GICv2 world switch from hyp code Marc Zyngier
` (3 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
We already have __hyp_text_{start,end} to express the boundaries
of the HYP text section, and __kvm_hyp_code_{start,end} are getting
in the way of a more modular world switch code.
Just turn __kvm_hyp_code_{start,end} into #defines mapping the
linker-emitted symbols.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm64/include/asm/kvm_asm.h | 6 ++++--
arch/arm64/include/asm/virt.h | 4 ++++
arch/arm64/kvm/hyp.S | 6 ------
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index b25763b..dddb345 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -18,6 +18,8 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
+#include <asm/virt.h>
+
/*
* 0 is reserved as an invalid value.
* Order *must* be kept in sync with the hyp switch code.
@@ -95,8 +97,8 @@ extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
+#define __kvm_hyp_code_start __hyp_text_start
+#define __kvm_hyp_code_end __hyp_text_end
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be..290fb66 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -63,6 +63,10 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
+
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index cc1b471..9e3364b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -36,9 +36,6 @@
.pushsection .hyp.text, "ax"
.align PAGE_SHIFT
-__kvm_hyp_code_start:
- .globl __kvm_hyp_code_start
-
.macro save_common_regs
// x2: base address for cpu context
// x3: tmp register
@@ -874,7 +871,4 @@ ENTRY(__kvm_hyp_vector)
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
-__kvm_hyp_code_end:
- .globl __kvm_hyp_code_end
-
.popsection
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start,end} from hyp.S
2014-04-16 13:39 ` [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start, end} from hyp.S Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:47PM +0100, Marc Zyngier wrote:
> We already have __hyp_text_{start,end} to express the boundaries
> of the HYP text section, and __kvm_hyp_code_{start,end} are getting
> in the way of a more modular world switch code.
>
> Just turn __kvm_hyp_code_{start,end} into #defines mapping the
> linker-emitted symbols.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm64/include/asm/kvm_asm.h | 6 ++++--
> arch/arm64/include/asm/virt.h | 4 ++++
> arch/arm64/kvm/hyp.S | 6 ------
> 3 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index b25763b..dddb345 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -18,6 +18,8 @@
> #ifndef __ARM_KVM_ASM_H__
> #define __ARM_KVM_ASM_H__
>
> +#include <asm/virt.h>
> +
> /*
> * 0 is reserved as an invalid value.
> * Order *must* be kept in sync with the hyp switch code.
> @@ -95,8 +97,8 @@ extern char __kvm_hyp_init_end[];
>
> extern char __kvm_hyp_vector[];
>
> -extern char __kvm_hyp_code_start[];
> -extern char __kvm_hyp_code_end[];
> +#define __kvm_hyp_code_start __hyp_text_start
> +#define __kvm_hyp_code_end __hyp_text_end
>
> extern void __kvm_flush_vm_context(void);
> extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
> diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
> index 130e2be..290fb66 100644
> --- a/arch/arm64/include/asm/virt.h
> +++ b/arch/arm64/include/asm/virt.h
> @@ -63,6 +63,10 @@ static inline bool is_hyp_mode_mismatched(void)
> return __boot_cpu_mode[0] != __boot_cpu_mode[1];
> }
>
> +/* The section containing the hypervisor text */
> +extern char __hyp_text_start[];
> +extern char __hyp_text_end[];
> +
> #endif /* __ASSEMBLY__ */
>
> #endif /* ! __ASM__VIRT_H */
> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
> index cc1b471..9e3364b 100644
> --- a/arch/arm64/kvm/hyp.S
> +++ b/arch/arm64/kvm/hyp.S
> @@ -36,9 +36,6 @@
> .pushsection .hyp.text, "ax"
> .align PAGE_SHIFT
>
> -__kvm_hyp_code_start:
> - .globl __kvm_hyp_code_start
> -
> .macro save_common_regs
> // x2: base address for cpu context
> // x3: tmp register
> @@ -874,7 +871,4 @@ ENTRY(__kvm_hyp_vector)
> ventry el1_error_invalid // Error 32-bit EL1
> ENDPROC(__kvm_hyp_vector)
>
> -__kvm_hyp_code_end:
> - .globl __kvm_hyp_code_end
> -
> .popsection
> --
> 1.8.3.4
>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 16/19] arm64: KVM: split GICv2 world switch from hyp code
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (14 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 15/19] arm64: KVM: remove __kvm_hyp_code_{start, end} from hyp.S Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S Marc Zyngier
` (2 subsequent siblings)
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Move the GICv2 world switch code into its own file, and add the
necessary indirection to the arm64 switch code.
Also introduce a new type field to the vgic_params structure.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm/include/asm/kvm_host.h | 5 ++
arch/arm64/include/asm/kvm_asm.h | 4 ++
arch/arm64/include/asm/kvm_host.h | 21 ++++++
arch/arm64/kernel/asm-offsets.c | 3 +
arch/arm64/kvm/Makefile | 4 +-
arch/arm64/kvm/hyp.S | 120 ++++++--------------------------
arch/arm64/kvm/vgic-v2-switch.S | 141 ++++++++++++++++++++++++++++++++++++++
include/kvm/arm_vgic.h | 7 +-
virt/kvm/arm/vgic-v2.c | 15 ++--
virt/kvm/arm/vgic.c | 3 +
10 files changed, 213 insertions(+), 110 deletions(-)
create mode 100644 arch/arm64/kvm/vgic-v2-switch.S
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 098f7dd..228ae1c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -222,6 +222,11 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
return 0;
}
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+ BUG_ON(vgic->type != VGIC_V2);
+}
+
int kvm_perf_init(void);
int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index dddb345..6515a52 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -104,6 +104,10 @@ extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern char __save_vgic_v2_state[];
+extern char __restore_vgic_v2_state[];
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0a1d697..65f0c43 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -200,4 +200,25 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
+struct vgic_sr_vectors {
+ void *save_vgic;
+ void *restore_vgic;
+};
+
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+ extern struct vgic_sr_vectors __vgic_sr_vectors;
+
+ switch(vgic->type)
+ {
+ case VGIC_V2:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
+ break;
+
+ default:
+ BUG();
+ }
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 20fd488..dafc415 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -129,6 +129,9 @@ int main(void)
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
+ DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
+ DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
+ DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 7e92952..daf24dc 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -19,5 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 9e3364b..aed72d0 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,7 +16,6 @@
*/
#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#include <asm/memory.h>
@@ -375,103 +374,6 @@
msr vttbr_el2, xzr
.endm
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-.macro save_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* Save all interesting registers */
- ldr w4, [x2, #GICH_HCR]
- ldr w5, [x2, #GICH_VMCR]
- ldr w6, [x2, #GICH_MISR]
- ldr w7, [x2, #GICH_EISR0]
- ldr w8, [x2, #GICH_EISR1]
- ldr w9, [x2, #GICH_ELRSR0]
- ldr w10, [x2, #GICH_ELRSR1]
- ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
-CPU_BE( rev w9, w9 )
-CPU_BE( rev w10, w10 )
-CPU_BE( rev w11, w11 )
-
- str w4, [x3, #VGIC_V2_CPU_HCR]
- str w5, [x3, #VGIC_V2_CPU_VMCR]
- str w6, [x3, #VGIC_V2_CPU_MISR]
- str w7, [x3, #VGIC_V2_CPU_EISR]
- str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
- str w9, [x3, #VGIC_V2_CPU_ELRSR]
- str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
- str w11, [x3, #VGIC_V2_CPU_APR]
-
- /* Clear GICH_HCR */
- str wzr, [x2, #GICH_HCR]
-
- /* Save list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_V2_CPU_LR
-1: ldr w5, [x2], #4
-CPU_BE( rev w5, w5 )
- str w5, [x3], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-.macro restore_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* We only restore a minimal set of registers */
- ldr w4, [x3, #VGIC_V2_CPU_HCR]
- ldr w5, [x3, #VGIC_V2_CPU_VMCR]
- ldr w6, [x3, #VGIC_V2_CPU_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-
- str w4, [x2, #GICH_HCR]
- str w5, [x2, #GICH_VMCR]
- str w6, [x2, #GICH_APR]
-
- /* Restore list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_V2_CPU_LR
-1: ldr w5, [x3], #4
-CPU_BE( rev w5, w5 )
- str w5, [x2], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
-.endm
-
.macro save_timer_state
// x0: vcpu pointer
ldr x2, [x0, #VCPU_KVM]
@@ -568,7 +470,10 @@ ENTRY(__kvm_vcpu_run)
activate_traps
activate_vm
- restore_vgic_state
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, #VGIC_RESTORE_FN]
+ kern_hyp_va x24
+ blr x24
restore_timer_state
// Guest context
@@ -595,7 +500,10 @@ __kvm_vcpu_return:
save_guest_32bit_state
save_timer_state
- save_vgic_state
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, VGIC_SAVE_FN]
+ kern_hyp_va x24
+ blr x24
deactivate_traps
deactivate_vm
@@ -644,6 +552,12 @@ ENTRY(__kvm_flush_vm_context)
ret
ENDPROC(__kvm_flush_vm_context)
+ // struct vgic_sr_vectors __vgic_sr_vectors;
+ .align 3
+ENTRY(__vgic_sr_vectors)
+ .skip VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
@@ -653,6 +567,12 @@ __kvm_hyp_panic:
mrs x0, tpidr_el2
+ save_timer_state
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, VGIC_SAVE_FN]
+ kern_hyp_va x24
+ blr x24
+
deactivate_traps
deactivate_vm
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 0000000..c5dc777
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v2_state
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* Save all interesting registers */
+ ldr w4, [x2, #GICH_HCR]
+ ldr w5, [x2, #GICH_VMCR]
+ ldr w6, [x2, #GICH_MISR]
+ ldr w7, [x2, #GICH_EISR0]
+ ldr w8, [x2, #GICH_EISR1]
+ ldr w9, [x2, #GICH_ELRSR0]
+ ldr w10, [x2, #GICH_ELRSR1]
+ ldr w11, [x2, #GICH_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+CPU_BE( rev w7, w7 )
+CPU_BE( rev w8, w8 )
+CPU_BE( rev w9, w9 )
+CPU_BE( rev w10, w10 )
+CPU_BE( rev w11, w11 )
+
+ str w4, [x3, #VGIC_V2_CPU_HCR]
+ str w5, [x3, #VGIC_V2_CPU_VMCR]
+ str w6, [x3, #VGIC_V2_CPU_MISR]
+ str w7, [x3, #VGIC_V2_CPU_EISR]
+ str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+ str w9, [x3, #VGIC_V2_CPU_ELRSR]
+ str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+ str w11, [x3, #VGIC_V2_CPU_APR]
+
+ /* Clear GICH_HCR */
+ str wzr, [x2, #GICH_HCR]
+
+ /* Save list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x2], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x3], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v2_state
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* We only restore a minimal set of registers */
+ ldr w4, [x3, #VGIC_V2_CPU_HCR]
+ ldr w5, [x3, #VGIC_V2_CPU_VMCR]
+ ldr w6, [x3, #VGIC_V2_CPU_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+
+ str w4, [x2, #GICH_HCR]
+ str w5, [x2, #GICH_VMCR]
+ str w6, [x2, #GICH_APR]
+
+ /* Restore list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x3], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x2], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+.endm
+
+ENTRY(__save_vgic_v2_state)
+ save_vgic_v2_state
+ ret
+ENDPROC(__save_vgic_v2_state)
+
+ENTRY(__restore_vgic_v2_state)
+__restore_vgic_v2_state:
+ restore_vgic_v2_state
+ ret
+ENDPROC(__restore_vgic_v2_state)
+
+
+ .popsection
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d8ec2eb..c47dee5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,7 +24,6 @@
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/irqchip/arm-gic.h>
#define VGIC_NR_IRQS 256
#define VGIC_NR_SGIS 16
@@ -70,6 +69,10 @@ struct vgic_bytemap {
struct kvm_vcpu;
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+};
+
#define LR_STATE_PENDING (1 << 0)
#define LR_STATE_ACTIVE (1 << 1)
#define LR_STATE_MASK (3 << 0)
@@ -102,6 +105,8 @@ struct vgic_ops {
};
struct vgic_params {
+ /* vgic type */
+ enum vgic_type type;
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
/* Number of list registers */
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 52f438f..4c6606e 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -170,7 +170,7 @@ int vgic_v2_probe(const struct vgic_ops **ops,
vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
if (!vgic_node) {
- kvm_err("error: no compatible vgic node in DT\n");
+ kvm_err("error: no compatible GICv2 node in DT\n");
return -ENODEV;
}
@@ -183,15 +183,15 @@ int vgic_v2_probe(const struct vgic_ops **ops,
ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
if (ret) {
- kvm_err("Cannot obtain VCTRL resource\n");
- goto out_free_irq;
+ kvm_err("Cannot obtain GICH resource\n");
+ goto out;
}
vgic->vctrl_base = of_iomap(vgic_node, 2);
if (!vgic->vctrl_base) {
- kvm_err("Cannot ioremap VCTRL\n");
+ kvm_err("Cannot ioremap GICH\n");
ret = -ENOMEM;
- goto out_free_irq;
+ goto out;
}
vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
@@ -206,7 +206,7 @@ int vgic_v2_probe(const struct vgic_ops **ops,
}
if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
- kvm_err("Cannot obtain VCPU resource\n");
+ kvm_err("Cannot obtain GICV resource\n");
ret = -ENXIO;
goto out_unmap;
}
@@ -215,14 +215,13 @@ int vgic_v2_probe(const struct vgic_ops **ops,
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vctrl_res.start, vgic->maint_irq);
+ vgic->type = VGIC_V2;
*ops = &vgic_v2_ops;
*params = vgic;
goto out;
out_unmap:
iounmap(vgic->vctrl_base);
-out_free_irq:
- free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
out:
of_node_put(vgic_node);
return ret;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 613b492..8365189 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1531,6 +1531,9 @@ int kvm_vgic_hyp_init(void)
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+ /* Callback into the arch code for setup */
+ vgic_arch_setup(vgic);
+
return 0;
out_free_irq:
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 16/19] arm64: KVM: split GICv2 world switch from hyp code
2014-04-16 13:39 ` [PATCH v3 16/19] arm64: KVM: split GICv2 world switch from hyp code Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:48PM +0100, Marc Zyngier wrote:
> Move the GICv2 world switch code into its own file, and add the
> necessary indirection to the arm64 switch code.
>
> Also introduce a new type field to the vgic_params structure.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm/include/asm/kvm_host.h | 5 ++
> arch/arm64/include/asm/kvm_asm.h | 4 ++
> arch/arm64/include/asm/kvm_host.h | 21 ++++++
> arch/arm64/kernel/asm-offsets.c | 3 +
> arch/arm64/kvm/Makefile | 4 +-
> arch/arm64/kvm/hyp.S | 120 ++++++--------------------------
> arch/arm64/kvm/vgic-v2-switch.S | 141 ++++++++++++++++++++++++++++++++++++++
> include/kvm/arm_vgic.h | 7 +-
> virt/kvm/arm/vgic-v2.c | 15 ++--
> virt/kvm/arm/vgic.c | 3 +
> 10 files changed, 213 insertions(+), 110 deletions(-)
> create mode 100644 arch/arm64/kvm/vgic-v2-switch.S
>
> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
> index 098f7dd..228ae1c 100644
> --- a/arch/arm/include/asm/kvm_host.h
> +++ b/arch/arm/include/asm/kvm_host.h
> @@ -222,6 +222,11 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
> return 0;
> }
>
> +static inline void vgic_arch_setup(const struct vgic_params *vgic)
> +{
> + BUG_ON(vgic->type != VGIC_V2);
> +}
> +
> int kvm_perf_init(void);
> int kvm_perf_teardown(void);
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index dddb345..6515a52 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -104,6 +104,10 @@ extern void __kvm_flush_vm_context(void);
> extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
>
> extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
> +
> +extern char __save_vgic_v2_state[];
> +extern char __restore_vgic_v2_state[];
> +
> #endif
>
> #endif /* __ARM_KVM_ASM_H__ */
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 0a1d697..65f0c43 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -200,4 +200,25 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
> hyp_stack_ptr, vector_ptr);
> }
>
> +struct vgic_sr_vectors {
> + void *save_vgic;
> + void *restore_vgic;
> +};
> +
> +static inline void vgic_arch_setup(const struct vgic_params *vgic)
> +{
> + extern struct vgic_sr_vectors __vgic_sr_vectors;
> +
> + switch(vgic->type)
> + {
> + case VGIC_V2:
> + __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
> + __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
> + break;
> +
> + default:
> + BUG();
> + }
> +}
> +
> #endif /* __ARM64_KVM_HOST_H__ */
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index 20fd488..dafc415 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -129,6 +129,9 @@ int main(void)
> DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
> DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
> DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
> + DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
> + DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
> + DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
> DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
> DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
> DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index 7e92952..daf24dc 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -19,5 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
> kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
> kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
>
> -kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o $(KVM)/arm/vgic-v2.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
> kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
> index 9e3364b..aed72d0 100644
> --- a/arch/arm64/kvm/hyp.S
> +++ b/arch/arm64/kvm/hyp.S
> @@ -16,7 +16,6 @@
> */
>
> #include <linux/linkage.h>
> -#include <linux/irqchip/arm-gic.h>
>
> #include <asm/assembler.h>
> #include <asm/memory.h>
> @@ -375,103 +374,6 @@
> msr vttbr_el2, xzr
> .endm
>
> -/*
> - * Save the VGIC CPU state into memory
> - * x0: Register pointing to VCPU struct
> - * Do not corrupt x1!!!
> - */
> -.macro save_vgic_state
> - /* Get VGIC VCTRL base into x2 */
> - ldr x2, [x0, #VCPU_KVM]
> - kern_hyp_va x2
> - ldr x2, [x2, #KVM_VGIC_VCTRL]
> - kern_hyp_va x2
> - cbz x2, 2f // disabled
> -
> - /* Compute the address of struct vgic_cpu */
> - add x3, x0, #VCPU_VGIC_CPU
> -
> - /* Save all interesting registers */
> - ldr w4, [x2, #GICH_HCR]
> - ldr w5, [x2, #GICH_VMCR]
> - ldr w6, [x2, #GICH_MISR]
> - ldr w7, [x2, #GICH_EISR0]
> - ldr w8, [x2, #GICH_EISR1]
> - ldr w9, [x2, #GICH_ELRSR0]
> - ldr w10, [x2, #GICH_ELRSR1]
> - ldr w11, [x2, #GICH_APR]
> -CPU_BE( rev w4, w4 )
> -CPU_BE( rev w5, w5 )
> -CPU_BE( rev w6, w6 )
> -CPU_BE( rev w7, w7 )
> -CPU_BE( rev w8, w8 )
> -CPU_BE( rev w9, w9 )
> -CPU_BE( rev w10, w10 )
> -CPU_BE( rev w11, w11 )
> -
> - str w4, [x3, #VGIC_V2_CPU_HCR]
> - str w5, [x3, #VGIC_V2_CPU_VMCR]
> - str w6, [x3, #VGIC_V2_CPU_MISR]
> - str w7, [x3, #VGIC_V2_CPU_EISR]
> - str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
> - str w9, [x3, #VGIC_V2_CPU_ELRSR]
> - str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
> - str w11, [x3, #VGIC_V2_CPU_APR]
> -
> - /* Clear GICH_HCR */
> - str wzr, [x2, #GICH_HCR]
> -
> - /* Save list registers */
> - add x2, x2, #GICH_LR0
> - ldr w4, [x3, #VGIC_CPU_NR_LR]
> - add x3, x3, #VGIC_V2_CPU_LR
> -1: ldr w5, [x2], #4
> -CPU_BE( rev w5, w5 )
> - str w5, [x3], #4
> - sub w4, w4, #1
> - cbnz w4, 1b
> -2:
> -.endm
> -
> -/*
> - * Restore the VGIC CPU state from memory
> - * x0: Register pointing to VCPU struct
> - */
> -.macro restore_vgic_state
> - /* Get VGIC VCTRL base into x2 */
> - ldr x2, [x0, #VCPU_KVM]
> - kern_hyp_va x2
> - ldr x2, [x2, #KVM_VGIC_VCTRL]
> - kern_hyp_va x2
> - cbz x2, 2f // disabled
> -
> - /* Compute the address of struct vgic_cpu */
> - add x3, x0, #VCPU_VGIC_CPU
> -
> - /* We only restore a minimal set of registers */
> - ldr w4, [x3, #VGIC_V2_CPU_HCR]
> - ldr w5, [x3, #VGIC_V2_CPU_VMCR]
> - ldr w6, [x3, #VGIC_V2_CPU_APR]
> -CPU_BE( rev w4, w4 )
> -CPU_BE( rev w5, w5 )
> -CPU_BE( rev w6, w6 )
> -
> - str w4, [x2, #GICH_HCR]
> - str w5, [x2, #GICH_VMCR]
> - str w6, [x2, #GICH_APR]
> -
> - /* Restore list registers */
> - add x2, x2, #GICH_LR0
> - ldr w4, [x3, #VGIC_CPU_NR_LR]
> - add x3, x3, #VGIC_V2_CPU_LR
> -1: ldr w5, [x3], #4
> -CPU_BE( rev w5, w5 )
> - str w5, [x2], #4
> - sub w4, w4, #1
> - cbnz w4, 1b
> -2:
> -.endm
> -
> .macro save_timer_state
> // x0: vcpu pointer
> ldr x2, [x0, #VCPU_KVM]
> @@ -568,7 +470,10 @@ ENTRY(__kvm_vcpu_run)
> activate_traps
> activate_vm
>
> - restore_vgic_state
> + adr x24, __vgic_sr_vectors
> + ldr x24, [x24, #VGIC_RESTORE_FN]
> + kern_hyp_va x24
> + blr x24
could you not keep the restore_vgic_state macro name and have it expand
to these four lines? The Hyp code was so pretty and compact before this
mess ;)
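A minimal sketch of what such a wrapper could look like, built from the
four-line sequence and the VGIC_SAVE_FN/VGIC_RESTORE_FN offsets in the
diff above (an illustration of the suggestion, not code from the posted
series):

.macro restore_vgic_state
	// indirect through the vectors filled in by vgic_arch_setup()
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]
	kern_hyp_va	x24
	blr	x24			// clobbers x24, like the open-coded version
.endm

.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_SAVE_FN]
	kern_hyp_va	x24
	blr	x24
.endm

With that, the call sites in __kvm_vcpu_run and __kvm_vcpu_return would
shrink back to a single macro invocation each, as before this patch.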
> restore_timer_state
>
> // Guest context
> @@ -595,7 +500,10 @@ __kvm_vcpu_return:
> save_guest_32bit_state
>
> save_timer_state
> - save_vgic_state
> + adr x24, __vgic_sr_vectors
> + ldr x24, [x24, VGIC_SAVE_FN]
> + kern_hyp_va x24
> + blr x24
>
> deactivate_traps
> deactivate_vm
> @@ -644,6 +552,12 @@ ENTRY(__kvm_flush_vm_context)
> ret
> ENDPROC(__kvm_flush_vm_context)
>
> + // struct vgic_sr_vectors __vgic_sr_vectors;
> + .align 3
> +ENTRY(__vgic_sr_vectors)
> + .skip VGIC_SR_VECTOR_SZ
> +ENDPROC(__vgic_sr_vectors)
> +
> __kvm_hyp_panic:
> // Guess the context by looking at VTTBR:
> // If zero, then we're already a host.
> @@ -653,6 +567,12 @@ __kvm_hyp_panic:
>
> mrs x0, tpidr_el2
>
> + save_timer_state
> + adr x24, __vgic_sr_vectors
> + ldr x24, [x24, VGIC_SAVE_FN]
> + kern_hyp_va x24
> + blr x24
> +
why are we doing this? If something bad happened, aren't we just trying
to print a panic string with as little in our way as possible?
I could see it if we were also restoring the host state, but this seems
fairly pointless...
> deactivate_traps
> deactivate_vm
>
> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
> new file mode 100644
> index 0000000..c5dc777
> --- /dev/null
> +++ b/arch/arm64/kvm/vgic-v2-switch.S
> @@ -0,0 +1,141 @@
> +/*
> + * Copyright (C) 2012,2013 - ARM Ltd
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/linkage.h>
> +#include <linux/irqchip/arm-gic.h>
> +
> +#include <asm/assembler.h>
> +#include <asm/memory.h>
> +#include <asm/asm-offsets.h>
> +#include <asm/kvm.h>
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_arm.h>
> +#include <asm/kvm_mmu.h>
> +
> + .text
> + .pushsection .hyp.text, "ax"
> +
> +/*
> + * Save the VGIC CPU state into memory
> + * x0: Register pointing to VCPU struct
> + * Do not corrupt x1!!!
> + */
> +.macro save_vgic_v2_state
> + /* Get VGIC VCTRL base into x2 */
> + ldr x2, [x0, #VCPU_KVM]
> + kern_hyp_va x2
> + ldr x2, [x2, #KVM_VGIC_VCTRL]
> + kern_hyp_va x2
> + cbz x2, 2f // disabled
> +
> + /* Compute the address of struct vgic_cpu */
> + add x3, x0, #VCPU_VGIC_CPU
> +
> + /* Save all interesting registers */
> + ldr w4, [x2, #GICH_HCR]
> + ldr w5, [x2, #GICH_VMCR]
> + ldr w6, [x2, #GICH_MISR]
> + ldr w7, [x2, #GICH_EISR0]
> + ldr w8, [x2, #GICH_EISR1]
> + ldr w9, [x2, #GICH_ELRSR0]
> + ldr w10, [x2, #GICH_ELRSR1]
> + ldr w11, [x2, #GICH_APR]
> +CPU_BE( rev w4, w4 )
> +CPU_BE( rev w5, w5 )
> +CPU_BE( rev w6, w6 )
> +CPU_BE( rev w7, w7 )
> +CPU_BE( rev w8, w8 )
> +CPU_BE( rev w9, w9 )
> +CPU_BE( rev w10, w10 )
> +CPU_BE( rev w11, w11 )
> +
> + str w4, [x3, #VGIC_V2_CPU_HCR]
> + str w5, [x3, #VGIC_V2_CPU_VMCR]
> + str w6, [x3, #VGIC_V2_CPU_MISR]
> + str w7, [x3, #VGIC_V2_CPU_EISR]
> + str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
> + str w9, [x3, #VGIC_V2_CPU_ELRSR]
> + str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
> + str w11, [x3, #VGIC_V2_CPU_APR]
> +
> + /* Clear GICH_HCR */
> + str wzr, [x2, #GICH_HCR]
> +
> + /* Save list registers */
> + add x2, x2, #GICH_LR0
> + ldr w4, [x3, #VGIC_CPU_NR_LR]
> + add x3, x3, #VGIC_V2_CPU_LR
> +1: ldr w5, [x2], #4
> +CPU_BE( rev w5, w5 )
> + str w5, [x3], #4
> + sub w4, w4, #1
> + cbnz w4, 1b
> +2:
> +.endm
> +
> +/*
> + * Restore the VGIC CPU state from memory
> + * x0: Register pointing to VCPU struct
> + */
> +.macro restore_vgic_v2_state
> + /* Get VGIC VCTRL base into x2 */
> + ldr x2, [x0, #VCPU_KVM]
> + kern_hyp_va x2
> + ldr x2, [x2, #KVM_VGIC_VCTRL]
> + kern_hyp_va x2
> + cbz x2, 2f // disabled
> +
> + /* Compute the address of struct vgic_cpu */
> + add x3, x0, #VCPU_VGIC_CPU
> +
> + /* We only restore a minimal set of registers */
> + ldr w4, [x3, #VGIC_V2_CPU_HCR]
> + ldr w5, [x3, #VGIC_V2_CPU_VMCR]
> + ldr w6, [x3, #VGIC_V2_CPU_APR]
> +CPU_BE( rev w4, w4 )
> +CPU_BE( rev w5, w5 )
> +CPU_BE( rev w6, w6 )
> +
> + str w4, [x2, #GICH_HCR]
> + str w5, [x2, #GICH_VMCR]
> + str w6, [x2, #GICH_APR]
> +
> + /* Restore list registers */
> + add x2, x2, #GICH_LR0
> + ldr w4, [x3, #VGIC_CPU_NR_LR]
> + add x3, x3, #VGIC_V2_CPU_LR
> +1: ldr w5, [x3], #4
> +CPU_BE( rev w5, w5 )
> + str w5, [x2], #4
> + sub w4, w4, #1
> + cbnz w4, 1b
> +2:
> +.endm
again relying on this being copied verbatim from the other file
> +
> +ENTRY(__save_vgic_v2_state)
> + save_vgic_v2_state
> + ret
> +ENDPROC(__save_vgic_v2_state)
> +
> +ENTRY(__restore_vgic_v2_state)
> +__restore_vgic_v2_state:
> + restore_vgic_v2_state
> + ret
> +ENDPROC(__restore_vgic_v2_state)
why the macro indirection? Couldn't we just have the code be the
functions?
> +
> +
> + .popsection
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index d8ec2eb..c47dee5 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -24,7 +24,6 @@
> #include <linux/irqreturn.h>
> #include <linux/spinlock.h>
> #include <linux/types.h>
> -#include <linux/irqchip/arm-gic.h>
>
> #define VGIC_NR_IRQS 256
> #define VGIC_NR_SGIS 16
> @@ -70,6 +69,10 @@ struct vgic_bytemap {
>
> struct kvm_vcpu;
>
> +enum vgic_type {
> + VGIC_V2, /* Good ol' GICv2 */
love it ;)
> +};
> +
> #define LR_STATE_PENDING (1 << 0)
> #define LR_STATE_ACTIVE (1 << 1)
> #define LR_STATE_MASK (3 << 0)
> @@ -102,6 +105,8 @@ struct vgic_ops {
> };
>
> struct vgic_params {
> + /* vgic type */
> + enum vgic_type type;
> /* Physical address of vgic virtual cpu interface */
> phys_addr_t vcpu_base;
> /* Number of list registers */
> diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
> index 52f438f..4c6606e 100644
> --- a/virt/kvm/arm/vgic-v2.c
> +++ b/virt/kvm/arm/vgic-v2.c
> @@ -170,7 +170,7 @@ int vgic_v2_probe(const struct vgic_ops **ops,
>
> vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
> if (!vgic_node) {
> - kvm_err("error: no compatible vgic node in DT\n");
> + kvm_err("error: no compatible GICv2 node in DT\n");
> return -ENODEV;
> }
>
> @@ -183,15 +183,15 @@ int vgic_v2_probe(const struct vgic_ops **ops,
>
> ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
> if (ret) {
> - kvm_err("Cannot obtain VCTRL resource\n");
> - goto out_free_irq;
> + kvm_err("Cannot obtain GICH resource\n");
> + goto out;
changing these labels and getting rid of the out_free_irq seems like it
should be part of the previous patch.
> }
>
> vgic->vctrl_base = of_iomap(vgic_node, 2);
> if (!vgic->vctrl_base) {
> - kvm_err("Cannot ioremap VCTRL\n");
> + kvm_err("Cannot ioremap GICH\n");
> ret = -ENOMEM;
> - goto out_free_irq;
> + goto out;
> }
>
> vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
> @@ -206,7 +206,7 @@ int vgic_v2_probe(const struct vgic_ops **ops,
> }
>
> if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
> - kvm_err("Cannot obtain VCPU resource\n");
> + kvm_err("Cannot obtain GICV resource\n");
> ret = -ENXIO;
> goto out_unmap;
> }
> @@ -215,14 +215,13 @@ int vgic_v2_probe(const struct vgic_ops **ops,
> kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
> vctrl_res.start, vgic->maint_irq);
>
> + vgic->type = VGIC_V2;
> *ops = &vgic_v2_ops;
> *params = vgic;
> goto out;
>
> out_unmap:
> iounmap(vgic->vctrl_base);
> -out_free_irq:
> - free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
> out:
> of_node_put(vgic_node);
> return ret;
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 613b492..8365189 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1531,6 +1531,9 @@ int kvm_vgic_hyp_init(void)
>
> on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
>
> + /* Callback into the arch code for setup */
> + vgic_arch_setup(vgic);
> +
> return 0;
>
> out_free_irq:
> --
> 1.8.3.4
>
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (15 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 16/19] arm64: KVM: split GICv2 world switch from hyp code Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend Marc Zyngier
2014-04-16 13:39 ` [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch Marc Zyngier
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
GICv3 requires the hcr_el2 switch to be tightly coupled with some
of the interrupt controller's register switch.
In order to have similar code paths, start moving the hcr_el2
manipulation code to the GICv2 switch code.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm64/kvm/hyp.S | 7 -------
arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index aed72d0..92b9120 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -335,11 +335,6 @@
.endm
.macro activate_traps
- ldr x2, [x0, #VCPU_IRQ_LINES]
- ldr x1, [x0, #VCPU_HCR_EL2]
- orr x2, x2, x1
- msr hcr_el2, x2
-
ldr x2, =(CPTR_EL2_TTA)
msr cptr_el2, x2
@@ -353,8 +348,6 @@
.endm
.macro deactivate_traps
- mov x2, #HCR_RW
- msr hcr_el2, x2
msr cptr_el2, xzr
msr hstr_el2, xzr
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
index c5dc777..d36cd7a 100644
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
sub w4, w4, #1
cbnz w4, 1b
2:
+ mov x2, #HCR_RW
+ msr hcr_el2, x2
+ isb
.endm
/*
@@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_v2_state
+ ldr x2, [x0, #VCPU_IRQ_LINES]
+ ldr x1, [x0, #VCPU_HCR_EL2]
+ orr x2, x2, x1
+ msr hcr_el2, x2
+
/* Get VGIC VCTRL base into x2 */
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-04-16 13:39 ` [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
2014-05-14 14:33 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:49PM +0100, Marc Zyngier wrote:
> GICv3 requires the hcr_el2 switch to be tightly coupled with some
> of the interrupt controller's register switch.
can you be more specific, this feels a bit odd, enabling Stage-2
translation and configuring all traps from within the vgic code...
>
> In order to have similar code paths, start moving the hcr_el2
> manipulation code to the GICv2 switch code.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm64/kvm/hyp.S | 7 -------
> arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
> 2 files changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
> index aed72d0..92b9120 100644
> --- a/arch/arm64/kvm/hyp.S
> +++ b/arch/arm64/kvm/hyp.S
> @@ -335,11 +335,6 @@
> .endm
>
> .macro activate_traps
> - ldr x2, [x0, #VCPU_IRQ_LINES]
> - ldr x1, [x0, #VCPU_HCR_EL2]
> - orr x2, x2, x1
> - msr hcr_el2, x2
> -
> ldr x2, =(CPTR_EL2_TTA)
> msr cptr_el2, x2
>
> @@ -353,8 +348,6 @@
> .endm
>
> .macro deactivate_traps
> - mov x2, #HCR_RW
> - msr hcr_el2, x2
> msr cptr_el2, xzr
> msr hstr_el2, xzr
>
> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
> index c5dc777..d36cd7a 100644
> --- a/arch/arm64/kvm/vgic-v2-switch.S
> +++ b/arch/arm64/kvm/vgic-v2-switch.S
> @@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
> sub w4, w4, #1
> cbnz w4, 1b
> 2:
> + mov x2, #HCR_RW
> + msr hcr_el2, x2
> + isb
> .endm
>
> /*
> @@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
> * x0: Register pointing to VCPU struct
> */
> .macro restore_vgic_v2_state
> + ldr x2, [x0, #VCPU_IRQ_LINES]
will this ever have any values on aarch64? Don't we mandate vgic
support and bail out during hyp init if we cannot init a vgic?
> + ldr x1, [x0, #VCPU_HCR_EL2]
> + orr x2, x2, x1
> + msr hcr_el2, x2
> +
> /* Get VGIC VCTRL base into x2 */
> ldr x2, [x0, #VCPU_KVM]
> kern_hyp_va x2
> --
> 1.8.3.4
>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-05-09 14:07 ` Christoffer Dall
@ 2014-05-14 14:33 ` Marc Zyngier
2014-05-14 16:34 ` Christoffer Dall
0 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-14 14:33 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:07:23 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:49PM +0100, Marc Zyngier wrote:
>> GICv3 requires the hcr_el2 switch to be tightly coupled with some
>> of the interrupt controller's register switch.
>
> can you be more specific, this feels a bit odd, enabling Stage-2
> translation and configuring all traps from within the vgic code...
The IMO and FMO bits must be set before restoring the various system
registers in GICv3. But I agree that this looks pretty horrible.
The alternative is to split the bits we set in HCR_EL2 into two sets (VM
and trap control on one side, interrupt control on the other). This
would translate into two accesses to HCR_EL2, but it would look
nicer. I'll have a look.
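As an illustration of that alternative, here is a rough sketch, assuming
the HCR_* and VCPU_HCR_EL2 definitions already used in this series; the
macro names below are invented and this is not code from the series:

.macro set_hcr_traps			// hypothetical: VM and trap control only, stays in hyp.S
	ldr	x2, [x0, #VCPU_HCR_EL2]
	bic	x2, x2, #(HCR_IMO | HCR_FMO)	// leave the routing bits to the vgic code
	msr	hcr_el2, x2
.endm

.macro set_hcr_irq_routing		// hypothetical: interrupt routing, run from the vgic switch code
	mrs	x2, hcr_el2
	orr	x2, x2, #(HCR_IMO | HCR_FMO)
	msr	hcr_el2, x2
	isb				// IMO/FMO in effect before the ICH_* registers are restored
.endm

That costs a second hcr_el2 access on the restore path, which is the
trade-off mentioned above.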
>> In order to have similar code paths, start moving the hcr_el2
>> manipulation code to the GICv2 switch code.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> arch/arm64/kvm/hyp.S | 7 -------
>> arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
>> 2 files changed, 8 insertions(+), 7 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
>> index aed72d0..92b9120 100644
>> --- a/arch/arm64/kvm/hyp.S
>> +++ b/arch/arm64/kvm/hyp.S
>> @@ -335,11 +335,6 @@
>> .endm
>>
>> .macro activate_traps
>> - ldr x2, [x0, #VCPU_IRQ_LINES]
>> - ldr x1, [x0, #VCPU_HCR_EL2]
>> - orr x2, x2, x1
>> - msr hcr_el2, x2
>> -
>> ldr x2, =(CPTR_EL2_TTA)
>> msr cptr_el2, x2
>>
>> @@ -353,8 +348,6 @@
>> .endm
>>
>> .macro deactivate_traps
>> - mov x2, #HCR_RW
>> - msr hcr_el2, x2
>> msr cptr_el2, xzr
>> msr hstr_el2, xzr
>>
>> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
>> index c5dc777..d36cd7a 100644
>> --- a/arch/arm64/kvm/vgic-v2-switch.S
>> +++ b/arch/arm64/kvm/vgic-v2-switch.S
>> @@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
>> sub w4, w4, #1
>> cbnz w4, 1b
>> 2:
>> + mov x2, #HCR_RW
>> + msr hcr_el2, x2
>> + isb
>> .endm
>>
>> /*
>> @@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
>> * x0: Register pointing to VCPU struct
>> */
>> .macro restore_vgic_v2_state
>> + ldr x2, [x0, #VCPU_IRQ_LINES]
>
> will this ever have any values on aarch64? Don't we mandate vgic
> support and bail out during hyp init if we cannot init a vgic?
Yes. But that doesn't mean we don't support the feature either. The case
is fairly slim, I agree, but it has been there since Day-1...
M.
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-05-14 14:33 ` Marc Zyngier
@ 2014-05-14 16:34 ` Christoffer Dall
2014-05-14 16:58 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-14 16:34 UTC (permalink / raw)
To: linux-arm-kernel
On 14 May 2014 15:33, Marc Zyngier <marc.zyngier@arm.com> wrote:
> On Fri, May 09 2014 at 3:07:23 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>> On Wed, Apr 16, 2014 at 02:39:49PM +0100, Marc Zyngier wrote:
>>> GICv3 requires the hcr_el2 switch to be tightly coupled with some
>>> of the interrupt controller's register switch.
>>
>> can you be more specific, this feels a bit odd, enabling Stage-2
>> translation and configuring all traps from within the vgic code...
>
> The IMO and FMO bits must be set before restoring the various system
> registers in GICv3. But I agreee that this looks pretty horrible.
>
> The alternative is to split the bits we set in HCR_EL2 into two sets (VM
> and trap control on one side, interrupt control on the other). This
> would translate into two accesses to HCR_EL2, but it would look
> nicer. I'll have a look.
>
>>> In order to have similar code paths, start moving the hcr_el2
>>> manipulation code to the GICv2 switch code.
>>>
>>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>>> ---
>>> arch/arm64/kvm/hyp.S | 7 -------
>>> arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
>>> 2 files changed, 8 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
>>> index aed72d0..92b9120 100644
>>> --- a/arch/arm64/kvm/hyp.S
>>> +++ b/arch/arm64/kvm/hyp.S
>>> @@ -335,11 +335,6 @@
>>> .endm
>>>
>>> .macro activate_traps
>>> - ldr x2, [x0, #VCPU_IRQ_LINES]
>>> - ldr x1, [x0, #VCPU_HCR_EL2]
>>> - orr x2, x2, x1
>>> - msr hcr_el2, x2
>>> -
>>> ldr x2, =(CPTR_EL2_TTA)
>>> msr cptr_el2, x2
>>>
>>> @@ -353,8 +348,6 @@
>>> .endm
>>>
>>> .macro deactivate_traps
>>> - mov x2, #HCR_RW
>>> - msr hcr_el2, x2
>>> msr cptr_el2, xzr
>>> msr hstr_el2, xzr
>>>
>>> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
>>> index c5dc777..d36cd7a 100644
>>> --- a/arch/arm64/kvm/vgic-v2-switch.S
>>> +++ b/arch/arm64/kvm/vgic-v2-switch.S
>>> @@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
>>> sub w4, w4, #1
>>> cbnz w4, 1b
>>> 2:
>>> + mov x2, #HCR_RW
>>> + msr hcr_el2, x2
>>> + isb
>>> .endm
>>>
>>> /*
>>> @@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
>>> * x0: Register pointing to VCPU struct
>>> */
>>> .macro restore_vgic_v2_state
>>> + ldr x2, [x0, #VCPU_IRQ_LINES]
>>
>> will this ever have any values on aarch64? Don't we mandate vgic
>> support and bail out during hyp init if we cannot init a vgic?
>
> Yes. But that doesn't mean we don't support the feature either. The case
> is fairly slim, I agree, but it has been there since Day-1...
>
See kvm_vm_ioctl_irq_line() in arch/arm/kvm/arm.c:
case KVM_ARM_IRQ_TYPE_CPU:
if (irqchip_in_kernel(kvm))
return -ENXIO;
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-05-14 16:34 ` Christoffer Dall
@ 2014-05-14 16:58 ` Marc Zyngier
2014-05-15 12:20 ` Christoffer Dall
0 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-14 16:58 UTC (permalink / raw)
To: linux-arm-kernel
On 14/05/14 17:34, Christoffer Dall wrote:
> On 14 May 2014 15:33, Marc Zyngier <marc.zyngier@arm.com> wrote:
>> On Fri, May 09 2014 at 3:07:23 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>>> On Wed, Apr 16, 2014 at 02:39:49PM +0100, Marc Zyngier wrote:
>>>> GICv3 requires the hcr_el2 switch to be tightly coupled with some
>>>> of the interrupt controller's register switch.
>>>
>>> can you be more specific, this feels a bit odd, enabling Stage-2
>>> translation and configuring all traps from within the vgic code...
>>
>> The IMO and FMO bits must be set before restoring the various system
>> registers in GICv3. But I agree that this looks pretty horrible.
>>
>> The alternative is to split the bits we set in HCR_EL2 into two sets (VM
>> and trap control on one side, interrupt control on the other). This
>> would translate into two accesses to HCR_EL2, but it would look
>> nicer. I'll have a look.
>>
>>>> In order to have similar code paths, start moving the hcr_el2
>>>> manipulation code to the GICv2 switch code.
>>>>
>>>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>>>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>>>> ---
>>>> arch/arm64/kvm/hyp.S | 7 -------
>>>> arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
>>>> 2 files changed, 8 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
>>>> index aed72d0..92b9120 100644
>>>> --- a/arch/arm64/kvm/hyp.S
>>>> +++ b/arch/arm64/kvm/hyp.S
>>>> @@ -335,11 +335,6 @@
>>>> .endm
>>>>
>>>> .macro activate_traps
>>>> - ldr x2, [x0, #VCPU_IRQ_LINES]
>>>> - ldr x1, [x0, #VCPU_HCR_EL2]
>>>> - orr x2, x2, x1
>>>> - msr hcr_el2, x2
>>>> -
>>>> ldr x2, =(CPTR_EL2_TTA)
>>>> msr cptr_el2, x2
>>>>
>>>> @@ -353,8 +348,6 @@
>>>> .endm
>>>>
>>>> .macro deactivate_traps
>>>> - mov x2, #HCR_RW
>>>> - msr hcr_el2, x2
>>>> msr cptr_el2, xzr
>>>> msr hstr_el2, xzr
>>>>
>>>> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
>>>> index c5dc777..d36cd7a 100644
>>>> --- a/arch/arm64/kvm/vgic-v2-switch.S
>>>> +++ b/arch/arm64/kvm/vgic-v2-switch.S
>>>> @@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
>>>> sub w4, w4, #1
>>>> cbnz w4, 1b
>>>> 2:
>>>> + mov x2, #HCR_RW
>>>> + msr hcr_el2, x2
>>>> + isb
>>>> .endm
>>>>
>>>> /*
>>>> @@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
>>>> * x0: Register pointing to VCPU struct
>>>> */
>>>> .macro restore_vgic_v2_state
>>>> + ldr x2, [x0, #VCPU_IRQ_LINES]
>>>
>>> will this ever have any values on aarch64? Don't we mandate vgic
>>> support and bail out during hyp init if we cannot init a vgic?
>>
>> Yes. But that doesn't mean we don't support the feature either. The case
>> is fairly slim, I agree, but it has been there since Day-1...
>>
> See kvm_vm_ioctl_irq_line() in arch/arm/kvm/arm.c:
>
> case KVM_ARM_IRQ_TYPE_CPU:
> if (irqchip_in_kernel(kvm))
> return -ENXIO;
Unfortunately, this only checks if the VM has a vgic instantiated. It is
always possible to create a VM without the in-kernel GIC, and use the
pins to inject IRQs. As I said, unlikely to happen, but nonetheless...
M.
--
Jazz is not dead. It just smells funny...
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S
2014-05-14 16:58 ` Marc Zyngier
@ 2014-05-15 12:20 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-15 12:20 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, May 14, 2014 at 05:58:04PM +0100, Marc Zyngier wrote:
> On 14/05/14 17:34, Christoffer Dall wrote:
> > On 14 May 2014 15:33, Marc Zyngier <marc.zyngier@arm.com> wrote:
> >> On Fri, May 09 2014 at 3:07:23 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> >>> On Wed, Apr 16, 2014 at 02:39:49PM +0100, Marc Zyngier wrote:
> >>>> GICv3 requires the hcr_el2 switch to be tightly coupled with some
> >>>> of the interrupt controller's register switch.
> >>>
> >>> can you be more specific, this feels a bit odd, enabling Stage-2
> >>> translation and configuring all traps from within the vgic code...
> >>
> >> The IMO and FMO bits must be set before restoring the various system
> >> registers in GICv3. But I agree that this looks pretty horrible.
> >>
> >> The alternative is to split the bits we set in HCR_EL2 into two sets (VM
> >> and trap control on one side, interrupt control on the other). This
> >> would translate into two accesses to HCR_EL2, but it would look
> >> nicer. I'll have a look.
> >>
> >>>> In order to have similar code paths, start moving the hcr_el2
> >>>> manipulation code to the GICv2 switch code.
> >>>>
> >>>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> >>>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> >>>> ---
> >>>> arch/arm64/kvm/hyp.S | 7 -------
> >>>> arch/arm64/kvm/vgic-v2-switch.S | 8 ++++++++
> >>>> 2 files changed, 8 insertions(+), 7 deletions(-)
> >>>>
> >>>> diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
> >>>> index aed72d0..92b9120 100644
> >>>> --- a/arch/arm64/kvm/hyp.S
> >>>> +++ b/arch/arm64/kvm/hyp.S
> >>>> @@ -335,11 +335,6 @@
> >>>> .endm
> >>>>
> >>>> .macro activate_traps
> >>>> - ldr x2, [x0, #VCPU_IRQ_LINES]
> >>>> - ldr x1, [x0, #VCPU_HCR_EL2]
> >>>> - orr x2, x2, x1
> >>>> - msr hcr_el2, x2
> >>>> -
> >>>> ldr x2, =(CPTR_EL2_TTA)
> >>>> msr cptr_el2, x2
> >>>>
> >>>> @@ -353,8 +348,6 @@
> >>>> .endm
> >>>>
> >>>> .macro deactivate_traps
> >>>> - mov x2, #HCR_RW
> >>>> - msr hcr_el2, x2
> >>>> msr cptr_el2, xzr
> >>>> msr hstr_el2, xzr
> >>>>
> >>>> diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
> >>>> index c5dc777..d36cd7a 100644
> >>>> --- a/arch/arm64/kvm/vgic-v2-switch.S
> >>>> +++ b/arch/arm64/kvm/vgic-v2-switch.S
> >>>> @@ -85,6 +85,9 @@ CPU_BE( rev w5, w5 )
> >>>> sub w4, w4, #1
> >>>> cbnz w4, 1b
> >>>> 2:
> >>>> + mov x2, #HCR_RW
> >>>> + msr hcr_el2, x2
> >>>> + isb
> >>>> .endm
> >>>>
> >>>> /*
> >>>> @@ -92,6 +95,11 @@ CPU_BE( rev w5, w5 )
> >>>> * x0: Register pointing to VCPU struct
> >>>> */
> >>>> .macro restore_vgic_v2_state
> >>>> + ldr x2, [x0, #VCPU_IRQ_LINES]
> >>>
> >>> will this ever have any values on aarch64? Don't we mandate vgic
> >>> support and bail out during hyp init if we cannot init a vgic?
> >>
> >> Yes. But that doesn't mean we don't support the feature either. The case
> >> is fairly slim, I agree, but it has been there since Day-1...
> >>
> > See kvm_vm_ioctl_irq_line() in arch/arm/kvm/arm.c:
> >
> > case KVM_ARM_IRQ_TYPE_CPU:
> > if (irqchip_in_kernel(kvm))
> > return -ENXIO;
>
> Unfortunately, this only checks if the VM has a vgic instantiated. It is
> always possible to create a VM without the in-kernel GIC, and use the
> pins to inject IRQs. As I said, unlikely to happen, but nonetheless...
>
Yeah, you're right, I'm an idiot. Sorry for the noise.
Thanks,
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (16 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 17/19] arm64: KVM: move hcr_el2 setting into vgic-v2-switch.S Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` Christoffer Dall
2014-04-16 13:39 ` [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch Marc Zyngier
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Introduce the support code for emulating a GICv2 on top of GICv3
hardware.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
include/kvm/arm_vgic.h | 26 ++++++
virt/kvm/arm/vgic-v3.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++
virt/kvm/arm/vgic.c | 2 +
3 files changed, 248 insertions(+)
create mode 100644 virt/kvm/arm/vgic-v3.c
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index c47dee5..6119a5a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -32,6 +32,7 @@
#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
#define VGIC_MAX_CPUS KVM_MAX_VCPUS
#define VGIC_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
/* Sanity checks... */
#if (VGIC_MAX_CPUS > 8)
@@ -71,6 +72,7 @@ struct kvm_vcpu;
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* v2 on v3, really */
};
#define LR_STATE_PENDING (1 << 0)
@@ -169,6 +171,19 @@ struct vgic_v2_cpu_if {
u32 vgic_lr[VGIC_MAX_LRS];
};
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_ARM_GIC_V3
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr; /* Saved only */
+ u32 vgic_elrsr; /* Saved only */
+ u32 vgic_ap0r[4];
+ u32 vgic_ap1r[4];
+ u64 vgic_lr[VGIC_V3_MAX_LRS];
+#endif
+};
+
struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
@@ -187,6 +202,7 @@ struct vgic_cpu {
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
+ struct vgic_v3_cpu_if vgic_v3;
};
#endif
};
@@ -220,6 +236,16 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
int vgic_v2_probe(const struct vgic_ops **ops,
const struct vgic_params **params);
+#ifdef CONFIG_ARM_GIC_V3
+int vgic_v3_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params);
+#else
+static inline int vgic_v3_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params)
+{
+ return -ENODEV;
+}
+#endif
#else
static inline int kvm_vgic_hyp_init(void)
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
new file mode 100644
index 0000000..a804a73
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID (0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT (10)
+#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+
+static u32 ich_vtr_el2;
+
+static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+ struct vgic_lr lr_desc;
+ u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
+
+ lr_desc.irq = val & GICH_LR_VIRTUALID;
+ lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
+ lr_desc.state = 0;
+
+ if (val & GICH_LR_PENDING_BIT)
+ lr_desc.state |= LR_STATE_PENDING;
+ if (val & GICH_LR_ACTIVE_BIT)
+ lr_desc.state |= LR_STATE_ACTIVE;
+ if (val & GICH_LR_EOI)
+ lr_desc.state |= LR_EOI_INT;
+
+ return lr_desc;
+}
+
+#define MK_LR_PEND(src, irq) \
+ (GICH_LR_PENDING_BIT | \
+ (((u32)(src)) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
+
+static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+ struct vgic_lr lr_desc)
+{
+ u64 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
+
+ if (lr_desc.state & LR_STATE_PENDING)
+ lr_val |= GICH_LR_PENDING_BIT;
+ if (lr_desc.state & LR_STATE_ACTIVE)
+ lr_val |= GICH_LR_ACTIVE_BIT;
+ if (lr_desc.state & LR_EOI_INT)
+ lr_val |= GICH_LR_EOI;
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
+
+ /*
+ * Despite being EOIed, the LR may not have been marked as
+ * empty.
+ */
+ if (!(lr_val & GICH_LR_STATE))
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+}
+
+static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
+}
+
+static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
+}
+
+static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+ u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
+ u32 ret = 0;
+
+ if (misr & GICH_MISR_EOI)
+ ret |= INT_STATUS_EOI;
+ if (misr & GICH_MISR_U)
+ ret |= INT_STATUS_UNDERFLOW;
+
+ return ret;
+}
+
+static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+ vmcrp->ctlr = (vmcr & GICH_VMCR_CTLR_MASK) >> GICH_VMCR_CTLR_SHIFT;
+ vmcrp->abpr = (vmcr & GICH_VMCR_BPR1_MASK) >> GICH_VMCR_BPR1_SHIFT;
+ vmcrp->bpr = (vmcr & GICH_VMCR_BPR0_MASK) >> GICH_VMCR_BPR0_SHIFT;
+ vmcrp->pmr = (vmcr & GICH_VMCR_PMR_MASK) >> GICH_VMCR_PMR_SHIFT;
+}
+
+static void vgic_v3_clear_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~GICH_HCR_UIE;
+}
+
+static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr;
+
+ vmcr = (vmcrp->ctlr << GICH_VMCR_CTLR_SHIFT) & GICH_VMCR_CTLR_MASK;
+ vmcr |= (vmcrp->abpr << GICH_VMCR_BPR1_SHIFT) & GICH_VMCR_BPR1_MASK;
+ vmcr |= (vmcrp->bpr << GICH_VMCR_BPR0_SHIFT) & GICH_VMCR_BPR0_MASK;
+ vmcr |= (vmcrp->pmr << GICH_VMCR_PMR_SHIFT) & GICH_VMCR_PMR_MASK;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+static void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= GICH_HCR_UIE;
+}
+
+static void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+ /*
+ * By forcing VMCR to zero, the GIC will restore the binary
+ * points to their reset values. Anything else resets to zero
+ * anyway.
+ */
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+
+ /* Get the show on the road... */
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = GICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v3_ops = {
+ .get_lr = vgic_v3_get_lr,
+ .set_lr = vgic_v3_set_lr,
+ .get_elrsr = vgic_v3_get_elrsr,
+ .get_eisr = vgic_v3_get_eisr,
+ .get_interrupt_status = vgic_v3_get_interrupt_status,
+ .set_underflow = vgic_v3_set_underflow,
+ .clear_underflow = vgic_v3_clear_underflow,
+ .get_vmcr = vgic_v3_get_vmcr,
+ .set_vmcr = vgic_v3_set_vmcr,
+ .enable = vgic_v3_enable,
+};
+
+static struct vgic_params vgic_v3_params;
+
+int vgic_v3_probe(const struct vgic_ops **ops,
+ const struct vgic_params **params)
+{
+ int ret = 0;
+ u32 gicv_idx;
+ struct resource vcpu_res;
+ struct device_node *vgic_node;
+ struct vgic_params *vgic = &vgic_v3_params;
+
+ vgic_node = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
+ if (!vgic_node) {
+ kvm_err("error: no compatible GICv3 node in DT\n");
+ return -ENODEV;
+ }
+
+ vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+ if (!vgic->maint_irq) {
+ kvm_err("error getting vgic maintenance irq from DT\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+
+ /*
+ * The ListRegs field is 5 bits, but there is an architectural
+ * maximum of 16 list registers. Just ignore bit 4...
+ */
+ vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+
+ if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
+ gicv_idx = 1;
+
+ gicv_idx += 3; /* Also skip GICD, GICC, GICH */
+ if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
+ kvm_err("Cannot obtain GICV region\n");
+ ret = -ENXIO;
+ goto out;
+ }
+ vgic->vcpu_base = vcpu_res.start;
+ vgic->vctrl_base = (void *)(-1);
+ vgic->type = VGIC_V3;
+
+ kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+ vcpu_res.start, vgic->maint_irq);
+
+ *ops = &vgic_v3_ops;
+ *params = vgic;
+
+out:
+ of_node_put(vgic_node);
+ return ret;
+}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 8365189..f29761b 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1514,6 +1514,8 @@ int kvm_vgic_hyp_init(void)
ret = vgic_v2_probe(&vgic_ops, &vgic);
if (ret)
+ ret = vgic_v3_probe(&vgic_ops, &vgic);
+ if (ret)
return ret;
ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend
2014-04-16 13:39 ` [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
2014-05-14 17:47 ` Marc Zyngier
2014-05-15 8:13 ` Marc Zyngier
0 siblings, 2 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:50PM +0100, Marc Zyngier wrote:
> Introduce the support code for emulating a GICv2 on top of GICv3
> hardware.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> include/kvm/arm_vgic.h | 26 ++++++
> virt/kvm/arm/vgic-v3.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++
> virt/kvm/arm/vgic.c | 2 +
> 3 files changed, 248 insertions(+)
> create mode 100644 virt/kvm/arm/vgic-v3.c
>
> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
> index c47dee5..6119a5a 100644
> --- a/include/kvm/arm_vgic.h
> +++ b/include/kvm/arm_vgic.h
> @@ -32,6 +32,7 @@
> #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
> #define VGIC_MAX_CPUS KVM_MAX_VCPUS
> #define VGIC_MAX_LRS (1 << 6)
shouldn't these have been renamed to VGIC_V2_MAX_LRS etc.?
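Something along these lines is what I had in mind (hypothetical naming, untested):

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_MAX_LRS		VGIC_V2_MAX_LRS

so the existing array declarations keep compiling while the v2 limit gets an explicit name.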
> +#define VGIC_V3_MAX_LRS 16
>
> /* Sanity checks... */
> #if (VGIC_MAX_CPUS > 8)
> @@ -71,6 +72,7 @@ struct kvm_vcpu;
>
> enum vgic_type {
> VGIC_V2, /* Good ol' GICv2 */
> + VGIC_V3, /* v2 on v3, really */
comment maybe a bit too misleading, this is about the hardware, not what
we emulate.
> };
>
> #define LR_STATE_PENDING (1 << 0)
> @@ -169,6 +171,19 @@ struct vgic_v2_cpu_if {
> u32 vgic_lr[VGIC_MAX_LRS];
> };
>
> +struct vgic_v3_cpu_if {
> +#ifdef CONFIG_ARM_GIC_V3
> + u32 vgic_hcr;
> + u32 vgic_vmcr;
> + u32 vgic_misr; /* Saved only */
> + u32 vgic_eisr; /* Saved only */
> + u32 vgic_elrsr; /* Saved only */
> + u32 vgic_ap0r[4];
> + u32 vgic_ap1r[4];
> + u64 vgic_lr[VGIC_V3_MAX_LRS];
> +#endif
> +};
> +
> struct vgic_cpu {
> #ifdef CONFIG_KVM_ARM_VGIC
> /* per IRQ to LR mapping */
> @@ -187,6 +202,7 @@ struct vgic_cpu {
> /* CPU vif control registers for world switch */
> union {
> struct vgic_v2_cpu_if vgic_v2;
> + struct vgic_v3_cpu_if vgic_v3;
> };
> #endif
> };
> @@ -220,6 +236,16 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
>
> int vgic_v2_probe(const struct vgic_ops **ops,
> const struct vgic_params **params);
> +#ifdef CONFIG_ARM_GIC_V3
> +int vgic_v3_probe(const struct vgic_ops **ops,
> + const struct vgic_params **params);
> +#else
> +static inline int vgic_v3_probe(const struct vgic_ops **ops,
> + const struct vgic_params **params)
> +{
> + return -ENODEV;
> +}
> +#endif
>
> #else
> static inline int kvm_vgic_hyp_init(void)
> diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
> new file mode 100644
> index 0000000..a804a73
> --- /dev/null
> +++ b/virt/kvm/arm/vgic-v3.c
> @@ -0,0 +1,220 @@
> +/*
> + * Copyright (C) 2013 ARM Limited, All Rights Reserved.
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/cpu.h>
> +#include <linux/kvm.h>
> +#include <linux/kvm_host.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/of.h>
> +#include <linux/of_address.h>
> +#include <linux/of_irq.h>
> +
> +#include <linux/irqchip/arm-gic-v3.h>
> +
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_arm.h>
> +#include <asm/kvm_mmu.h>
> +
> +/* These are for GICv2 emulation only */
Is this really true, seems like you're using them to form the lr values
for the hardware below.
> +#define GICH_LR_VIRTUALID (0x3ffUL << 0)
> +#define GICH_LR_PHYSID_CPUID_SHIFT (10)
> +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
can't we include them from the existing header file then?
> +
> +static u32 ich_vtr_el2;
> +
> +static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
> +{
> + struct vgic_lr lr_desc;
> + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
> +
> + lr_desc.irq = val & GICH_LR_VIRTUALID;
> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
isn't this mask only for bits [12:10] which would make it 0x7 ?
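Reusing the mask you already define above would keep the extraction within the field, e.g. (sketch only):

	lr_desc.source = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;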
> + lr_desc.state = 0;
> +
> + if (val & GICH_LR_PENDING_BIT)
> + lr_desc.state |= LR_STATE_PENDING;
> + if (val & GICH_LR_ACTIVE_BIT)
> + lr_desc.state |= LR_STATE_ACTIVE;
> + if (val & GICH_LR_EOI)
> + lr_desc.state |= LR_EOI_INT;
> +
> + return lr_desc;
> +}
> +
> +#define MK_LR_PEND(src, irq) \
> + (GICH_LR_PENDING_BIT | \
> + (((u32)(src)) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
> +
> +static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
> + struct vgic_lr lr_desc)
> +{
> + u64 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
> +
> + if (lr_desc.state & LR_STATE_PENDING)
> + lr_val |= GICH_LR_PENDING_BIT;
> + if (lr_desc.state & LR_STATE_ACTIVE)
> + lr_val |= GICH_LR_ACTIVE_BIT;
> + if (lr_desc.state & LR_EOI_INT)
> + lr_val |= GICH_LR_EOI;
> +
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
> +
> + /*
> + * Despite being EOIed, the LR may not have been marked as
> + * empty.
> + */
> + if (!(lr_val & GICH_LR_STATE))
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
> +}
these functions are _identical_ to those in vgic_v2. Seems like they
should share the code when emulating GICv2.
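For the decode side at least, a shared helper taking the raw LR value would seem to work (untested sketch, hypothetical name; the v2 u32 promotes cleanly to u64):

static struct vgic_lr vgic_lr_decode(u64 val)
{
	struct vgic_lr lr_desc = {
		.irq	= val & GICH_LR_VIRTUALID,
		.source	= (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7,
	};

	if (val & GICH_LR_PENDING_BIT)
		lr_desc.state |= LR_STATE_PENDING;
	if (val & GICH_LR_ACTIVE_BIT)
		lr_desc.state |= LR_STATE_ACTIVE;
	if (val & GICH_LR_EOI)
		lr_desc.state |= LR_EOI_INT;

	return lr_desc;
}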
> +
> +static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
> +{
> + return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
> +}
> +
> +static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
> +{
> + return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
> +}
> +
> +static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
> +{
> + u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
> + u32 ret = 0;
> +
> + if (misr & GICH_MISR_EOI)
> + ret |= INT_STATUS_EOI;
> + if (misr & GICH_MISR_U)
> + ret |= INT_STATUS_UNDERFLOW;
> +
> + return ret;
> +}
> +
> +static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
vgic_v3?
> +
> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTLR_MASK) >> GICH_VMCR_CTLR_SHIFT;
> + vmcrp->abpr = (vmcr & GICH_VMCR_BPR1_MASK) >> GICH_VMCR_BPR1_SHIFT;
> + vmcrp->bpr = (vmcr & GICH_VMCR_BPR0_MASK) >> GICH_VMCR_BPR0_SHIFT;
> + vmcrp->pmr = (vmcr & GICH_VMCR_PMR_MASK) >> GICH_VMCR_PMR_SHIFT;
> +}
> +
> +static void vgic_v3_clear_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~GICH_HCR_UIE;
> +}
> +
> +static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
> +{
> + u32 vmcr;
> +
> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTLR_SHIFT) & GICH_VMCR_CTLR_MASK;
> + vmcr |= (vmcrp->abpr << GICH_VMCR_BPR1_SHIFT) & GICH_VMCR_BPR1_MASK;
> + vmcr |= (vmcrp->bpr << GICH_VMCR_BPR0_SHIFT) & GICH_VMCR_BPR0_MASK;
> + vmcr |= (vmcrp->pmr << GICH_VMCR_PMR_SHIFT) & GICH_VMCR_PMR_MASK;
> +
> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
vgic_v3?
> +}
> +
> +static void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
> +{
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= GICH_HCR_UIE;
> +}
can you group set/clear underflow please?
> +
> +static void vgic_v3_enable(struct kvm_vcpu *vcpu)
> +{
> + /*
> + * By forcing VMCR to zero, the GIC will restore the binary
> + * points to their reset values. Anything else resets to zero
> + * anyway.
> + */
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
> +
> + /* Get the show on the road... */
> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = GICH_HCR_EN;
> +}
> +
> +static const struct vgic_ops vgic_v3_ops = {
> + .get_lr = vgic_v3_get_lr,
> + .set_lr = vgic_v3_set_lr,
> + .get_elrsr = vgic_v3_get_elrsr,
> + .get_eisr = vgic_v3_get_eisr,
> + .get_interrupt_status = vgic_v3_get_interrupt_status,
> + .set_underflow = vgic_v3_set_underflow,
> + .clear_underflow = vgic_v3_clear_underflow,
> + .get_vmcr = vgic_v3_get_vmcr,
> + .set_vmcr = vgic_v3_set_vmcr,
> + .enable = vgic_v3_enable,
> +};
> +
> +static struct vgic_params vgic_v3_params;
> +
> +int vgic_v3_probe(const struct vgic_ops **ops,
> + const struct vgic_params **params)
> +{
> + int ret = 0;
> + u32 gicv_idx;
> + struct resource vcpu_res;
> + struct device_node *vgic_node;
> + struct vgic_params *vgic = &vgic_v3_params;
> +
> + vgic_node = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
> + if (!vgic_node) {
> + kvm_err("error: no compatible GICv3 node in DT\n");
> + return -ENODEV;
> + }
> +
> + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
> + if (!vgic->maint_irq) {
> + kvm_err("error getting vgic maintenance irq from DT\n");
> + ret = -ENXIO;
> + goto out;
> + }
> +
> + ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
I'm just going to assume this actually returns ich_vtr_el2 for the rest
of reviewing this patch and hope that I get to see this function in the
next patch ;)
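(Looking ahead, the next patch does declare

extern u64 __vgic_v3_get_ich_vtr_el2(void);

and implements it as a plain ICH_VTR_EL2 read, so the assumption looks safe.)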
> +
> + /*
> + * The ListRegs field is 5 bits, but there is an architectural
> + * maximum of 16 list registers. Just ignore bit 4...
> + */
> + vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
> +
> + if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
> + gicv_idx = 1;
> +
> + gicv_idx += 3; /* Also skip GICD, GICC, GICH */
> + if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
> + kvm_err("Cannot obtain GICV region\n");
> + ret = -ENXIO;
> + goto out;
> + }
> + vgic->vcpu_base = vcpu_res.start;
> + vgic->vctrl_base = (void *)(-1);
this indicates to me that all mentions of vctrl_base should be local
to the vgic_v2 file?
> + vgic->type = VGIC_V3;
> +
> + kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
> + vcpu_res.start, vgic->maint_irq);
> +
> + *ops = &vgic_v3_ops;
> + *params = vgic;
> +
> +out:
> + of_node_put(vgic_node);
> + return ret;
> +}
> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
> index 8365189..f29761b 100644
> --- a/virt/kvm/arm/vgic.c
> +++ b/virt/kvm/arm/vgic.c
> @@ -1514,6 +1514,8 @@ int kvm_vgic_hyp_init(void)
>
> ret = vgic_v2_probe(&vgic_ops, &vgic);
> if (ret)
> + ret = vgic_v3_probe(&vgic_ops, &vgic);
this doesn't compile for me, missing Makefile include but even when
adding that, then other stuff breaks and the config option can actually
be set here... :(
> + if (ret)
> return ret;
>
> ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
> --
> 1.8.3.4
>
Please fix the bisectability of this entire series.
I have reviewed the actual functional logic of this patch and have not
found any issues.
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend
2014-05-09 14:07 ` Christoffer Dall
@ 2014-05-14 17:47 ` Marc Zyngier
2014-05-15 8:13 ` Marc Zyngier
1 sibling, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-14 17:47 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:07:31 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:50PM +0100, Marc Zyngier wrote:
>> Introduce the support code for emulating a GICv2 on top of GICv3
>> hardware.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 26 ++++++
>> virt/kvm/arm/vgic-v3.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++
>> virt/kvm/arm/vgic.c | 2 +
>> 3 files changed, 248 insertions(+)
>> create mode 100644 virt/kvm/arm/vgic-v3.c
>>
>> diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
>> index c47dee5..6119a5a 100644
>> --- a/include/kvm/arm_vgic.h
>> +++ b/include/kvm/arm_vgic.h
>> @@ -32,6 +32,7 @@
>> #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
>> #define VGIC_MAX_CPUS KVM_MAX_VCPUS
>> #define VGIC_MAX_LRS (1 << 6)
>
> shouldn't these have been renamed to VGIC_V2_MAX_LRS etc.?
Yup.
>> +#define VGIC_V3_MAX_LRS 16
>>
>> /* Sanity checks... */
>> #if (VGIC_MAX_CPUS > 8)
>> @@ -71,6 +72,7 @@ struct kvm_vcpu;
>>
>> enum vgic_type {
>> VGIC_V2, /* Good ol' GICv2 */
>> + VGIC_V3, /* v2 on v3, really */
>
> comment maybe a bit too misleading, this is about the hardware, not what
> we emulate.
Indeed.
>> };
>>
>> #define LR_STATE_PENDING (1 << 0)
>> @@ -169,6 +171,19 @@ struct vgic_v2_cpu_if {
>> u32 vgic_lr[VGIC_MAX_LRS];
>> };
>>
>> +struct vgic_v3_cpu_if {
>> +#ifdef CONFIG_ARM_GIC_V3
>> + u32 vgic_hcr;
>> + u32 vgic_vmcr;
>> + u32 vgic_misr; /* Saved only */
>> + u32 vgic_eisr; /* Saved only */
>> + u32 vgic_elrsr; /* Saved only */
>> + u32 vgic_ap0r[4];
>> + u32 vgic_ap1r[4];
>> + u64 vgic_lr[VGIC_V3_MAX_LRS];
>> +#endif
>> +};
>> +
>> struct vgic_cpu {
>> #ifdef CONFIG_KVM_ARM_VGIC
>> /* per IRQ to LR mapping */
>> @@ -187,6 +202,7 @@ struct vgic_cpu {
>> /* CPU vif control registers for world switch */
>> union {
>> struct vgic_v2_cpu_if vgic_v2;
>> + struct vgic_v3_cpu_if vgic_v3;
>> };
>> #endif
>> };
>> @@ -220,6 +236,16 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
>>
>> int vgic_v2_probe(const struct vgic_ops **ops,
>> const struct vgic_params **params);
>> +#ifdef CONFIG_ARM_GIC_V3
>> +int vgic_v3_probe(const struct vgic_ops **ops,
>> + const struct vgic_params **params);
>> +#else
>> +static inline int vgic_v3_probe(const struct vgic_ops **ops,
>> + const struct vgic_params **params)
>> +{
>> + return -ENODEV;
>> +}
>> +#endif
>>
>> #else
>> static inline int kvm_vgic_hyp_init(void)
>> diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
>> new file mode 100644
>> index 0000000..a804a73
>> --- /dev/null
>> +++ b/virt/kvm/arm/vgic-v3.c
>> @@ -0,0 +1,220 @@
>> +/*
>> + * Copyright (C) 2013 ARM Limited, All Rights Reserved.
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/cpu.h>
>> +#include <linux/kvm.h>
>> +#include <linux/kvm_host.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/of.h>
>> +#include <linux/of_address.h>
>> +#include <linux/of_irq.h>
>> +
>> +#include <linux/irqchip/arm-gic-v3.h>
>> +
>> +#include <asm/kvm_emulate.h>
>> +#include <asm/kvm_arm.h>
>> +#include <asm/kvm_mmu.h>
>> +
>> +/* These are for GICv2 emulation only */
>
> Is this really true, seems like you're using them to form the lr values
> for the hardware below.
>
>> +#define GICH_LR_VIRTUALID (0x3ffUL << 0)
>> +#define GICH_LR_PHYSID_CPUID_SHIFT (10)
>> +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
>
> can't we include them from the existing header file then?
Good point, I'll move them to the GICv3 header.
>> +
>> +static u32 ich_vtr_el2;
>> +
>> +static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
>> +{
>> + struct vgic_lr lr_desc;
>> + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
>> +
>> + lr_desc.irq = val & GICH_LR_VIRTUALID;
>> + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0xff;
>
> isn't this mask only for bits [12:10] which would make it 0x7 ?
Yeah, same crap as the GICv2 version.
>> + lr_desc.state = 0;
>> +
>> + if (val & GICH_LR_PENDING_BIT)
>> + lr_desc.state |= LR_STATE_PENDING;
>> + if (val & GICH_LR_ACTIVE_BIT)
>> + lr_desc.state |= LR_STATE_ACTIVE;
>> + if (val & GICH_LR_EOI)
>> + lr_desc.state |= LR_EOI_INT;
>> +
>> + return lr_desc;
>> +}
>> +
>> +#define MK_LR_PEND(src, irq) \
>> + (GICH_LR_PENDING_BIT | \
>> + (((u32)(src)) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
>> +
>> +static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
>> + struct vgic_lr lr_desc)
>> +{
>> + u64 lr_val = MK_LR_PEND(lr_desc.source, lr_desc.irq);
>> +
>> + if (lr_desc.state & LR_STATE_PENDING)
>> + lr_val |= GICH_LR_PENDING_BIT;
>> + if (lr_desc.state & LR_STATE_ACTIVE)
>> + lr_val |= GICH_LR_ACTIVE_BIT;
>> + if (lr_desc.state & LR_EOI_INT)
>> + lr_val |= GICH_LR_EOI;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
>> +
>> + /*
>> + * Despite being EOIed, the LR may not have been marked as
>> + * empty.
>> + */
>> + if (!(lr_val & GICH_LR_STATE))
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
>> +}
>
> these functions are _identical_ to those in vgic_v2. Seems like they
> should share the code when emulating GICv2.
The code is quite similar indeed, but the data types are different,
hence the duplication.
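(Concretely, the two cpu_if structs store the LRs differently:

	u32 vgic_lr[VGIC_MAX_LRS];	/* vgic_v2_cpu_if */
	u64 vgic_lr[VGIC_V3_MAX_LRS];	/* vgic_v3_cpu_if */

so the bit twiddling is the same, but the accessors can't be shared as they stand.)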
>> +
>> +static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
>> +{
>> + return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
>> +}
>> +
>> +static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
>> +{
>> + return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
>> +}
>> +
>> +static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
>> +{
>> + u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
>> + u32 ret = 0;
>> +
>> + if (misr & GICH_MISR_EOI)
>> + ret |= INT_STATUS_EOI;
>> + if (misr & GICH_MISR_U)
>> + ret |= INT_STATUS_UNDERFLOW;
>> +
>> + return ret;
>> +}
>> +
>> +static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
>
> vgic_v3?
Awesome. We got lucky! :-)
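(The only reason this didn't blow up is that both cpu_if structs sit in the same union and, if I remember the v2 layout correctly, both start with

	u32 vgic_hcr;
	u32 vgic_vmcr;

so the two vgic_vmcr fields happen to alias.)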
>> +
>> + vmcrp->ctlr = (vmcr & GICH_VMCR_CTLR_MASK) >> GICH_VMCR_CTLR_SHIFT;
>> + vmcrp->abpr = (vmcr & GICH_VMCR_BPR1_MASK) >> GICH_VMCR_BPR1_SHIFT;
>> + vmcrp->bpr = (vmcr & GICH_VMCR_BPR0_MASK) >> GICH_VMCR_BPR0_SHIFT;
>> + vmcrp->pmr = (vmcr & GICH_VMCR_PMR_MASK) >> GICH_VMCR_PMR_SHIFT;
>> +}
>> +
>> +static void vgic_v3_clear_underflow(struct kvm_vcpu *vcpu)
>> +{
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~GICH_HCR_UIE;
>> +}
>> +
>> +static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
>> +{
>> + u32 vmcr;
>> +
>> + vmcr = (vmcrp->ctlr << GICH_VMCR_CTLR_SHIFT) & GICH_VMCR_CTLR_MASK;
>> + vmcr |= (vmcrp->abpr << GICH_VMCR_BPR1_SHIFT) & GICH_VMCR_BPR1_MASK;
>> + vmcr |= (vmcrp->bpr << GICH_VMCR_BPR0_SHIFT) & GICH_VMCR_BPR0_MASK;
>> + vmcr |= (vmcrp->pmr << GICH_VMCR_PMR_SHIFT) & GICH_VMCR_PMR_MASK;
>> +
>> + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
>
> vgic_v3?
Same.
>> +}
>> +
>> +static void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
>> +{
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= GICH_HCR_UIE;
>> +}
>
> can you group set/clear underflow please?
Yup.
>> +
>> +static void vgic_v3_enable(struct kvm_vcpu *vcpu)
>> +{
>> + /*
>> + * By forcing VMCR to zero, the GIC will restore the binary
>> + * points to their reset values. Anything else resets to zero
>> + * anyway.
>> + */
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
>> +
>> + /* Get the show on the road... */
>> + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = GICH_HCR_EN;
>> +}
>> +
>> +static const struct vgic_ops vgic_v3_ops = {
>> + .get_lr = vgic_v3_get_lr,
>> + .set_lr = vgic_v3_set_lr,
>> + .get_elrsr = vgic_v3_get_elrsr,
>> + .get_eisr = vgic_v3_get_eisr,
>> + .get_interrupt_status = vgic_v3_get_interrupt_status,
>> + .set_underflow = vgic_v3_set_underflow,
>> + .clear_underflow = vgic_v3_clear_underflow,
>> + .get_vmcr = vgic_v3_get_vmcr,
>> + .set_vmcr = vgic_v3_set_vmcr,
>> + .enable = vgic_v3_enable,
>> +};
>> +
>> +static struct vgic_params vgic_v3_params;
>> +
>> +int vgic_v3_probe(const struct vgic_ops **ops,
>> + const struct vgic_params **params)
>> +{
>> + int ret = 0;
>> + u32 gicv_idx;
>> + struct resource vcpu_res;
>> + struct device_node *vgic_node;
>> + struct vgic_params *vgic = &vgic_v3_params;
>> +
>> + vgic_node = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
>> + if (!vgic_node) {
>> + kvm_err("error: no compatible GICv3 node in DT\n");
>> + return -ENODEV;
>> + }
>> +
>> + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
>> + if (!vgic->maint_irq) {
>> + kvm_err("error getting vgic maintenance irq from DT\n");
>> + ret = -ENXIO;
>> + goto out;
>> + }
>> +
>> + ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
>
> I'm just going to assume this actually returns ich_vtr_el2 for the rest
> of reviewing this patch and hope that I get to see this function in the
> next patch ;)
?$^&*$?$%!!!!! I really need to come up with this script that checks each
effin' patch...
>> +
>> + /*
>> + * The ListRegs field is 5 bits, but there is an architectural
>> + * maximum of 16 list registers. Just ignore bit 4...
>> + */
>> + vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
>> +
>> + if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
>> + gicv_idx = 1;
>> +
>> + gicv_idx += 3; /* Also skip GICD, GICC, GICH */
>> + if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
>> + kvm_err("Cannot obtain GICV region\n");
>> + ret = -ENXIO;
>> + goto out;
>> + }
>> + vgic->vcpu_base = vcpu_res.start;
>> + vgic->vctrl_base = (void *)(-1);
>
> this indicates to me that all mentions of vctrl_base should be local
> to the vgic_v2 file?
It should, except that vctrl_base is used to implement
irqchip_in_kernel(). I'll change that as well.
>> + vgic->type = VGIC_V3;
>> +
>> + kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
>> + vcpu_res.start, vgic->maint_irq);
>> +
>> + *ops = &vgic_v3_ops;
>> + *params = vgic;
>> +
>> +out:
>> + of_node_put(vgic_node);
>> + return ret;
>> +}
>> diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
>> index 8365189..f29761b 100644
>> --- a/virt/kvm/arm/vgic.c
>> +++ b/virt/kvm/arm/vgic.c
>> @@ -1514,6 +1514,8 @@ int kvm_vgic_hyp_init(void)
>>
>> ret = vgic_v2_probe(&vgic_ops, &vgic);
>> if (ret)
>> + ret = vgic_v3_probe(&vgic_ops, &vgic);
>
> this doesn't compile for me, missing Makefile include but even when
> adding that, then other stuff breaks and the config option can actually
> be set here... :(
>
>> + if (ret)
>> return ret;
>>
>> ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
>> --
>> 1.8.3.4
>>
>
> Please fix the bisectability of this entire series.
>
> I have reviewed the actual functional logic of this patch and have not
> found any issues.
>
> -Christoffer
>
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend
2014-05-09 14:07 ` Christoffer Dall
2014-05-14 17:47 ` Marc Zyngier
@ 2014-05-15 8:13 ` Marc Zyngier
2014-05-15 12:18 ` Christoffer Dall
1 sibling, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-05-15 8:13 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:07:31 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:50PM +0100, Marc Zyngier wrote:
>> Introduce the support code for emulating a GICv2 on top of GICv3
>> hardware.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> include/kvm/arm_vgic.h | 26 ++++++
>> virt/kvm/arm/vgic-v3.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++
>> virt/kvm/arm/vgic.c | 2 +
>> 3 files changed, 248 insertions(+)
>> create mode 100644 virt/kvm/arm/vgic-v3.c
>>
[...]
>> diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
>> new file mode 100644
>> index 0000000..a804a73
>> --- /dev/null
>> +++ b/virt/kvm/arm/vgic-v3.c
>> @@ -0,0 +1,220 @@
>> +/*
>> + * Copyright (C) 2013 ARM Limited, All Rights Reserved.
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/cpu.h>
>> +#include <linux/kvm.h>
>> +#include <linux/kvm_host.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/of.h>
>> +#include <linux/of_address.h>
>> +#include <linux/of_irq.h>
>> +
>> +#include <linux/irqchip/arm-gic-v3.h>
>> +
>> +#include <asm/kvm_emulate.h>
>> +#include <asm/kvm_arm.h>
>> +#include <asm/kvm_mmu.h>
>> +
>> +/* These are for GICv2 emulation only */
>
> Is this really true, seems like you're using them to form the lr values
> for the hardware below.
>
>> +#define GICH_LR_VIRTUALID (0x3ffUL << 0)
>> +#define GICH_LR_PHYSID_CPUID_SHIFT (10)
>> +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
>
> can't we include them from the existing header file then?
I've given this some more thought, and I think they actually belong
here. The arm-gic-v3.h file is really relevant to GICv3 used in v3
mode. Here, we're using the GICv3 HW to inject GICv2-style interrupts
(ID limited to 1023, CPUID present in the LR...).
So I'd rather keep them here than expose them to the outside world, as I
fear this would be rather confusing.
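(For the record, what those local defines describe is the v2-compatible layout of the low LR bits:

	/* [9:0]   virtual interrupt ID, hence the 1023 limit */
	/* [12:10] source CPUID for SGIs                      */

which only makes sense when the guest is shown a GICv2.)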
Thanks,
M.
--
Jazz is not dead. It just smells funny.
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend
2014-05-15 8:13 ` Marc Zyngier
@ 2014-05-15 12:18 ` Christoffer Dall
0 siblings, 0 replies; 57+ messages in thread
From: Christoffer Dall @ 2014-05-15 12:18 UTC (permalink / raw)
To: linux-arm-kernel
On Thu, May 15, 2014 at 09:13:02AM +0100, Marc Zyngier wrote:
> On Fri, May 09 2014 at 3:07:31 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> > On Wed, Apr 16, 2014 at 02:39:50PM +0100, Marc Zyngier wrote:
> >> Introduce the support code for emulating a GICv2 on top of GICv3
> >> hardware.
> >>
> >> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> >> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> >> ---
> >> include/kvm/arm_vgic.h | 26 ++++++
> >> virt/kvm/arm/vgic-v3.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++
> >> virt/kvm/arm/vgic.c | 2 +
> >> 3 files changed, 248 insertions(+)
> >> create mode 100644 virt/kvm/arm/vgic-v3.c
> >>
>
> [...]
>
> >> diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
> >> new file mode 100644
> >> index 0000000..a804a73
> >> --- /dev/null
> >> +++ b/virt/kvm/arm/vgic-v3.c
> >> @@ -0,0 +1,220 @@
> >> +/*
> >> + * Copyright (C) 2013 ARM Limited, All Rights Reserved.
> >> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> >> + *
> >> + * This program is free software; you can redistribute it and/or modify
> >> + * it under the terms of the GNU General Public License version 2 as
> >> + * published by the Free Software Foundation.
> >> + *
> >> + * This program is distributed in the hope that it will be useful,
> >> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> >> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> >> + * GNU General Public License for more details.
> >> + *
> >> + * You should have received a copy of the GNU General Public License
> >> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> >> + */
> >> +
> >> +#include <linux/cpu.h>
> >> +#include <linux/kvm.h>
> >> +#include <linux/kvm_host.h>
> >> +#include <linux/interrupt.h>
> >> +#include <linux/io.h>
> >> +#include <linux/of.h>
> >> +#include <linux/of_address.h>
> >> +#include <linux/of_irq.h>
> >> +
> >> +#include <linux/irqchip/arm-gic-v3.h>
> >> +
> >> +#include <asm/kvm_emulate.h>
> >> +#include <asm/kvm_arm.h>
> >> +#include <asm/kvm_mmu.h>
> >> +
> >> +/* These are for GICv2 emulation only */
> >
> > Is this really true, seems like you're using them to form the lr values
> > for the hardware below.
> >
> >> +#define GICH_LR_VIRTUALID (0x3ffUL << 0)
> >> +#define GICH_LR_PHYSID_CPUID_SHIFT (10)
> >> +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
> >
> > can't we include them from the existing header file then?
>
> I've given this some more thought, and I think they actually belong
> here. The arm-gic-v3.h file is really relevant to GICv3 used in v3
> mode. Here, we're using the GICv3 HW to inject GICv2-style interrupts
> (ID limited to 1023, CPUID present in the LR...).
>
> So I'd rather keep them here than expose it to the outside world, as I
> fear this would be rather confusing.
>
Fair enough.
-Christoffer
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch
2014-04-16 13:39 [PATCH v3 00/19] arm64: GICv3 support Marc Zyngier
` (17 preceding siblings ...)
2014-04-16 13:39 ` [PATCH v3 18/19] KVM: ARM: vgic: add the GICv3 backend Marc Zyngier
@ 2014-04-16 13:39 ` Marc Zyngier
2014-05-09 14:07 ` Christoffer Dall
18 siblings, 1 reply; 57+ messages in thread
From: Marc Zyngier @ 2014-04-16 13:39 UTC (permalink / raw)
To: linux-arm-kernel
Introduce the GICv3 world switch code and helper functions, enabling
GICv2 emulation on GICv3 hardware.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
arch/arm64/include/asm/kvm_asm.h | 4 +
arch/arm64/include/asm/kvm_host.h | 7 +
arch/arm64/kernel/asm-offsets.c | 8 ++
arch/arm64/kvm/Makefile | 2 +
arch/arm64/kvm/vgic-v3-switch.S | 279 ++++++++++++++++++++++++++++++++++++++
5 files changed, 300 insertions(+)
create mode 100644 arch/arm64/kvm/vgic-v3-switch.S
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 6515a52..270ea13 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -105,8 +105,12 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+
extern char __save_vgic_v2_state[];
extern char __restore_vgic_v2_state[];
+extern char __save_vgic_v3_state[];
+extern char __restore_vgic_v3_state[];
#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 65f0c43..a10803c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -216,6 +216,13 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
__vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
break;
+#ifdef CONFIG_ARM_GIC_V3
+ case VGIC_V3:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
+ break;
+#endif
+
default:
BUG();
}
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index dafc415..e74654c 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -139,6 +139,14 @@ int main(void)
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
+ DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
+ DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
+ DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
+ DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
+ DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
+ DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
+ DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index daf24dc..32a0961 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -22,4 +22,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 0000000..7d2bc86
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Make sure stores to the GIC via the memory mapped interface
+ // are now visible to the system register interface
+ dsb sy
+
+ // Save all interesting registers
+ mrs x4, ICH_HCR_EL2
+ mrs x5, ICH_VMCR_EL2
+ mrs x6, ICH_MISR_EL2
+ mrs x7, ICH_EISR_EL2
+ mrs x8, ICH_ELSR_EL2
+
+ str w4, [x3, #VGIC_V3_CPU_HCR]
+ str w5, [x3, #VGIC_V3_CPU_VMCR]
+ str w6, [x3, #VGIC_V3_CPU_MISR]
+ str w7, [x3, #VGIC_V3_CPU_EISR]
+ str w8, [x3, #VGIC_V3_CPU_ELRSR]
+
+ msr ICH_HCR_EL2, xzr
+
+ mrs x21, ICH_VTR_EL2
+ and w22, w21, #0xf
+ mov w23, #0xf
+ sub w23, w23, w22 // How many regs we have to skip
+
+ adr x24, 1f
+ add x24, x24, x23, lsl #2
+ br x24
+
+1:
+ mrs x20, ICH_LR15_EL2
+ mrs x19, ICH_LR14_EL2
+ mrs x18, ICH_LR13_EL2
+ mrs x17, ICH_LR12_EL2
+ mrs x16, ICH_LR11_EL2
+ mrs x15, ICH_LR10_EL2
+ mrs x14, ICH_LR9_EL2
+ mrs x13, ICH_LR8_EL2
+ mrs x12, ICH_LR7_EL2
+ mrs x11, ICH_LR6_EL2
+ mrs x10, ICH_LR5_EL2
+ mrs x9, ICH_LR4_EL2
+ mrs x8, ICH_LR3_EL2
+ mrs x7, ICH_LR2_EL2
+ mrs x6, ICH_LR1_EL2
+ mrs x5, ICH_LR0_EL2
+
+ adr x24, 1f
+ add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
+ br x24
+
+1:
+ str x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
+ str x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
+ str x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
+ str x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
+ str x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
+ str x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
+ str x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
+ str x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
+ str x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
+ str x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
+ str x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
+ str x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
+ str x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
+ str x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
+ str x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
+ str x5, [x3, #VGIC_V3_CPU_LR]
+
+ lsr w22, w21, #29 // Get PRIbits
+ cmp w22, #4 // 5 bits
+ b.eq 5f
+ cmp w22, #5 // 6 bits
+ b.eq 6f
+ // 7 bits
+ mrs x20, ICH_AP0R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ mrs x19, ICH_AP0R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6: mrs x18, ICH_AP0R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5: mrs x17, ICH_AP0R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP0R]
+
+ cmp w22, #4 // 5 bits
+ b.eq 5f
+ cmp w22, #5 // 6 bits
+ b.eq 6f
+ // 7 bits
+ mrs x20, ICH_AP1R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ mrs x19, ICH_AP1R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6: mrs x18, ICH_AP1R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5: mrs x17, ICH_AP1R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP1R]
+
+ // Restore SRE_EL1 access and re-enable SRE at EL1.
+ mrs x5, ICC_SRE_EL2
+ orr x5, x5, #(1 << 3)
+ msr ICC_SRE_EL2, x5
+ isb
+ mov x5, #1
+ msr ICC_SRE_EL1, x5
+
+ mov x2, #HCR_RW
+ msr hcr_el2, x2
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+ ldr x2, [x0, #VCPU_IRQ_LINES]
+ ldr x1, [x0, #VCPU_HCR_EL2]
+ orr x2, x2, x1
+ msr hcr_el2, x2
+
+ // Disable SRE_EL1 access. Necessary, otherwise
+ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+ msr ICC_SRE_EL1, xzr
+ isb
+
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Restore all interesting registers
+ ldr w4, [x3, #VGIC_V3_CPU_HCR]
+ ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+
+ msr ICH_HCR_EL2, x4
+ msr ICH_VMCR_EL2, x5
+
+ mrs x21, ICH_VTR_EL2
+
+ lsr w22, w21, #29 // Get PRIbits
+ cmp w22, #4 // 5 bits
+ b.eq 5f
+ cmp w22, #5 // 6 bits
+ b.eq 6f
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ msr ICH_AP1R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+ msr ICH_AP1R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+ msr ICH_AP1R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
+ msr ICH_AP1R0_EL2, x17
+
+ cmp w22, #4 // 5 bits
+ b.eq 5f
+ cmp w22, #5 // 6 bits
+ b.eq 6f
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ msr ICH_AP0R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+ msr ICH_AP0R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+ msr ICH_AP0R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
+ msr ICH_AP0R0_EL2, x17
+
+ and w22, w21, #0xf
+ mov w23, #0xf
+ sub w23, w23, w22 // How many regs we have to skip
+
+ adr x24, 1f
+ add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
+ br x24
+
+1:
+ ldr x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
+ ldr x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
+ ldr x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
+ ldr x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
+ ldr x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
+ ldr x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
+ ldr x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
+ ldr x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
+ ldr x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
+ ldr x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
+ ldr x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
+ ldr x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
+ ldr x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
+ ldr x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
+ ldr x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
+ ldr x5, [x3, #VGIC_V3_CPU_LR]
+
+ adr x24, 1f
+ add x24, x24, x23, lsl #2
+ br x24
+
+1:
+ msr ICH_LR15_EL2, x20
+ msr ICH_LR14_EL2, x19
+ msr ICH_LR13_EL2, x18
+ msr ICH_LR12_EL2, x17
+ msr ICH_LR11_EL2, x16
+ msr ICH_LR10_EL2, x15
+ msr ICH_LR9_EL2, x14
+ msr ICH_LR8_EL2, x13
+ msr ICH_LR7_EL2, x12
+ msr ICH_LR6_EL2, x11
+ msr ICH_LR5_EL2, x10
+ msr ICH_LR4_EL2, x9
+ msr ICH_LR3_EL2, x8
+ msr ICH_LR2_EL2, x7
+ msr ICH_LR1_EL2, x6
+ msr ICH_LR0_EL2, x5
+
+ // Ensure that the above will be visible via the memory-mapped
+ // view of the CPU interface (GICV).
+ isb
+ dsb sy
+
+ // Prevent the guest from touching the GIC system registers
+ mrs x5, ICC_SRE_EL2
+ and x5, x5, #~(1 << 3)
+ msr ICC_SRE_EL2, x5
+.endm
+
+ENTRY(__save_vgic_v3_state)
+ save_vgic_v3_state
+ ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+ restore_vgic_v3_state
+ ret
+ENDPROC(__restore_vgic_v3_state)
+
+ENTRY(__vgic_v3_get_ich_vtr_el2)
+ mrs x0, ICH_VTR_EL2
+ ret
+ENDPROC(__vgic_v3_get_ich_vtr_el2)
+
+ .popsection
--
1.8.3.4
^ permalink raw reply related [flat|nested] 57+ messages in thread
* [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch
2014-04-16 13:39 ` [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch Marc Zyngier
@ 2014-05-09 14:07 ` Christoffer Dall
2014-05-15 8:31 ` Marc Zyngier
0 siblings, 1 reply; 57+ messages in thread
From: Christoffer Dall @ 2014-05-09 14:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 16, 2014 at 02:39:51PM +0100, Marc Zyngier wrote:
> Introduce the GICv3 world switch code and helper functions, enabling
> GICv2 emulation on GICv3 hardware.
>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
> arch/arm64/include/asm/kvm_asm.h | 4 +
> arch/arm64/include/asm/kvm_host.h | 7 +
> arch/arm64/kernel/asm-offsets.c | 8 ++
> arch/arm64/kvm/Makefile | 2 +
> arch/arm64/kvm/vgic-v3-switch.S | 279 ++++++++++++++++++++++++++++++++++++++
> 5 files changed, 300 insertions(+)
> create mode 100644 arch/arm64/kvm/vgic-v3-switch.S
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 6515a52..270ea13 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -105,8 +105,12 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
>
> extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
>
> +extern u64 __vgic_v3_get_ich_vtr_el2(void);
> +
> extern char __save_vgic_v2_state[];
> extern char __restore_vgic_v2_state[];
> +extern char __save_vgic_v3_state[];
> +extern char __restore_vgic_v3_state[];
>
> #endif
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 65f0c43..a10803c 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -216,6 +216,13 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
> __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
> break;
>
> +#ifdef CONFIG_ARM_GIC_V3
> + case VGIC_V3:
> + __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
> + __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
> + break;
> +#endif
> +
> default:
> BUG();
> }
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index dafc415..e74654c 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -139,6 +139,14 @@ int main(void)
> DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
> DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
> DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
> + DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
> + DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
> + DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
> + DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
> + DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
> + DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
> + DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
> + DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
> DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
> DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
> DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index daf24dc..32a0961 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -22,4 +22,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
> kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
> +kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
> kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
> diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
> new file mode 100644
> index 0000000..7d2bc86
> --- /dev/null
> +++ b/arch/arm64/kvm/vgic-v3-switch.S
> @@ -0,0 +1,279 @@
> +/*
> + * Copyright (C) 2012,2013 - ARM Ltd
> + * Author: Marc Zyngier <marc.zyngier@arm.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/linkage.h>
> +#include <linux/irqchip/arm-gic-v3.h>
> +
> +#include <asm/assembler.h>
> +#include <asm/memory.h>
> +#include <asm/asm-offsets.h>
> +#include <asm/kvm.h>
> +#include <asm/kvm_asm.h>
> +#include <asm/kvm_arm.h>
> +
> + .text
> + .pushsection .hyp.text, "ax"
> +
> +/*
> + * Save the VGIC CPU state into memory
> + * x0: Register pointing to VCPU struct
> + * Do not corrupt x1!!!
> + */
> +.macro save_vgic_v3_state
> + // Compute the address of struct vgic_cpu
> + add x3, x0, #VCPU_VGIC_CPU
> +
> + // Make sure stores to the GIC via the memory mapped interface
> + // are now visible to the system register interface
> + dsb sy
> +
> + // Save all interesting registers
> + mrs x4, ICH_HCR_EL2
> + mrs x5, ICH_VMCR_EL2
> + mrs x6, ICH_MISR_EL2
> + mrs x7, ICH_EISR_EL2
> + mrs x8, ICH_ELSR_EL2
> +
> + str w4, [x3, #VGIC_V3_CPU_HCR]
> + str w5, [x3, #VGIC_V3_CPU_VMCR]
> + str w6, [x3, #VGIC_V3_CPU_MISR]
> + str w7, [x3, #VGIC_V3_CPU_EISR]
> + str w8, [x3, #VGIC_V3_CPU_ELRSR]
> +
> + msr ICH_HCR_EL2, xzr
> +
> + mrs x21, ICH_VTR_EL2
> + and w22, w21, #0xf
> + mov w23, #0xf
> + sub w23, w23, w22 // How many regs we have to skip
> +
> + adr x24, 1f
> + add x24, x24, x23, lsl #2
> + br x24
> +
> +1:
> + mrs x20, ICH_LR15_EL2
> + mrs x19, ICH_LR14_EL2
> + mrs x18, ICH_LR13_EL2
> + mrs x17, ICH_LR12_EL2
> + mrs x16, ICH_LR11_EL2
> + mrs x15, ICH_LR10_EL2
> + mrs x14, ICH_LR9_EL2
> + mrs x13, ICH_LR8_EL2
> + mrs x12, ICH_LR7_EL2
> + mrs x11, ICH_LR6_EL2
> + mrs x10, ICH_LR5_EL2
> + mrs x9, ICH_LR4_EL2
> + mrs x8, ICH_LR3_EL2
> + mrs x7, ICH_LR2_EL2
> + mrs x6, ICH_LR1_EL2
> + mrs x5, ICH_LR0_EL2
> +
> + adr x24, 1f
> + add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
> + br x24
> +
> +1:
> + str x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
> + str x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
> + str x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
> + str x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
> + str x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
> + str x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
> + str x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
> + str x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
> + str x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
> + str x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
> + str x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
> + str x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
> + str x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
> + str x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
> + str x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
> + str x5, [x3, #VGIC_V3_CPU_LR]
> +
> + lsr w22, w21, #29 // Get PRIbits
> + cmp w22, #4 // 5 bits
> + b.eq 5f
> + cmp w22, #5 // 6 bits
> + b.eq 6f
> + // 7 bits
> + mrs x20, ICH_AP0R3_EL2
> + str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
> + mrs x19, ICH_AP0R2_EL2
> + str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
> +6: mrs x18, ICH_AP0R1_EL2
> + str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
> +5: mrs x17, ICH_AP0R0_EL2
> + str w17, [x3, #VGIC_V3_CPU_AP0R]
> +
> + cmp w22, #4 // 5 bits
> + b.eq 5f
> + cmp w22, #5 // 6 bits
> + b.eq 6f
> + // 7 bits
> + mrs x20, ICH_AP1R3_EL2
> + str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
> + mrs x19, ICH_AP1R2_EL2
> + str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
> +6: mrs x18, ICH_AP1R1_EL2
> + str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
> +5: mrs x17, ICH_AP1R0_EL2
> + str w17, [x3, #VGIC_V3_CPU_AP1R]
> +
> + // Restore SRE_EL1 access and re-enable SRE at EL1.
> + mrs x5, ICC_SRE_EL2
> + orr x5, x5, #(1 << 3)
couldn't we define ICC_SRE_ENABLE (1 << 3)?
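Something like (hypothetical name):

#define ICC_SRE_EL2_ENABLE	(1 << 3)

would make the intent clearer than the bare shift.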
> + msr ICC_SRE_EL2, x5
> + isb
> + mov x5, #1
> + msr ICC_SRE_EL1, x5
the other bits are always read-only (WI), so you can safely just
overwrite all other bits here?
> +
> + mov x2, #HCR_RW
> + msr hcr_el2, x2
> +.endm
> +
> +/*
> + * Restore the VGIC CPU state from memory
> + * x0: Register pointing to VCPU struct
> + */
> +.macro restore_vgic_v3_state
> + ldr x2, [x0, #VCPU_IRQ_LINES]
again, what can this be used for with aarch64?
> + ldr x1, [x0, #VCPU_HCR_EL2]
> + orr x2, x2, x1
> + msr hcr_el2, x2
> +
> + // Disable SRE_EL1 access. Necessary, otherwise
> + // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
> + msr ICC_SRE_EL1, xzr
> + isb
> +
> + // Compute the address of struct vgic_cpu
> + add x3, x0, #VCPU_VGIC_CPU
> +
> + // Restore all interesting registers
> + ldr w4, [x3, #VGIC_V3_CPU_HCR]
> + ldr w5, [x3, #VGIC_V3_CPU_VMCR]
> +
> + msr ICH_HCR_EL2, x4
> + msr ICH_VMCR_EL2, x5
> +
> + mrs x21, ICH_VTR_EL2
> +
> + lsr w22, w21, #29 // Get PRIbits
> + cmp w22, #4 // 5 bits
> + b.eq 5f
> + cmp w22, #5 // 6 bits
> + b.eq 6f
> + // 7 bits
> + ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
> + msr ICH_AP1R3_EL2, x20
> + ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
> + msr ICH_AP1R2_EL2, x19
> +6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
> + msr ICH_AP1R1_EL2, x18
> +5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
> + msr ICH_AP1R0_EL2, x17
> +
> + cmp w22, #4 // 5 bits
> + b.eq 5f
> + cmp w22, #5 // 6 bits
> + b.eq 6f
> + // 7 bits
> + ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
> + msr ICH_AP0R3_EL2, x20
> + ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
> + msr ICH_AP0R2_EL2, x19
> +6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
> + msr ICH_AP0R1_EL2, x18
> +5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
> + msr ICH_AP0R0_EL2, x17
> +
> + and w22, w21, #0xf
> + mov w23, #0xf
> + sub w23, w23, w22 // How many regs we have to skip
> +
> + adr x24, 1f
> + add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
> + br x24
> +
> +1:
> + ldr x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
> + ldr x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
> + ldr x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
> + ldr x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
> + ldr x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
> + ldr x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
> + ldr x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
> + ldr x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
> + ldr x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
> + ldr x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
> + ldr x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
> + ldr x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
> + ldr x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
> + ldr x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
> + ldr x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
> + ldr x5, [x3, #VGIC_V3_CPU_LR]
> +
> + adr x24, 1f
> + add x24, x24, x23, lsl #2
> + br x24
> +
> +1:
> + msr ICH_LR15_EL2, x20
> + msr ICH_LR14_EL2, x19
> + msr ICH_LR13_EL2, x18
> + msr ICH_LR12_EL2, x17
> + msr ICH_LR11_EL2, x16
> + msr ICH_LR10_EL2, x15
> + msr ICH_LR9_EL2, x14
> + msr ICH_LR8_EL2, x13
> + msr ICH_LR7_EL2, x12
> + msr ICH_LR6_EL2, x11
> + msr ICH_LR5_EL2, x10
> + msr ICH_LR4_EL2, x9
> + msr ICH_LR3_EL2, x8
> + msr ICH_LR2_EL2, x7
> + msr ICH_LR1_EL2, x6
> + msr ICH_LR0_EL2, x5
> +
> + // Ensure that the above will be visible via the memory-mapped
> + // view of the CPU interface (GICV).
> + isb
> + dsb sy
> +
> + // Prevent the guest from touching the GIC system registers
> + mrs x5, ICC_SRE_EL2
> + and x5, x5, #~(1 << 3)
ditto on the define
> + msr ICC_SRE_EL2, x5
I trust Will reviewed all the barriers etc., but you really don't
need an ISB or anything here?
> +.endm
> +
> +ENTRY(__save_vgic_v3_state)
> + save_vgic_v3_state
> + ret
> +ENDPROC(__save_vgic_v3_state)
> +
> +ENTRY(__restore_vgic_v3_state)
> + restore_vgic_v3_state
> + ret
> +ENDPROC(__restore_vgic_v3_state)
> +
> +ENTRY(__vgic_v3_get_ich_vtr_el2)
> + mrs x0, ICH_VTR_EL2
> + ret
> +ENDPROC(__vgic_v3_get_ich_vtr_el2)
> +
> + .popsection
> --
> 1.8.3.4
>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
^ permalink raw reply [flat|nested] 57+ messages in thread
* [PATCH v3 19/19] arm64: KVM: vgic: add GICv3 world switch
2014-05-09 14:07 ` Christoffer Dall
@ 2014-05-15 8:31 ` Marc Zyngier
0 siblings, 0 replies; 57+ messages in thread
From: Marc Zyngier @ 2014-05-15 8:31 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, May 09 2014 at 3:07:38 pm BST, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Wed, Apr 16, 2014 at 02:39:51PM +0100, Marc Zyngier wrote:
>> Introduce the GICv3 world switch code and helper functions, enabling
>> GICv2 emulation on GICv3 hardware.
>>
>> Acked-by: Catalin Marinas <catalin.marinas@arm.com>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>> arch/arm64/include/asm/kvm_asm.h | 4 +
>> arch/arm64/include/asm/kvm_host.h | 7 +
>> arch/arm64/kernel/asm-offsets.c | 8 ++
>> arch/arm64/kvm/Makefile | 2 +
>> arch/arm64/kvm/vgic-v3-switch.S | 279 ++++++++++++++++++++++++++++++++++++++
>> 5 files changed, 300 insertions(+)
>> create mode 100644 arch/arm64/kvm/vgic-v3-switch.S
>>
>> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
>> index 6515a52..270ea13 100644
>> --- a/arch/arm64/include/asm/kvm_asm.h
>> +++ b/arch/arm64/include/asm/kvm_asm.h
>> @@ -105,8 +105,12 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
>>
>> extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
>>
>> +extern u64 __vgic_v3_get_ich_vtr_el2(void);
>> +
>> extern char __save_vgic_v2_state[];
>> extern char __restore_vgic_v2_state[];
>> +extern char __save_vgic_v3_state[];
>> +extern char __restore_vgic_v3_state[];
>>
>> #endif
>>
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index 65f0c43..a10803c 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -216,6 +216,13 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
>> __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
>> break;
>>
>> +#ifdef CONFIG_ARM_GIC_V3
>> + case VGIC_V3:
>> + __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
>> + __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
>> + break;
>> +#endif
>> +
>> default:
>> BUG();
>> }
>> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
>> index dafc415..e74654c 100644
>> --- a/arch/arm64/kernel/asm-offsets.c
>> +++ b/arch/arm64/kernel/asm-offsets.c
>> @@ -139,6 +139,14 @@ int main(void)
>> DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
>> DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
>> DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
>> + DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
>> + DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
>> + DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
>> + DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
>> + DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
>> + DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
>> + DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
>> + DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
>> DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
>> DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
>> DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
>> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
>> index daf24dc..32a0961 100644
>> --- a/arch/arm64/kvm/Makefile
>> +++ b/arch/arm64/kvm/Makefile
>> @@ -22,4 +22,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
>> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
>> kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
>> kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
>> +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
>> +kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
>> kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
>> diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
>> new file mode 100644
>> index 0000000..7d2bc86
>> --- /dev/null
>> +++ b/arch/arm64/kvm/vgic-v3-switch.S
>> @@ -0,0 +1,279 @@
>> +/*
>> + * Copyright (C) 2012,2013 - ARM Ltd
>> + * Author: Marc Zyngier <marc.zyngier@arm.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/linkage.h>
>> +#include <linux/irqchip/arm-gic-v3.h>
>> +
>> +#include <asm/assembler.h>
>> +#include <asm/memory.h>
>> +#include <asm/asm-offsets.h>
>> +#include <asm/kvm.h>
>> +#include <asm/kvm_asm.h>
>> +#include <asm/kvm_arm.h>
>> +
>> + .text
>> + .pushsection .hyp.text, "ax"
>> +
>> +/*
>> + * Save the VGIC CPU state into memory
>> + * x0: Register pointing to VCPU struct
>> + * Do not corrupt x1!!!
>> + */
>> +.macro save_vgic_v3_state
>> + // Compute the address of struct vgic_cpu
>> + add x3, x0, #VCPU_VGIC_CPU
>> +
>> + // Make sure stores to the GIC via the memory mapped interface
>> + // are now visible to the system register interface
>> + dsb sy
>> +
>> + // Save all interesting registers
>> + mrs x4, ICH_HCR_EL2
>> + mrs x5, ICH_VMCR_EL2
>> + mrs x6, ICH_MISR_EL2
>> + mrs x7, ICH_EISR_EL2
>> + mrs x8, ICH_ELSR_EL2
>> +
>> + str w4, [x3, #VGIC_V3_CPU_HCR]
>> + str w5, [x3, #VGIC_V3_CPU_VMCR]
>> + str w6, [x3, #VGIC_V3_CPU_MISR]
>> + str w7, [x3, #VGIC_V3_CPU_EISR]
>> + str w8, [x3, #VGIC_V3_CPU_ELRSR]
>> +
>> + msr ICH_HCR_EL2, xzr
>> +
>> + mrs x21, ICH_VTR_EL2
>> + and w22, w21, #0xf
>> + mov w23, #0xf
>> + sub w23, w23, w22 // How many regs we have to skip
>> +
>> + adr x24, 1f
>> + add x24, x24, x23, lsl #2
>> + br x24
>> +
>> +1:
>> + mrs x20, ICH_LR15_EL2
>> + mrs x19, ICH_LR14_EL2
>> + mrs x18, ICH_LR13_EL2
>> + mrs x17, ICH_LR12_EL2
>> + mrs x16, ICH_LR11_EL2
>> + mrs x15, ICH_LR10_EL2
>> + mrs x14, ICH_LR9_EL2
>> + mrs x13, ICH_LR8_EL2
>> + mrs x12, ICH_LR7_EL2
>> + mrs x11, ICH_LR6_EL2
>> + mrs x10, ICH_LR5_EL2
>> + mrs x9, ICH_LR4_EL2
>> + mrs x8, ICH_LR3_EL2
>> + mrs x7, ICH_LR2_EL2
>> + mrs x6, ICH_LR1_EL2
>> + mrs x5, ICH_LR0_EL2
>> +
>> + adr x24, 1f
>> + add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
>> + br x24
>> +
>> +1:
>> + str x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
>> + str x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
>> + str x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
>> + str x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
>> + str x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
>> + str x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
>> + str x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
>> + str x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
>> + str x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
>> + str x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
>> + str x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
>> + str x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
>> + str x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
>> + str x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
>> + str x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
>> + str x5, [x3, #VGIC_V3_CPU_LR]
>> +
>> + lsr w22, w21, #29 // Get PRIbits
>> + cmp w22, #4 // 5 bits
>> + b.eq 5f
>> + cmp w22, #5 // 6 bits
>> + b.eq 6f
>> + // 7 bits
>> + mrs x20, ICH_AP0R3_EL2
>> + str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
>> + mrs x19, ICH_AP0R2_EL2
>> + str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
>> +6: mrs x18, ICH_AP0R1_EL2
>> + str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
>> +5: mrs x17, ICH_AP0R0_EL2
>> + str w17, [x3, #VGIC_V3_CPU_AP0R]
>> +
>> + cmp w22, #4 // 5 bits
>> + b.eq 5f
>> + cmp w22, #5 // 6 bits
>> + b.eq 6f
>> + // 7 bits
>> + mrs x20, ICH_AP1R3_EL2
>> + str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
>> + mrs x19, ICH_AP1R2_EL2
>> + str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
>> +6: mrs x18, ICH_AP1R1_EL2
>> + str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
>> +5: mrs x17, ICH_AP1R0_EL2
>> + str w17, [x3, #VGIC_V3_CPU_AP1R]
>> +
>> + // Restore SRE_EL1 access and re-enable SRE at EL1.
>> + mrs x5, ICC_SRE_EL2
>> + orr x5, x5, #(1 << 3)
>
> couldn't we define ICC_SRE_ENABLE (1 << 3)?
We sure can.
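Something like this in include/linux/irqchip/arm-gic-v3.h would do (the
names below are only a suggestion, not necessarily what the next spin
will use):

 #define ICC_SRE_EL2_SRE			(1 << 0)
 #define ICC_SRE_EL2_ENABLE		(1 << 3)

and the save path above would then read

	// Restore SRE_EL1 access and re-enable SRE at EL1.
	mrs	x5, ICC_SRE_EL2
	orr	x5, x5, #ICC_SRE_EL2_ENABLE
	msr	ICC_SRE_EL2, x5

with the restore path clearing the same constant instead of the
open-coded ~(1 << 3).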
>> + msr ICC_SRE_EL2, x5
>> + isb
>> + mov x5, #1
>> + msr ICC_SRE_EL1, x5
>
> the other bits are always read-only (WI), so you can safely just
> overwrite all other bits here?
Essentially, yes. The only case where the other bits are writable is
when EL2 is not present, and that's obviously not the case if we're
running KVM.
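If we want the SRE_EL1 write to be just as readable, a matching define
could be used as well (again, only a sketch):

 #define ICC_SRE_EL1_SRE			(1 << 0)

	mov	x5, #ICC_SRE_EL1_SRE	// DFB/DIB are RO with EL2 present
	msr	ICC_SRE_EL1, x5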
>> +
>> + mov x2, #HCR_RW
>> + msr hcr_el2, x2
>> +.endm
>> +
>> +/*
>> + * Restore the VGIC CPU state from memory
>> + * x0: Register pointing to VCPU struct
>> + */
>> +.macro restore_vgic_v3_state
>> + ldr x2, [x0, #VCPU_IRQ_LINES]
>
> again, what can this be used for with aarch64?
It is still used: irq_lines carries the HCR_EL2.VI/VF bits for interrupts
injected directly from userspace (KVM_IRQ_LINE without an in-kernel GIC),
and that mechanism works exactly the same on arm64.
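For context, those bits end up in irq_lines via the shared
vcpu_interrupt_line() code; the snippet below is only a paraphrased
sketch of it (the real function also checks for no-op updates and kicks
the vcpu):

	static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number,
				       bool level)
	{
		unsigned long *ptr = (unsigned long *)&vcpu->arch.irq_lines;
		int bit_index;

		if (number == KVM_ARM_IRQ_CPU_IRQ)
			bit_index = __ffs(HCR_VI);	/* virtual IRQ pin */
		else	/* KVM_ARM_IRQ_CPU_FIQ */
			bit_index = __ffs(HCR_VF);	/* virtual FIQ pin */

		if (level)
			set_bit(bit_index, ptr);
		else
			clear_bit(bit_index, ptr);

		return 0;
	}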
>> + ldr x1, [x0, #VCPU_HCR_EL2]
>> + orr x2, x2, x1
>> + msr hcr_el2, x2
>> +
>> + // Disable SRE_EL1 access. Necessary, otherwise
>> + // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
>> + msr ICC_SRE_EL1, xzr
>> + isb
>> +
>> + // Compute the address of struct vgic_cpu
>> + add x3, x0, #VCPU_VGIC_CPU
>> +
>> + // Restore all interesting registers
>> + ldr w4, [x3, #VGIC_V3_CPU_HCR]
>> + ldr w5, [x3, #VGIC_V3_CPU_VMCR]
>> +
>> + msr ICH_HCR_EL2, x4
>> + msr ICH_VMCR_EL2, x5
>> +
>> + mrs x21, ICH_VTR_EL2
>> +
>> + lsr w22, w21, #29 // Get PRIbits
>> + cmp w22, #4 // 5 bits
>> + b.eq 5f
>> + cmp w22, #5 // 6 bits
>> + b.eq 6f
>> + // 7 bits
>> + ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
>> + msr ICH_AP1R3_EL2, x20
>> + ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
>> + msr ICH_AP1R2_EL2, x19
>> +6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
>> + msr ICH_AP1R1_EL2, x18
>> +5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
>> + msr ICH_AP1R0_EL2, x17
>> +
>> + cmp w22, #4 // 5 bits
>> + b.eq 5f
>> + cmp w22, #5 // 6 bits
>> + b.eq 6f
>> + // 7 bits
>> + ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
>> + msr ICH_AP0R3_EL2, x20
>> + ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
>> + msr ICH_AP0R2_EL2, x19
>> +6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
>> + msr ICH_AP0R1_EL2, x18
>> +5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
>> + msr ICH_AP0R0_EL2, x17
>> +
>> + and w22, w21, #0xf
>> + mov w23, #0xf
>> + sub w23, w23, w22 // How many regs we have to skip
>> +
>> + adr x24, 1f
>> + add x24, x24, x23, lsl #2 // adr(1f) + unimp_nr*4
>> + br x24
>> +
>> +1:
>> + ldr x20, [x3, #(VGIC_V3_CPU_LR + 15*8)]
>> + ldr x19, [x3, #(VGIC_V3_CPU_LR + 14*8)]
>> + ldr x18, [x3, #(VGIC_V3_CPU_LR + 13*8)]
>> + ldr x17, [x3, #(VGIC_V3_CPU_LR + 12*8)]
>> + ldr x16, [x3, #(VGIC_V3_CPU_LR + 11*8)]
>> + ldr x15, [x3, #(VGIC_V3_CPU_LR + 10*8)]
>> + ldr x14, [x3, #(VGIC_V3_CPU_LR + 9*8)]
>> + ldr x13, [x3, #(VGIC_V3_CPU_LR + 8*8)]
>> + ldr x12, [x3, #(VGIC_V3_CPU_LR + 7*8)]
>> + ldr x11, [x3, #(VGIC_V3_CPU_LR + 6*8)]
>> + ldr x10, [x3, #(VGIC_V3_CPU_LR + 5*8)]
>> + ldr x9, [x3, #(VGIC_V3_CPU_LR + 4*8)]
>> + ldr x8, [x3, #(VGIC_V3_CPU_LR + 3*8)]
>> + ldr x7, [x3, #(VGIC_V3_CPU_LR + 2*8)]
>> + ldr x6, [x3, #(VGIC_V3_CPU_LR + 1*8)]
>> + ldr x5, [x3, #VGIC_V3_CPU_LR]
>> +
>> + adr x24, 1f
>> + add x24, x24, x23, lsl #2
>> + br x24
>> +
>> +1:
>> + msr ICH_LR15_EL2, x20
>> + msr ICH_LR14_EL2, x19
>> + msr ICH_LR13_EL2, x18
>> + msr ICH_LR12_EL2, x17
>> + msr ICH_LR11_EL2, x16
>> + msr ICH_LR10_EL2, x15
>> + msr ICH_LR9_EL2, x14
>> + msr ICH_LR8_EL2, x13
>> + msr ICH_LR7_EL2, x12
>> + msr ICH_LR6_EL2, x11
>> + msr ICH_LR5_EL2, x10
>> + msr ICH_LR4_EL2, x9
>> + msr ICH_LR3_EL2, x8
>> + msr ICH_LR2_EL2, x7
>> + msr ICH_LR1_EL2, x6
>> + msr ICH_LR0_EL2, x5
>> +
>> + // Ensure that the above will be visible via the memory-mapped
>> + // view of the CPU interface (GICV).
>> + isb
>> + dsb sy
>> +
>> + // Prevent the guest from touching the GIC system registers
>> + mrs x5, ICC_SRE_EL2
>> + and x5, x5, #~(1 << 3)
>
> ditto on the define
>
>> + msr ICC_SRE_EL2, x5
>
> I trust Will reviewed all the barriers etc., but you really don't
> need an ISB or anything here?
No, we should be fine at that stage. The final ERET into the guest
ensures architectural execution of this instruction.
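To spell it out: the only agent that needs to observe this write is the
guest itself, and the run loop ends with an exception return, roughly
(sketch only, not the actual exit sequence):

	msr	ICC_SRE_EL2, x5		// last GIC sysreg access on this path
	// ... restore of the remaining guest context ...
	eret				// exception return is a context
					// synchronization event, so the
					// ICC_SRE_EL2 update is visible to
					// the first guest instruction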
>> +.endm
>> +
>> +ENTRY(__save_vgic_v3_state)
>> + save_vgic_v3_state
>> + ret
>> +ENDPROC(__save_vgic_v3_state)
>> +
>> +ENTRY(__restore_vgic_v3_state)
>> + restore_vgic_v3_state
>> + ret
>> +ENDPROC(__restore_vgic_v3_state)
>> +
>> +ENTRY(__vgic_v3_get_ich_vtr_el2)
>> + mrs x0, ICH_VTR_EL2
>> + ret
>> +ENDPROC(__vgic_v3_get_ich_vtr_el2)
>> +
>> + .popsection
>> --
>> 1.8.3.4
>>
>
> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
>
--
Jazz is not dead. It just smells funny.