From: qemu@gibson.dropbear.id.au
To: qemu-devel@nongnu.org
Cc: paulus@samba.org, agraf@suse.de, anton@samba.org
Subject: [Qemu-devel] [PATCH 19/28] Implement the PAPR (pSeries) virtualized interrupt controller (xics)
Date: Tue, 15 Feb 2011 15:56:30 +1100
Message-ID: <1297745799-26148-20-git-send-email-qemu@gibson.dropbear.id.au>
In-Reply-To: <1297745799-26148-1-git-send-email-qemu@gibson.dropbear.id.au>

From: David Gibson <david@gibson.dropbear.id.au>

PAPR defines an interrupt control architecture which is logically divided
into ICP (Interrupt Control Presentation, each unit responsible for
presenting interrupts to a particular "interrupt server", i.e. CPU) and
ICS (Interrupt Control Source, each unit responsible for one or more
hardware interrupts as numbered globally across the system).  All PAPR
virtual IO devices expect to deliver interrupts via this mechanism.  In
Linux, this interrupt controller system is handled by the "xics" driver.

On pSeries systems, access to the interrupt controller is virtualized via
hypercalls and RTAS methods.  However, the virtualized interface is very
similar to the underlying interrupt controller hardware, and similar PICs
exist un-virtualized in some other systems.
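
For context, the guest-side flow over this virtualized interface looks
roughly like the sketch below.  It is not part of the patch: the hypercall
names (H_XIRR, H_EOI) match the ones registered by this code, but the
hcall() wrapper and the handle_irq() callout are hypothetical stand-ins
for whatever the guest OS actually uses (the Linux xics driver does
roughly the equivalent).

    /* Illustrative guest-side sequence only -- hcall() and handle_irq()
     * are hypothetical placeholders, not code from this patch. */
    static void guest_take_external_interrupt(void)
    {
        unsigned long xirr;

        /* H_XIRR: accept the most favoured pending interrupt.  The low
         * 24 bits are the source number (XISR), the top byte the CPPR
         * in force before acceptance. */
        xirr = hcall(H_XIRR);

        if (xirr & 0xffffff) {
            handle_irq(xirr & 0xffffff);
        }

        /* H_EOI: writing the same value back restores the old CPPR and
         * signals end-of-interrupt to the source controller. */
        hcall(H_EOI, xirr);
    }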

This patch implements both the ICP and ICS sides of the PAPR interrupt
controller.  For now, only the hypercall virtualized interface is provided;
however, it would be relatively straightforward to graft an emulated
register interface onto the underlying interrupt logic if we want to add
a machine with a hardware ICS/ICP system in the future.
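
As an illustration of that point (not something this patch implements), a
register frontend could dispatch loads and stores to the same internal
helpers.  The offsets below are assumed purely for illustration, and the
actual MMIO registration is left out:

    /* Hypothetical register frontend sketch: the offsets are made up;
     * only icp_accept()/icp_eoi()/icp_set_mfrr() come from hw/xics.c. */
    #define ICP_XIRR_OFFSET   0x4   /* assumed: read accepts, write EOIs */
    #define ICP_MFRR_OFFSET   0xc   /* assumed: write sets the MFRR */

    static uint32_t icp_reg_read(struct icp_state *icp, int server,
                                 int offset)
    {
        if (offset == ICP_XIRR_OFFSET) {
            return icp_accept(icp->ss + server);
        }
        return 0;
    }

    static void icp_reg_write(struct icp_state *icp, int server,
                              int offset, uint32_t val)
    {
        switch (offset) {
        case ICP_XIRR_OFFSET:
            icp_eoi(icp, server, val);
            break;
        case ICP_MFRR_OFFSET:
            icp_set_mfrr(icp, server, val & 0xff);
            break;
        }
    }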

There are some limitations in this implementation: it is assumed for now
that only one instance of the ICS exists, although a full xics system can
have several, each responsible for a different group of hardware irqs.
ICP/ICS can handle both level-sensitive (LSI) and message-signalled (MSI)
interrupt inputs.  For now, this implementation supports only MSI
interrupts, since that is what the PAPR virtual IO devices use.
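
To show how a virtual IO device is expected to plug in, here is a usage
sketch: xics_find_qirq(), qemu_irq_raise() and struct icp_state are the
real interfaces from this patch and QEMU's IRQ layer, while the device
structure and function names are invented for illustration.

    /* Hypothetical VIO device glue -- everything except the xics and
     * qemu_irq calls is made up. */
    struct my_vio_dev {
        uint32_t irq;    /* globally numbered interrupt for the device */
        qemu_irq qirq;   /* line handed out by the ICS */
    };

    static int my_vio_dev_init(struct my_vio_dev *dev,
                               struct icp_state *icp, uint32_t irq)
    {
        dev->irq = irq;
        dev->qirq = xics_find_qirq(icp, irq);
        return dev->qirq ? 0 : -1;   /* NULL means irq is out of range */
    }

    static void my_vio_dev_interrupt(struct my_vio_dev *dev)
    {
        /* The MSI path in xics latches on any non-zero value, so a
         * single raise is enough to queue the interrupt at the ICP. */
        qemu_irq_raise(dev->qirq);
    }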

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 Makefile.target |    2 +-
 hw/spapr.c      |   26 +++
 hw/spapr.h      |    2 +
 hw/xics.c       |  528 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/xics.h       |   13 ++
 5 files changed, 570 insertions(+), 1 deletions(-)
 create mode 100644 hw/xics.c
 create mode 100644 hw/xics.h

diff --git a/Makefile.target b/Makefile.target
index fa59109..00cb554 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -233,7 +233,7 @@ obj-ppc-y += ppc_oldworld.o
 obj-ppc-y += ppc_newworld.o
 # IBM pSeries (sPAPR)
 obj-ppc-y += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o
-obj-ppc-y += spapr_vty.o
+obj-ppc-y += xics.o spapr_vty.o
 # PowerPC 4xx boards
 obj-ppc-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o
 obj-ppc-y += ppc440.o ppc440_bamboo.o
diff --git a/hw/spapr.c b/hw/spapr.c
index 23f493a..be30def 100644
--- a/hw/spapr.c
+++ b/hw/spapr.c
@@ -34,6 +34,7 @@
 
 #include "hw/spapr.h"
 #include "hw/spapr_vio.h"
+#include "hw/xics.h"
 
 #include <libfdt.h>
 
@@ -62,6 +63,7 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
     uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
     uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)};
     char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr";
+    uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)};
     int i;
     char *modelname;
     int ret;
@@ -120,6 +122,7 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
 
     for (i = 0; i < smp_cpus; i++) {
         CPUState *env = envs[i];
+        uint32_t gserver_prop[] = {cpu_to_be32(i), 0}; /* HACK! */
         char *nodename;
         uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                            0xffffffff, 0xffffffff};
@@ -147,6 +150,9 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
         _FDT((fdt_property(fdt, "ibm,pft-size", pft_size_prop, sizeof(pft_size_prop))));
         _FDT((fdt_property_string(fdt, "status", "okay")));
         _FDT((fdt_property(fdt, "64-bit", NULL, 0)));
+        _FDT((fdt_property_cell(fdt, "ibm,ppc-interrupt-server#s", i)));
+        _FDT((fdt_property(fdt, "ibm,ppc-interrupt-gserver#s", 
+                           gserver_prop, sizeof(gserver_prop))));
 
         if (envs[i]->mmu_model & POWERPC_MMU_1TSEG) {
             _FDT((fdt_property(fdt, "ibm,processor-segment-sizes",
@@ -168,6 +174,20 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
 
     _FDT((fdt_end_node(fdt)));
 
+    /* interrupt controller */ 
+    _FDT((fdt_begin_node(fdt, "interrupt-controller@0")));
+
+    _FDT((fdt_property_string(fdt, "device_type",
+                              "PowerPC-External-Interrupt-Presentation")));
+    _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
+    _FDT((fdt_property_cell(fdt, "reg", 0)));    
+    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
+    _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
+                       interrupt_server_ranges_prop,
+                       sizeof(interrupt_server_ranges_prop))));
+
+    _FDT((fdt_end_node(fdt)));
+   
     /* vdevice */
     _FDT((fdt_begin_node(fdt, "vdevice")));
 
@@ -175,6 +195,8 @@ static void *spapr_create_fdt(int *fdt_size, ram_addr_t ramsize,
     _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
     _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
     _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
+    _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
+    _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
     
     _FDT((fdt_end_node(fdt)));
 
@@ -290,6 +312,10 @@ static void ppc_spapr_init(ram_addr_t ram_size,
     }
     qemu_free(filename);
 
+    /* Set up Interrupt Controller */
+    spapr->icp = xics_system_init(smp_cpus, &env, MAX_SERIAL_PORTS);
+
+    /* Set up VIO bus */
     spapr->vio_bus = spapr_vio_bus_init();
 
     for (i = 0; i < MAX_SERIAL_PORTS; i++) {
diff --git a/hw/spapr.h b/hw/spapr.h
index 7a7c319..4b54c22 100644
--- a/hw/spapr.h
+++ b/hw/spapr.h
@@ -2,9 +2,11 @@
 #define __HW_SPAPR_H__
 
 struct VIOsPAPRBus;
+struct icp_state;
 
 typedef struct sPAPREnvironment {
     struct VIOsPAPRBus *vio_bus;
+    struct icp_state *icp;
 } sPAPREnvironment;
 
 #define H_SUCCESS         0
diff --git a/hw/xics.c b/hw/xics.c
new file mode 100644
index 0000000..46e778a
--- /dev/null
+++ b/hw/xics.c
@@ -0,0 +1,528 @@
+#include "hw.h"
+#include "hw/spapr.h"
+#include "hw/xics.h"
+
+#include <pthread.h>
+
+/*
+ * ICP: Presentation layer
+ */
+
+struct icp_server_state {
+    uint32_t cppr :8;
+    uint32_t xisr :24;
+    uint8_t pending_priority;
+    uint8_t mfrr;
+    qemu_irq output;
+    pthread_mutex_t lock;
+};
+
+struct ics_state;
+
+struct icp_state {
+    long nr_servers;
+    struct icp_server_state *ss;
+    struct ics_state *ics;
+};
+
+static void ics_reject(struct ics_state *ics, int nr);
+static void ics_resend(struct ics_state *ics);
+static void ics_eoi(struct ics_state *ics, int nr);
+
+static void icp_check_ipi(struct icp_state *icp, int server)
+{
+    struct icp_server_state *ss = icp->ss + server;
+    
+    if (ss->xisr && (ss->pending_priority <= ss->mfrr)) {
+        return;
+    }
+
+    if (ss->xisr) {
+        ics_reject(icp->ics, ss->xisr);
+    }
+
+    ss->xisr = XICS_IPI;
+    ss->pending_priority = ss->mfrr;
+    qemu_irq_raise(ss->output);
+}
+
+static void icp_resend(struct icp_state *icp, int server)
+{
+    struct icp_server_state *ss = icp->ss + server;
+
+    if (ss->mfrr < ss->cppr) {
+        icp_check_ipi(icp, server);
+    }
+    ics_resend(icp->ics);
+}
+
+static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
+{
+    struct icp_server_state *ss = icp->ss + server;
+    uint8_t old_cppr;
+    uint32_t old_xisr;
+
+    pthread_mutex_lock(&ss->lock);
+    old_cppr = ss->cppr;
+    ss->cppr = cppr;
+
+    if (cppr < old_cppr) {
+        if (ss->xisr && (cppr <= ss->pending_priority)) {
+            old_xisr = ss->xisr;
+            ss->xisr = 0;
+            qemu_irq_lower(ss->output);
+            ics_reject(icp->ics, old_xisr);
+        }
+    } else {
+        if (!ss->xisr) {
+            icp_resend(icp, server);
+        }
+    }
+    pthread_mutex_unlock(&ss->lock);
+}
+
+static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
+{
+    struct icp_server_state *ss = icp->ss + nr;
+
+    pthread_mutex_lock(&ss->lock);
+
+    ss->mfrr = mfrr;
+    if (mfrr < ss->cppr) {
+        icp_check_ipi(icp, nr);
+    }
+
+    pthread_mutex_unlock(&ss->lock);
+}
+
+static uint32_t icp_accept(struct icp_server_state *ss)
+{
+    uint32_t xirr;
+
+    pthread_mutex_lock(&ss->lock);
+    qemu_irq_lower(ss->output);
+    xirr = ss->cppr << 24 | ss->xisr;
+    ss->xisr = 0;
+    ss->cppr = ss->pending_priority;
+    pthread_mutex_unlock(&ss->lock);
+    return xirr;
+}
+
+static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
+{
+    struct icp_server_state *ss = icp->ss + server;
+
+    ics_eoi(icp->ics, xirr & 0xffffff);
+    /* Send EOI -> ICS */
+    ss->cppr = xirr >> 24;
+    if (!ss->xisr) {
+        icp_resend(icp, server);
+    }
+}
+
+static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
+{
+    struct icp_server_state *ss = icp->ss + server;
+
+    pthread_mutex_lock(&ss->lock);
+
+    if ((priority >= ss->cppr)
+        || (ss->xisr && (ss->pending_priority <= priority))) {
+        ics_reject(icp->ics, nr);
+    } else {
+        if (ss->xisr) {
+            ics_reject(icp->ics, ss->xisr);
+        }
+        ss->xisr = nr;
+        ss->pending_priority = priority;
+        qemu_irq_raise(ss->output);
+    }
+
+    pthread_mutex_unlock(&ss->lock);
+}
+
+/*
+ * ICS: Source layer
+ */
+
+struct ics_irq_state {
+    int server;
+    uint8_t priority;
+    uint8_t saved_priority;
+    /* int pending :1; */
+    /* int presented :1; */
+    int rejected :1;
+    int masked_pending :1;
+};
+
+struct ics_state {
+    int nr_irqs;
+    int offset;
+    qemu_irq *qirqs;
+    struct ics_irq_state *irqs;
+    struct icp_state *icp;
+};
+
+static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
+{
+    return (nr >= ics->offset)
+        && (nr < (ics->offset + ics->nr_irqs));
+}
+
+static void ics_set_irq_msi(void *opaque, int nr, int val)
+{
+    struct ics_state *ics = (struct ics_state *)opaque;
+    struct ics_irq_state *irq = ics->irqs + nr;
+
+    if (val) {
+        if (irq->priority == 0xff) {
+            irq->masked_pending = 1;
+            /* masked pending */ ;
+        } else  {
+            icp_irq(ics->icp, irq->server, nr + ics->offset, irq->priority);
+        }
+    }
+}
+
+static void ics_reject_msi(struct ics_state *ics, int nr)
+{
+    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;
+
+    irq->rejected = 1;
+}
+
+static void ics_resend_msi(struct ics_state *ics)
+{
+    int i;
+
+    for (i = 0; i < ics->nr_irqs; i++) {
+        struct ics_irq_state *irq = ics->irqs + i;
+
+        /* FIXME: filter by server#? */
+        if (irq->rejected) {
+            irq->rejected = 0;
+            if (irq->priority != 0xff) {
+                icp_irq(ics->icp, irq->server, i + ics->offset, irq->priority);
+            }
+        }
+    }
+}
+
+static void ics_write_xive_msi(struct ics_state *ics, int nr, int server,
+                               uint8_t priority)
+{
+    struct ics_irq_state *irq = ics->irqs + nr;
+
+    irq->server = server;
+    irq->priority = priority;
+
+    if (!irq->masked_pending || (priority == 0xff)) {
+        return;
+    }
+
+    irq->masked_pending = 0;
+    icp_irq(ics->icp, server, nr + ics->offset, priority);
+}
+
+/* static void ics_recheck_irq(struct ics_state *ics, int nr) */
+/* { */
+/*     struct ics_irq_state *irq = xics->irqs + (nr - xics->offset); */
+
+/*     if (irq->pending && (irq->priority != 0xff)) { */
+/*      irq->presented = 1; */
+/*      icp_irq(xicp->ss + irq->server, nr + ics->offset, irq->priority); */
+/*     } */
+/* } */
+
+/* static void ics_set_irq(void *opaque, int nr, int val) */
+/* { */
+/*     struct ics_state *ics = (struct ics_state *)opaque; */
+/*     struct ics_irq_state *irq = ics->irqs + nr; */
+
+/*     irq->pending = val; */
+/*     ics_recheck_irq(ics, nr); */
+/* } */
+
+/* static void ics_reject(int nr) */
+/* { */
+/*     struct ics_irq_state *irq = xics->irqs + (nr - xics->offset); */
+
+/*     assert(irq->presented); */
+/*     irq->rejected = 1; */
+/*     irq->presented = 0; */
+/* } */
+
+/* static void ics_eoi(int nr) */
+/* { */
+/*     struct ics_irq_state *irq = xics->irqs + (nr - xics->offset); */
+
+/*     assert(irq->presented); */
+/*     irq->presented = 0; */
+/*     irq->rejected = 0; */
+/*     ics_recheck_irq(xics, nr); */
+/* } */
+
+/* static void ics_resend_irq(struct ics_state *ics, int nr, */
+/*                            struct icp_server_state *ss) */
+/* { */
+/*     struct ics_irq_state *irq = ics->irqs + (nr - ics->offset); */
+
+/*     if (!irq->rejected) */
+/*         return; /\* Not rejected, so no need to resend *\/ */
+
+/*     if (ss != (xicp->ss + irq->server)) */
+/*         return; /\* Not for this server, so don't resend *\/ */
+
+/*     ics_recheck_irq(ics, nr); */
+/* } */
+
+/* static void ics_resend(struct icp_server_state *ss) */
+/* { */
+/*     int i; */
+
+/*     for (i = 0; i < xics->nr_irqs; i++) */
+/*         ics_resend_irq(xics, nr, ss); */
+/* } */
+
+static void ics_reject(struct ics_state *ics, int nr)
+{
+    ics_reject_msi(ics, nr);
+}
+
+static void ics_resend(struct ics_state *ics)
+{
+    ics_resend_msi(ics);
+}
+
+static void ics_eoi(struct ics_state *ics, int nr)
+{
+}
+
+/*
+ * Exported functions
+ */
+
+qemu_irq xics_find_qirq(struct icp_state *icp, int irq)
+{
+    if ((irq < icp->ics->offset)
+        || (irq >= (icp->ics->offset + icp->ics->nr_irqs))) {
+        return NULL;
+    }
+
+    return icp->ics->qirqs[irq - icp->ics->offset];
+}
+
+static target_ulong h_cppr(CPUState *env, sPAPREnvironment *spapr,
+                           target_ulong opcode, target_ulong *args)
+{
+    target_ulong cppr = args[0];
+
+    icp_set_cppr(spapr->icp, env->cpu_index, cppr);
+    return H_SUCCESS;
+}
+
+static target_ulong h_ipi(CPUState *env, sPAPREnvironment *spapr,
+                          target_ulong opcode, target_ulong *args)
+{
+    target_ulong server = args[0];
+    target_ulong mfrr = args[1];
+
+    if (server >= spapr->icp->nr_servers) {
+        return H_PARAMETER;
+    }
+
+    icp_set_mfrr(spapr->icp, server, mfrr);
+    return H_SUCCESS;
+
+}
+
+static target_ulong h_xirr(CPUState *env, sPAPREnvironment *spapr,
+                           target_ulong opcode, target_ulong *args)
+{
+    uint32_t xirr = icp_accept(spapr->icp->ss + env->cpu_index);
+
+    args[0] = xirr;
+    return H_SUCCESS;
+}
+
+static target_ulong h_eoi(CPUState *env, sPAPREnvironment *spapr,
+                          target_ulong opcode, target_ulong *args)
+{
+    target_ulong xirr = args[0];
+
+    icp_eoi(spapr->icp, env->cpu_index, xirr);
+    return H_SUCCESS;
+}
+
+static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
+                          uint32_t nargs, target_ulong args,
+                          uint32_t nret, target_ulong rets)
+{
+    struct ics_state *ics = spapr->icp->ics;
+    uint32_t nr, server, priority;
+
+    if ((nargs != 3) || (nret != 1)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    nr = rtas_ld(args, 0);
+    server = rtas_ld(args, 1);
+    priority = rtas_ld(args, 2);
+
+    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
+        || (priority > 0xff)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    ics_write_xive_msi(ics, nr - ics->offset, server, priority);
+
+    rtas_st(rets, 0, 0); /* Success */
+}
+
+static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
+                          uint32_t nargs, target_ulong args,
+                          uint32_t nret, target_ulong rets)
+{
+    struct ics_state *ics = spapr->icp->ics;
+    uint32_t nr;
+
+    if ((nargs != 1) || (nret != 3)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    nr = rtas_ld(args, 0);
+
+    if (!ics_valid_irq(ics, nr)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    rtas_st(rets, 0, 0); /* Success */
+    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
+    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
+}
+
+static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
+                         uint32_t nargs, target_ulong args,
+                         uint32_t nret, target_ulong rets)
+{
+    struct ics_state *ics = spapr->icp->ics;
+    uint32_t nr;
+
+    if ((nargs != 1) || (nret != 1)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    nr = rtas_ld(args, 0);
+
+    if (!ics_valid_irq(ics, nr)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    /* This is a NOP for now, since the described PAPR semantics don't
+     * seem to gel with what Linux does */
+#if 0
+    struct ics_irq_state *irq = xics->irqs + (nr - xics->offset);
+
+    irq->saved_priority = irq->priority;
+    ics_write_xive_msi(xics, nr - xics->offset, irq->server, 0xff);
+#endif
+
+    rtas_st(rets, 0, 0); /* Success */
+}
+
+static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
+                        uint32_t nargs, target_ulong args,
+                        uint32_t nret, target_ulong rets)
+{
+    struct ics_state *ics = spapr->icp->ics;
+    uint32_t nr;
+
+    if ((nargs != 1) || (nret != 1)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    nr = rtas_ld(args, 0);
+
+    if (!ics_valid_irq(ics, nr)) {
+        rtas_st(rets, 0, -3);
+        return;
+    }
+
+    /* This is a NOP for now, since the described PAPR semantics don't
+     * seem to gel with what Linux does */
+#if 0
+    struct ics_irq_state *irq = xics->irqs + (nr - xics->offset);
+
+    ics_write_xive_msi(xics, nr - xics->offset,
+                       irq->server, irq->saved_priority);
+#endif
+
+    rtas_st(rets, 0, 0); /* Success */
+}
+
+struct icp_state *xics_system_init(int nr_servers, CPUState *servers[],
+                                   int nr_irqs)
+{
+    int i;
+    struct icp_state *icp;
+    struct ics_state *ics;
+
+    icp = qemu_mallocz(sizeof(*icp));
+    icp->nr_servers = nr_servers;
+    icp->ss = qemu_mallocz(nr_servers * sizeof(struct icp_server_state));
+
+    for (i = 0; i < nr_servers; i++) {
+        servers[i]->cpu_index = i;
+
+        switch (PPC_INPUT(servers[i])) {
+        case PPC_FLAGS_INPUT_POWER7:
+            icp->ss[i].output = servers[i]->irq_inputs[POWER7_INPUT_INT];
+            break;
+
+        case PPC_FLAGS_INPUT_970:
+            icp->ss[i].output = servers[i]->irq_inputs[PPC970_INPUT_INT];
+            break;
+
+        default:
+            hw_error("XICS interrupt model does not support this CPU bus model\n");
+            exit(1);
+        }
+
+        icp->ss[i].mfrr = 0xff;
+        pthread_mutex_init(&icp->ss[i].lock, NULL);
+    }
+
+    ics = qemu_mallocz(sizeof(*ics));
+    ics->nr_irqs = nr_irqs;
+    ics->offset = 16;
+    ics->irqs = qemu_mallocz(nr_irqs * sizeof(struct ics_irq_state));
+
+    icp->ics = ics;
+    ics->icp = icp;
+
+    for (i = 0; i < nr_irqs; i++) {
+        ics->irqs[i].priority = 0xff;
+        ics->irqs[i].saved_priority = 0xff;
+    }
+
+    ics->qirqs = qemu_allocate_irqs(ics_set_irq_msi, ics, nr_irqs);
+
+    spapr_register_hypercall(H_CPPR, h_cppr);
+    spapr_register_hypercall(H_IPI, h_ipi);
+    spapr_register_hypercall(H_XIRR, h_xirr);
+    spapr_register_hypercall(H_EOI, h_eoi);
+
+    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
+    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
+    spapr_rtas_register("ibm,int-off", rtas_int_off);
+    spapr_rtas_register("ibm,int-on", rtas_int_on);
+
+    return icp;
+}
diff --git a/hw/xics.h b/hw/xics.h
new file mode 100644
index 0000000..e55f5f1
--- /dev/null
+++ b/hw/xics.h
@@ -0,0 +1,13 @@
+#if !defined(__XICS_H__)
+#define __XICS_H__
+
+#define XICS_IPI        0x2
+
+struct icp_state;
+
+qemu_irq xics_find_qirq(struct icp_state *icp, int irq);
+
+struct icp_state *xics_system_init(int nr_servers, CPUState *servers[],
+                                   int nr_irqs);
+
+#endif /* __XICS_H__ */
-- 
1.7.1

Thread overview: 29+ messages
2011-02-15  4:56 [Qemu-devel] RFC: Implement emulation of pSeries logical partitions (v2) qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 01/28] Add TAGS and *~ to .gitignore qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 02/28] Clean up PowerPC SLB handling code qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 03/28] Allow qemu_devtree_setprop() to take arbitrary values qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 04/28] Add a hook to allow hypercalls to be emulated on PowerPC qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 05/28] Implement PowerPC slbmfee and slbmfev instructions qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 06/28] Implement missing parts of the logic for the POWER PURR qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 07/28] Correct ppc popcntb logic, implement popcntw and popcntd qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 08/28] Clean up slb_lookup() function qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 09/28] Parse SDR1 on mtspr instead of at translate time qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 10/28] Use "hash" more consistently in ppc mmu code qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 11/28] Better factor the ppc hash translation path qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 12/28] Support 1T segments on ppc qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 13/28] Add POWER7 support for ppc qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 14/28] Start implementing pSeries logical partition machine qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 15/28] Implement the bus structure for PAPR virtual IO qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 16/28] Virtual hash page table handling on pSeries machine qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 17/28] Implement hcall based RTAS for pSeries machines qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 18/28] Implement assorted pSeries hcalls and RTAS methods qemu
2011-02-15  4:56 ` qemu [this message]
2011-02-15  4:56 ` [Qemu-devel] [PATCH 20/28] Add PAPR H_VIO_SIGNAL hypercall and infrastructure for VIO interrupts qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 21/28] Add (virtual)_interrupt to PAPR virtual tty device qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 22/28] Implement TCE translation for sPAPR VIO qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 23/28] Implement sPAPR Virtual LAN (ibmveth) qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 24/28] Implement PAPR CRQ hypercalls qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 25/28] Implement PAPR virtual SCSI interface (ibmvscsi) qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 26/28] Add a PAPR TCE-bypass mechanism for the pSeries machine qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 27/28] Add SLOF-based partition firmware for pSeries machine, allowing more boot options qemu
2011-02-15  4:56 ` [Qemu-devel] [PATCH 28/28] Implement PAPR VPA functions for pSeries shared processor partitions qemu
