* [PATCH 6/16] cell: abstract spu management routines
@ 2006-11-10 20:01 Geoff Levand
2006-11-13 4:11 ` Michael Ellerman
2006-11-14 3:44 ` Michael Ellerman
0 siblings, 2 replies; 12+ messages in thread
From: Geoff Levand @ 2006-11-10 20:01 UTC (permalink / raw)
To: Paul Mackerras; +Cc: linuxppc-dev, Arnd Bergmann
This adds a platform specific spu management abstraction and the corresponding
routines to support the IBM Cell Blade. It also removes the hypervisor only
resources that were included in struct spu.
Three new platform specific routines are introduced, spu_enumerate_spus(),
spu_create_spu() and spu_destroy_spu(). The underlying design uses a new
type, struct spu_management_ops, to hold function pointers that the platform
setup code is expected to initialize to instances appropriate to that platform.
For the IBM Cell Blade support, I put the hypervisor only resources that were
in struct spu into a platform specific data structure struct spu_pdata.
Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com>
---
Michael,
Unfortunately, for your xmon spu support, your DUMP_FIELD is set up in such a
way that it is not easy to adapt for use from inside spu_priv1_mmio.c, so I
left of_dump_pdata_fields() empty. We'll need to work out something usable
there, or find some other way to abstract those platform specific spu
variables.
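
To make the intended use a bit more concrete, here is a rough sketch of what
the platform side could look like (illustration only, not part of this patch;
everything named "myplat" is made up, only struct spu_management_ops and the
spu_management_ops pointer come from this patch):

/* hypothetical platform glue, for illustration only */

#include <linux/kernel.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>

static int myplat_enumerate_spus(int (*fn)(void *data))
{
        unsigned long i;
        int ret;

        /* Call fn() (create_spu() in spu_base.c) once per spu, passing an
         * opaque, platform defined handle - here just an index. */
        for (i = 0; i < 8; i++) {
                ret = fn((void *)i);
                if (ret)
                        return ret;
        }
        return 0;
}

static int myplat_create_spu(struct spu *spu, void *data)
{
        /* Allocate and fill spu->pdata, map the register areas and set up
         * spu->node, spu->irqs[], etc. for the spu identified by data. */
        return 0;
}

static int myplat_destroy_spu(struct spu *spu)
{
        /* Unmap the register areas and free spu->pdata. */
        return 0;
}

const struct spu_management_ops spu_management_myplat_ops = {
        .enumerate_spus = myplat_enumerate_spus,
        .create_spu     = myplat_create_spu,
        .destroy_spu    = myplat_destroy_spu,
        /* .dump_pdata_fields may be left NULL; spu_dump_pdata_fields()
         * checks the pointer before calling it. */
};

The platform's setup_arch() then sets spu_management_ops to
&spu_management_myplat_ops, just as cell_setup_arch() does with
spu_management_of_ops in the hunks below.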
arch/powerpc/platforms/cell/cbe_thermal.c | 5
arch/powerpc/platforms/cell/setup.c | 3
arch/powerpc/platforms/cell/spu_base.c | 308 +-------------------
arch/powerpc/platforms/cell/spu_priv1_mmio.c | 406 +++++++++++++++++++++++++--
arch/powerpc/platforms/cell/spu_priv1_mmio.h | 26 +
arch/powerpc/xmon/xmon.c | 8
include/asm-powerpc/spu.h | 5
include/asm-powerpc/spu_priv1.h | 48 ++-
8 files changed, 481 insertions(+), 328 deletions(-)
Index: cell--common--6/arch/powerpc/platforms/cell/cbe_thermal.c
===================================================================
--- cell--common--6.orig/arch/powerpc/platforms/cell/cbe_thermal.c
+++ cell--common--6/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -29,6 +29,7 @@
#include <asm/prom.h>
#include "cbe_regs.h"
+#include "spu_priv1_mmio.h"
static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
{
@@ -36,7 +37,7 @@
spu = container_of(sysdev, struct spu, sysdev);
- return cbe_get_pmd_regs(spu->devnode);
+ return cbe_get_pmd_regs(spu_devnode(spu));
}
/* returns the value for a given spu in a given register */
@@ -49,7 +50,7 @@
/* getting the id from the reg attribute will not work on future device-tree layouts
* in future we should store the id to the spu struct and use it here */
spu = container_of(sysdev, struct spu, sysdev);
- id = (unsigned int *)get_property(spu->devnode, "reg", NULL);
+ id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
value.val = in_be64(&reg->val);
return value.spe[*id];
Index: cell--common--6/arch/powerpc/platforms/cell/setup.c
===================================================================
--- cell--common--6.orig/arch/powerpc/platforms/cell/setup.c
+++ cell--common--6/arch/powerpc/platforms/cell/setup.c
@@ -97,7 +97,8 @@
static void __init cell_setup_arch(void)
{
#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_management_ops = &spu_management_of_ops;
#endif
cbe_regs_init();
Index: cell--common--6/arch/powerpc/platforms/cell/spu_base.c
===================================================================
--- cell--common--6.orig/arch/powerpc/platforms/cell/spu_base.c
+++ cell--common--6/arch/powerpc/platforms/cell/spu_base.c
@@ -25,23 +25,17 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
-
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/prom.h>
+#include <linux/mm.h>
+#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
-#include <asm/mmu_context.h>
#include <asm/xmon.h>
-#include "interrupt.h"
-
+const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -512,235 +506,6 @@
return ret;
}
-static int __init find_spu_node_id(struct device_node *spe)
-{
- const unsigned int *id;
- struct device_node *cpu;
- cpu = spe->parent->parent;
- id = get_property(cpu, "node-id", NULL);
- return id ? *id : 0;
-}
-
-static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
- const char *prop)
-{
- static DEFINE_MUTEX(add_spumem_mutex);
-
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *p;
- int proplen;
-
- unsigned long start_pfn, nr_pages;
- struct pglist_data *pgdata;
- struct zone *zone;
- int ret;
-
- p = get_property(spe, prop, &proplen);
- WARN_ON(proplen != sizeof (*p));
-
- start_pfn = p->address >> PAGE_SHIFT;
- nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- pgdata = NODE_DATA(spu->nid);
- zone = pgdata->node_zones;
-
- /* XXX rethink locking here */
- mutex_lock(&add_spumem_mutex);
- ret = __add_pages(zone, start_pfn, nr_pages);
- mutex_unlock(&add_spumem_mutex);
-
- return ret;
-}
-
-static void __iomem * __init map_spe_prop(struct spu *spu,
- struct device_node *n, const char *name)
-{
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *prop;
-
- const void *p;
- int proplen;
- void __iomem *ret = NULL;
- int err = 0;
-
- p = get_property(n, name, &proplen);
- if (proplen != sizeof (struct address_prop))
- return NULL;
-
- prop = p;
-
- err = cell_spuprop_present(spu, n, name);
- if (err && (err != -EEXIST))
- goto out;
-
- ret = ioremap(prop->address, prop->len);
-
- out:
- return ret;
-}
-
-static void spu_unmap(struct spu *spu)
-{
- iounmap(spu->priv2);
- iounmap(spu->priv1);
- iounmap(spu->problem);
- iounmap((__force u8 __iomem *)spu->local_store);
-}
-
-/* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
-{
- unsigned int isrc;
- const u32 *tmp;
-
- /* Get the interrupt source unit from the device-tree */
- tmp = get_property(np, "isrc", NULL);
- if (!tmp)
- return -ENODEV;
- isrc = tmp[0];
-
- /* Add the node number */
- isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
-
- /* Now map interrupts of all 3 classes */
- spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
- spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
- spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
-
- /* Right now, we only fail if class 2 failed */
- return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
-}
-
-static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
-{
- const char *prop;
- int ret;
-
- ret = -ENODEV;
- spu->name = get_property(node, "name", NULL);
- if (!spu->name)
- goto out;
-
- prop = get_property(node, "local-store", NULL);
- if (!prop)
- goto out;
- spu->local_store_phys = *(unsigned long *)prop;
-
- /* we use local store as ram, not io memory */
- spu->local_store = (void __force *)
- map_spe_prop(spu, node, "local-store");
- if (!spu->local_store)
- goto out;
-
- prop = get_property(node, "problem", NULL);
- if (!prop)
- goto out_unmap;
- spu->problem_phys = *(unsigned long *)prop;
-
- spu->problem= map_spe_prop(spu, node, "problem");
- if (!spu->problem)
- goto out_unmap;
-
- spu->priv1= map_spe_prop(spu, node, "priv1");
- /* priv1 is not available on a hypervisor */
-
- spu->priv2= map_spe_prop(spu, node, "priv2");
- if (!spu->priv2)
- goto out_unmap;
- ret = 0;
- goto out;
-
-out_unmap:
- spu_unmap(spu);
-out:
- return ret;
-}
-
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
-{
- struct of_irq oirq;
- int ret;
- int i;
-
- for (i=0; i < 3; i++) {
- ret = of_irq_map_one(np, i, &oirq);
- if (ret)
- goto err;
-
- ret = -EINVAL;
- spu->irqs[i] = irq_create_of_mapping(oirq.controller,
- oirq.specifier, oirq.size);
- if (spu->irqs[i] == NO_IRQ)
- goto err;
- }
- return 0;
-
-err:
- pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
- for (; i >= 0; i--) {
- if (spu->irqs[i] != NO_IRQ)
- irq_dispose_mapping(spu->irqs[i]);
- }
- return ret;
-}
-
-static int spu_map_resource(struct device_node *node, int nr,
- void __iomem** virt, unsigned long *phys)
-{
- struct resource resource = { };
- int ret;
-
- ret = of_address_to_resource(node, 0, &resource);
- if (ret)
- goto out;
-
- if (phys)
- *phys = resource.start;
- *virt = ioremap(resource.start, resource.end - resource.start);
- if (!*virt)
- ret = -EINVAL;
-
-out:
- return ret;
-}
-
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
-{
- int ret = -ENODEV;
- spu->name = get_property(node, "name", NULL);
- if (!spu->name)
- goto out;
-
- ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
- &spu->local_store_phys);
- if (ret)
- goto out;
- ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
- &spu->problem_phys);
- if (ret)
- goto out_unmap;
- ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
- NULL);
- if (ret)
- goto out_unmap;
-
- if (!firmware_has_feature(FW_FEATURE_LPAR))
- ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
- NULL);
- if (ret)
- goto out_unmap;
- return 0;
-
-out_unmap:
- spu_unmap(spu);
-out:
- pr_debug("failed to map spe %s: %d\n", spu->name, ret);
- return ret;
-}
struct sysdev_class spu_sysdev_class = {
set_kset_name("spu")
@@ -821,7 +586,7 @@
sysdev_unregister(&spu->sysdev);
}
-static int __init create_spu(struct device_node *spe)
+static int __init create_spu(void *data)
{
struct spu *spu;
int ret;
@@ -832,60 +597,37 @@
if (!spu)
goto out;
- spu->node = find_spu_node_id(spe);
- if (spu->node >= MAX_NUMNODES) {
- printk(KERN_WARNING "SPE %s on node %d ignored,"
- " node number too big\n", spe->full_name, spu->node);
- printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
- return -ENODEV;
- }
- spu->nid = of_node_to_nid(spe);
- if (spu->nid == -1)
- spu->nid = 0;
+ spin_lock_init(&spu->register_lock);
+ mutex_lock(&spu_mutex);
+ spu->number = number++;
+ mutex_unlock(&spu_mutex);
+
+ ret = spu_create_spu(spu, data);
- ret = spu_map_device(spu, spe);
- /* try old method */
- if (ret)
- ret = spu_map_device_old(spu, spe);
if (ret)
goto out_free;
- ret = spu_map_interrupts(spu, spe);
- if (ret)
- ret = spu_map_interrupts_old(spu, spe);
- if (ret)
- goto out_unmap;
- spin_lock_init(&spu->register_lock);
spu_mfc_sdr_setup(spu);
spu_mfc_sr1_set(spu, 0x33);
- mutex_lock(&spu_mutex);
-
- spu->number = number++;
ret = spu_request_irqs(spu);
if (ret)
- goto out_unlock;
+ goto out_destroy;
ret = spu_create_sysdev(spu);
if (ret)
goto out_free_irqs;
+ mutex_lock(&spu_mutex);
list_add(&spu->list, &spu_list[spu->node]);
list_add(&spu->full_list, &spu_full_list);
- spu->devnode = of_node_get(spe);
-
mutex_unlock(&spu_mutex);
- pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n",
- spu->name, spu->local_store,
- spu->problem, spu->priv1, spu->priv2, spu->number);
goto out;
out_free_irqs:
spu_free_irqs(spu);
-out_unlock:
- mutex_unlock(&spu_mutex);
-out_unmap:
- spu_unmap(spu);
+out_destroy:
+ spu_destroy_spu(spu);
out_free:
kfree(spu);
out:
@@ -897,11 +639,9 @@
list_del_init(&spu->list);
list_del_init(&spu->full_list);
- of_node_put(spu->devnode);
-
spu_destroy_sysdev(spu);
spu_free_irqs(spu);
- spu_unmap(spu);
+ spu_destroy_spu(spu);
kfree(spu);
}
@@ -922,7 +662,6 @@
static int __init init_spu_base(void)
{
- struct device_node *node;
int i, ret;
/* create sysdev class for spus */
@@ -933,16 +672,13 @@
for (i = 0; i < MAX_NUMNODES; i++)
INIT_LIST_HEAD(&spu_list[i]);
- ret = -ENODEV;
- for (node = of_find_node_by_type(NULL, "spe");
- node; node = of_find_node_by_type(node, "spe")) {
- ret = create_spu(node);
- if (ret) {
- printk(KERN_WARNING "%s: Error initializing %s\n",
- __FUNCTION__, node->name);
- cleanup_spu_base();
- break;
- }
+ ret = spu_enumerate_spus(create_spu);
+
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing spus\n",
+ __FUNCTION__);
+ cleanup_spu_base();
+ return ret;
}
xmon_register_spus(&spu_full_list);
Index: cell--common--6/arch/powerpc/platforms/cell/spu_priv1_mmio.c
===================================================================
--- cell--common--6.orig/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ cell--common--6/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,480 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#undef DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
-#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
+#include <asm/prom.h>
#include "interrupt.h"
+#include "spu_priv1_mmio.h"
+
+struct spu_pdata {
+ int nid;
+ struct device_node *devnode;
+ struct spu_priv1 __iomem *priv1;
+};
+
+static struct spu_pdata *spu_get_pdata(struct spu *spu)
+{
+ BUG_ON(!spu->pdata);
+ return spu->pdata;
+}
+
+struct device_node *spu_devnode(struct spu *spu)
+{
+ return spu_get_pdata(spu)->devnode;
+}
+
+EXPORT_SYMBOL_GPL(spu_devnode);
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+ const unsigned int *id;
+ struct device_node *cpu;
+ cpu = spe->parent->parent;
+ id = get_property(cpu, "node-id", NULL);
+ return id ? *id : 0;
+}
+
+static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
+ const char *prop)
+{
+ static DEFINE_MUTEX(add_spumem_mutex);
+
+ const struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *p;
+ int proplen;
+
+ unsigned long start_pfn, nr_pages;
+ struct pglist_data *pgdata;
+ struct zone *zone;
+ int ret;
+
+ p = get_property(spe, prop, &proplen);
+ WARN_ON(proplen != sizeof (*p));
+
+ start_pfn = p->address >> PAGE_SHIFT;
+ nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
+ zone = pgdata->node_zones;
+
+ /* XXX rethink locking here */
+ mutex_lock(&add_spumem_mutex);
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ mutex_unlock(&add_spumem_mutex);
+
+ return ret;
+}
+
+static void __iomem * __init map_spe_prop(struct spu *spu,
+ struct device_node *n, const char *name)
+{
+ const struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *prop;
+
+ const void *p;
+ int proplen;
+ void __iomem *ret = NULL;
+ int err = 0;
+
+ p = get_property(n, name, &proplen);
+ if (proplen != sizeof (struct address_prop))
+ return NULL;
+
+ prop = p;
+
+ err = cell_spuprop_present(spu, n, name);
+ if (err && (err != -EEXIST))
+ goto out;
+
+ ret = ioremap(prop->address, prop->len);
+
+ out:
+ return ret;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+ iounmap(spu->priv2);
+ iounmap(spu_get_pdata(spu)->priv1);
+ iounmap(spu->problem);
+ iounmap((__force u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_interrupts_old(struct spu *spu,
+ struct device_node *np)
+{
+ unsigned int isrc;
+ const u32 *tmp;
+
+ /* Get the interrupt source unit from the device-tree */
+ tmp = get_property(np, "isrc", NULL);
+ if (!tmp)
+ return -ENODEV;
+ isrc = tmp[0];
+
+ /* Add the node number */
+ isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
+
+ /* Now map interrupts of all 3 classes */
+ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
+ spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
+ spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
+
+ /* Right now, we only fail if class 2 failed */
+ return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+{
+ const char *prop;
+ int ret;
+
+ ret = -ENODEV;
+ spu->name = get_property(node, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ prop = get_property(node, "local-store", NULL);
+ if (!prop)
+ goto out;
+ spu->local_store_phys = *(unsigned long *)prop;
+
+ /* we use local store as ram, not io memory */
+ spu->local_store = (void __force *)
+ map_spe_prop(spu, node, "local-store");
+ if (!spu->local_store)
+ goto out;
+
+ prop = get_property(node, "problem", NULL);
+ if (!prop)
+ goto out_unmap;
+ spu->problem_phys = *(unsigned long *)prop;
+
+ spu->problem= map_spe_prop(spu, node, "problem");
+ if (!spu->problem)
+ goto out_unmap;
+
+ spu_get_pdata(spu)->priv1= map_spe_prop(spu, node, "priv1");
+
+ spu->priv2= map_spe_prop(spu, node, "priv2");
+ if (!spu->priv2)
+ goto out_unmap;
+ ret = 0;
+ goto out;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ return ret;
+}
+
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+ struct of_irq oirq;
+ int ret;
+ int i;
+
+ for (i=0; i < 3; i++) {
+ ret = of_irq_map_one(np, i, &oirq);
+ if (ret)
+ goto err;
+
+ ret = -EINVAL;
+ spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+ oirq.specifier, oirq.size);
+ if (spu->irqs[i] == NO_IRQ)
+ goto err;
+ }
+ return 0;
+
+err:
+ pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+ spu->name);
+ for (; i >= 0; i--) {
+ if (spu->irqs[i] != NO_IRQ)
+ irq_dispose_mapping(spu->irqs[i]);
+ }
+ return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+ void __iomem** virt, unsigned long *phys)
+{
+ struct resource resource = { };
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &resource);
+ if (ret)
+ goto out;
+
+ if (phys)
+ *phys = resource.start;
+ *virt = ioremap(resource.start, resource.end - resource.start);
+ if (!*virt)
+ ret = -EINVAL;
+
+out:
+ return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+ int ret = -ENODEV;
+ spu->name = get_property(node, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+ &spu->local_store_phys);
+ if (ret)
+ goto out;
+ ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+ &spu->problem_phys);
+ if (ret)
+ goto out_unmap;
+ ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+ NULL);
+ if (ret)
+ goto out_unmap;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ ret = spu_map_resource(node, 3,
+ (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+ return ret;
+}
+
+static int __init of_enumerate_spus(int (*fn)(void *data))
+{
+ int ret;
+ struct device_node *node;
+
+ ret = -ENODEV;
+ for (node = of_find_node_by_type(NULL, "spe");
+ node; node = of_find_node_by_type(node, "spe")) {
+ ret = fn(node);
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing %s\n",
+ __FUNCTION__, node->name);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int __init of_create_spu(struct spu *spu, void *data)
+{
+ int ret;
+ struct device_node *spe = (struct device_node *)data;
+
+ spu->pdata = kzalloc(sizeof(struct spu_pdata),
+ GFP_KERNEL);
+ if (!spu->pdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spu->node = find_spu_node_id(spe);
+ if (spu->node >= MAX_NUMNODES) {
+ printk(KERN_WARNING "SPE %s on node %d ignored,"
+ " node number too big\n", spe->full_name, spu->node);
+ printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ spu_get_pdata(spu)->nid = of_node_to_nid(spe);
+ if (spu_get_pdata(spu)->nid == -1)
+ spu_get_pdata(spu)->nid = 0;
+
+ ret = spu_map_device(spu, spe);
+ /* try old method */
+ if (ret)
+ ret = spu_map_device_old(spu, spe);
+ if (ret)
+ goto out_free;
+
+ ret = spu_map_interrupts(spu, spe);
+ if (ret)
+ ret = spu_map_interrupts_old(spu, spe);
+ if (ret)
+ goto out_unmap;
+
+ spu_get_pdata(spu)->devnode = of_node_get(spe);
+
+ pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n", spu->name,
+ spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
+ spu->priv2, spu->number);
+ goto out;
+
+out_unmap:
+ spu_unmap(spu);
+out_free:
+ kfree(spu->pdata);
+ spu->pdata = NULL;
+out:
+ return ret;
+}
+
+static int of_destroy_spu(struct spu *spu)
+{
+ spu_unmap(spu);
+ of_node_put(spu_get_pdata(spu)->devnode);
+ kfree(spu->pdata);
+ spu->pdata = NULL;
+ return 0;
+}
+
+static void of_dump_pdata_fields(struct spu *spu)
+{
+}
+
+const struct spu_management_ops spu_management_of_ops = {
+ .enumerate_spus = of_enumerate_spus,
+ .create_spu = of_create_spu,
+ .destroy_spu = of_destroy_spu,
+ .dump_pdata_fields = of_dump_pdata_fields,
+};
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+ old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+ old_mask & mask);
}
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+ old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+ old_mask | mask);
}
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
- out_be64(&spu->priv1->int_mask_RW[class], mask);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
}
static u64 int_mask_get(struct spu *spu, int class)
{
- return in_be64(&spu->priv1->int_mask_RW[class]);
+ return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
}
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
- out_be64(&spu->priv1->int_stat_RW[class], stat);
+ out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
}
static u64 int_stat_get(struct spu *spu, int class)
{
- return in_be64(&spu->priv1->int_stat_RW[class]);
+ return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
}
static void cpu_affinity_set(struct spu *spu, int cpu)
{
u64 target = iic_get_target_id(cpu);
u64 route = target << 48 | target << 32 | target << 16;
- out_be64(&spu->priv1->int_route_RW, route);
+ out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
}
static u64 mfc_dar_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_dar_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
}
static u64 mfc_dsisr_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_dsisr_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
}
static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
- out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
}
static void mfc_sdr_setup(struct spu *spu)
{
- out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
}
static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
- out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
}
static u64 mfc_sr1_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_sr1_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
}
static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
- out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
}
static u64 mfc_tclass_id_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_tclass_id_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
}
static void tlb_invalidate(struct spu *spu)
{
- out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+ out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
}
static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
- out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+ out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
+ id);
}
static u64 resource_allocation_groupID_get(struct spu *spu)
{
- return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+ return in_be64(
+ &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
}
static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
- out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+ out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
+ enable);
}
static u64 resource_allocation_enable_get(struct spu *spu)
{
- return in_be64(&spu->priv1->resource_allocation_enable_RW);
+ return in_be64(
+ &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
}
const struct spu_priv1_ops spu_priv1_mmio_ops =
Index: cell--common--6/arch/powerpc/platforms/cell/spu_priv1_mmio.h
===================================================================
--- /dev/null
+++ cell--common--6/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
+/*
+ * spu hypervisor abstraction for direct hardware access.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SPU_PRIV1_MMIO_H
+#define SPU_PRIV1_MMIO_H
+
+struct device_node *spu_devnode(struct spu *spu);
+
+#endif /* SPU_PRIV1_MMIO_H */
Index: cell--common--6/arch/powerpc/xmon/xmon.c
===================================================================
--- cell--common--6.orig/arch/powerpc/xmon/xmon.c
+++ cell--common--6/arch/powerpc/xmon/xmon.c
@@ -2769,8 +2769,6 @@
DUMP_FIELD(spu, "0x%x", number);
DUMP_FIELD(spu, "%s", name);
- DUMP_FIELD(spu, "%s", devnode->full_name);
- DUMP_FIELD(spu, "0x%x", nid);
DUMP_FIELD(spu, "0x%lx", local_store_phys);
DUMP_FIELD(spu, "0x%p", local_store);
DUMP_FIELD(spu, "0x%lx", ls_size);
@@ -2794,12 +2792,8 @@
DUMP_FIELD(spu, "0x%x", problem->spu_runcntl_RW);
DUMP_FIELD(spu, "0x%x", problem->spu_status_R);
DUMP_FIELD(spu, "0x%x", problem->spu_npc_RW);
- DUMP_FIELD(spu, "0x%p", priv1);
-
- if (spu->priv1)
- DUMP_FIELD(spu, "0x%lx", priv1->mfc_sr1_RW);
-
DUMP_FIELD(spu, "0x%p", priv2);
+ spu_dump_pdata_fields(spu);
}
static int do_spu_cmd(void)
Index: cell--common--6/include/asm-powerpc/spu.h
===================================================================
--- cell--common--6.orig/include/asm-powerpc/spu.h
+++ cell--common--6/include/asm-powerpc/spu.h
@@ -111,13 +111,11 @@
u8 *local_store;
unsigned long problem_phys;
struct spu_problem __iomem *problem;
- struct spu_priv1 __iomem *priv1;
struct spu_priv2 __iomem *priv2;
struct list_head list;
struct list_head sched_list;
struct list_head full_list;
int number;
- int nid;
unsigned int irqs[3];
u32 node;
u64 flags;
@@ -144,8 +142,7 @@
char irq_c1[8];
char irq_c2[8];
- struct device_node *devnode;
-
+ void* pdata; /* platform private data */
struct sys_device sysdev;
};
Index: cell--common--6/include/asm-powerpc/spu_priv1.h
===================================================================
--- cell--common--6.orig/include/asm-powerpc/spu_priv1.h
+++ cell--common--6/include/asm-powerpc/spu_priv1.h
@@ -21,12 +21,13 @@
#define _SPU_PRIV1_H
#if defined(__KERNEL__)
+#include <linux/types.h>
+
struct spu;
/* access to priv1 registers */
-struct spu_priv1_ops
-{
+struct spu_priv1_ops {
void (*int_mask_and) (struct spu *spu, int class, u64 mask);
void (*int_mask_or) (struct spu *spu, int class, u64 mask);
void (*int_mask_set) (struct spu *spu, int class, u64 mask);
@@ -171,12 +172,49 @@
return spu_priv1_ops->resource_allocation_enable_get(spu);
}
-/* The declarations folowing are put here for convenience
- * and only intended to be used by the platform setup code
- * for initializing spu_priv1_ops.
+/* spu management abstraction */
+
+struct spu_management_ops {
+ int (*enumerate_spus)(int (*fn)(void *data));
+ int (*create_spu)(struct spu *spu, void *data);
+ int (*destroy_spu)(struct spu *spu);
+ void (*dump_pdata_fields)(struct spu *spu);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+ return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+ return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+ return spu_management_ops->destroy_spu(spu);
+}
+
+static inline void
+spu_dump_pdata_fields (struct spu *spu)
+{
+ if (spu_management_ops->dump_pdata_fields)
+ spu_management_ops->dump_pdata_fields(spu);
+}
+
+/*
+ * The declarations folowing are put here for convenience
+ * and only intended to be used by the platform setup code.
*/
extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_management_ops spu_management_of_ops;
#endif /* __KERNEL__ */
#endif
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-10 20:01 [PATCH 6/16] cell: abstract spu management routines Geoff Levand
@ 2006-11-13 4:11 ` Michael Ellerman
2006-11-13 4:34 ` Geoff Levand
2006-11-14 3:44 ` Michael Ellerman
1 sibling, 1 reply; 12+ messages in thread
From: Michael Ellerman @ 2006-11-13 4:11 UTC (permalink / raw)
To: Geoff Levand; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
On Fri, 2006-11-10 at 12:01 -0800, Geoff Levand wrote:
> This adds a platform specific spu management abstraction and the corresponding
> routines to support the IBM Cell Blade. It also removes the hypervisor only
> resources that were included in struct spu.
>
> Three new platform specific routines are introduced, spu_enumerate_spus(),
> spu_create_spu() and spu_destroy_spu(). The underlying design uses a new
> type, struct spu_management_ops, to hold function pointers that the platform
> setup code is expected to initialize to instances appropriate to that platform.
>
> For the IBM Cell Blade support, I put the hypervisor only resources that were
> in struct spu into a platform specific data structure struct spu_pdata.
As far as I can see you haven't posted your HV-backed management ops, is
that right?
Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
that would simplify this quite a lot.
cheers
--
Michael Ellerman
OzLabs, IBM Australia Development Lab
wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)
We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-13 4:11 ` Michael Ellerman
@ 2006-11-13 4:34 ` Geoff Levand
2006-11-14 2:01 ` Michael Ellerman
0 siblings, 1 reply; 12+ messages in thread
From: Geoff Levand @ 2006-11-13 4:34 UTC (permalink / raw)
To: michael; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
Michael Ellerman wrote:
> On Fri, 2006-11-10 at 12:01 -0800, Geoff Levand wrote:
>> This adds a platform specific spu management abstraction and the corresponding
>> routines to support the IBM Cell Blade. It also removes the hypervisor only
>> resources that were included in struct spu.
>>
>> Three new platform specific routines are introduced, spu_enumerate_spus(),
>> spu_create_spu() and spu_destroy_spu(). The underlying design uses a new
>> type, struct spu_management_ops, to hold function pointers that the platform
>> setup code is expected to initialize to instances appropriate to that platform.
>>
>> For the IBM Cell Blade support, I put the hypervisor only resources that were
>> in struct spu into a platform specific data structure struct spu_pdata.
>
> As far as I can see you haven't posted your HV-backed management ops, is
> that right?
Yes, that is in '[PATCH 15/16] cell: add ps3 platform spu support' posted
with the other ps3pf patches.
> Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
> that would simplify this quite a lot.
Seems like a hack to me. My concern is that I just have to keep adding some
extra hack for every new spu feature that comes out. I would prefer to make
a proper design from the start, but if anyone can be more convincing I am
open to suggestions.
-Geoff
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-13 4:34 ` Geoff Levand
@ 2006-11-14 2:01 ` Michael Ellerman
2006-11-14 2:56 ` Geoff Levand
0 siblings, 1 reply; 12+ messages in thread
From: Michael Ellerman @ 2006-11-14 2:01 UTC (permalink / raw)
To: Geoff Levand; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
On Sun, 2006-11-12 at 20:34 -0800, Geoff Levand wrote:
> Michael Ellerman wrote:
> > On Fri, 2006-11-10 at 12:01 -0800, Geoff Levand wrote:
> >> This adds a platform specific spu management abstraction and the corresponding
> >> routines to support the IBM Cell Blade. It also removes the hypervisor only
> >> resources that were included in struct spu.
> >>
> >> Three new platform specific routines are introduced, spu_enumerate_spus(),
> >> spu_create_spu() and spu_destroy_spu(). The underlying design uses a new
> >> type, struct spu_management_ops, to hold function pointers that the platform
> >> setup code is expected to initialize to instances appropriate to that platform.
> >>
> >> For the IBM Cell Blade support, I put the hypervisor only resources that were
> >> in struct spu into a platform specific data structure struct spu_pdata.
> >
> > As far as I can see you haven't posted your HV-backed management ops, is
> > that right?
>
>
> Yes, that is in '[PATCH 15/16] cell: add ps3 platform spu support' posted
> with the other ps3pf patches.
OK, I'll have a look at it.
> > Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
> > that would simplify this quite a lot.
>
>
> Seems like a hack to me. My concern is that I just have to keep adding some
> extra hack for every new spu feature that comes out. I would prefer to make
> a proper design from the start, but if anyone can be more convincing I am
> open to suggestions.
Well the whole thrust of the flattened-device-tree model, is that we do
as much platform-specific hackery in a boot-loader/early-init, and
present the hardware in as standard a way as possible to the kernel via
the device tree.
The hope is that this isolates most of the kernel from platform specific
details, as far as is possible - there will always be some things that
need to be abstracted out - for that we have ppc_md and a few other
callbacks.
The priv1_ops serve that purpose, providing callbacks, and there's
really no way around that - you can't tap the priv1 area when you're
running under a HV - fine. But for just finding the spus it strikes me
that it would be _nicer_, perhaps not easier :), to have your
"enumerate_spus" populate the flat device tree early on - which would
leave more of the spu code untouched by the hv/bare-metal issue.
But as I said before, I haven't looked thoroughly at the code, so
perhaps there's some obvious reason why that wouldn't work.
cheers
--
Michael Ellerman
OzLabs, IBM Australia Development Lab
wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)
We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 2:01 ` Michael Ellerman
@ 2006-11-14 2:56 ` Geoff Levand
2006-11-14 3:13 ` Michael Ellerman
0 siblings, 1 reply; 12+ messages in thread
From: Geoff Levand @ 2006-11-14 2:56 UTC (permalink / raw)
To: michael; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
Michael Ellerman wrote:
>> > Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
>> > that would simplify this quite a lot.
>>
>>
>> Seems like a hack to me. My concern is that I just have to keep adding some
>> extra hack for every new spu feature that comes out. I would prefer to make
>> a proper design from the start, but if anyone can be more convincing I am
>> open to suggestions.
>
> Well the whole thrust of the flattened-device-tree model, is that we do
> as much platform-specific hackery in a boot-loader/early-init, and
> present the hardware in as standard a way as possible to the kernel via
The thing is that the spus are virtualized, so to create one takes up
HV resources, mainly HV memory. Creating spus in the bootloader has
several problems. One is that you could be allocating HV memory that would
never be used if the kernel is not configured for spu support, and this
memory could be used for other HV support. Another problem is the
management of those HV resources across kernel reloads, with kexec for
example. If the management is split then both entities need to have
knowledge of the other, which complicates things.
> The hope is that this isolates most of the kernel from platform specific
> details, as far as is possible - there will always be some things that
> need to be abstracted out - for that we have ppc_md and a few other
> callbacks.
>
> The priv1_ops serve that purpose, providing callbacks, and there's
> really no way around that - you can't tap the priv1 area when you're
> running under a HV - fine. But for just finding the spus it strikes me
> that it would be _nicer_, perhaps not easier :), to have your
> "enumerate_spus" populate the flat device tree early on - which would
> leave more of the spu code untouched by the hv/bare-metal issue.
And how many would you like to find? 1? 5? 400? Although there is
a current limitation in the HV implementation, these are logical
spus. It would seem the kernel could create spus based on the need,
and thus better balance resource usage, but this is not at all how
the current spu code works though. I don't plan to do any work on
this, but it would be nice to keep it open.
-Geoff
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 2:56 ` Geoff Levand
@ 2006-11-14 3:13 ` Michael Ellerman
2006-11-14 11:32 ` Geoff Levand
0 siblings, 1 reply; 12+ messages in thread
From: Michael Ellerman @ 2006-11-14 3:13 UTC (permalink / raw)
To: Geoff Levand; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
On Mon, 2006-11-13 at 18:56 -0800, Geoff Levand wrote:
> Michael Ellerman wrote:
> >> > Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
> >> > that would simplify this quite a lot.
> >>
> >>
> >> Seems like a hack to me. My concern is that I just have to keep adding some
> >> extra hack for every new spu feature that comes out. I would prefer to make
> >> a proper design from the start, but if anyone can be more convincing I am
> >> open to suggestions.
> >
> > Well the whole thrust of the flattened-device-tree model, is that we do
> > as much platform-specific hackery in a boot-loader/early-init, and
> > present the hardware in as standard a way as possible to the kernel via
>
>
> The thing is that the spus are virtualized, so to create one takes up
> HV resources, mainly HV memory. Creating spus in the bootloader has
> several problems. One is that you could be allocating HV memory that would
> never be used if the kernel is not configured for spu support, and this
> memory could be used for other HV support. Another problem is the
> management of those HV resources across kernel reloads, with kexec for
> example. If the management is split then both entities need to have
> knowledge of the other, which complicates things.
Yeah I knew you were going to say that :) How much memory does it take
in the HV to create a "logical spu"?
Kexec might complicate things, is it really high on your feature list?
> > The hope is that this isolates most of the kernel from platform specific
> > details, as far as is possible - there will always be some things that
> > need to be abstracted out - for that we have ppc_md and a few other
> > callbacks.
> >
> > The priv1_ops serve that purpose, providing callbacks, and there's
> > really no way around that - you can't tap the priv1 area when you're
> > running under a HV - fine. But for just finding the spus it strikes me
> > that it would be _nicer_, perhaps not easier :), to have your
> > "enumerate_spus" populate the flat device tree early on - which would
> > leave more of the spu code untouched by the hv/bare-metal issue.
>
>
> And how many would you like to find? 1? 5? 400? Although there is
> a current limitation in the HV implementation, these are logical
> spus. It would seem the kernel could create spus based on the need,
> and thus better balance resource usage, but this is not at all how
> the current spu code works though. I don't plan to do any work on
> this, but it would be nice to keep it open.
Actually I'd like 8, or is it 7. I don't see why having more "logical
spus" than "physical spus" is useful - the kernel can already schedule
many spu contexts over a smaller number of physical spus. As far as
giving unused spus back to the HV .. I'll believe it when I see it :)
cheers
--
Michael Ellerman
OzLabs, IBM Australia Development Lab
wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)
We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-10 20:01 [PATCH 6/16] cell: abstract spu management routines Geoff Levand
2006-11-13 4:11 ` Michael Ellerman
@ 2006-11-14 3:44 ` Michael Ellerman
2006-11-14 9:55 ` Arnd Bergmann
1 sibling, 1 reply; 12+ messages in thread
From: Michael Ellerman @ 2006-11-14 3:44 UTC (permalink / raw)
To: Geoff Levand; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
On Fri, 2006-11-10 at 12:01 -0800, Geoff Levand wrote:
> This adds a platform specific spu management abstraction and the corresponding
> routines to support the IBM Cell Blade. It also removes the hypervisor only
> resources that were included in struct spu.
>
> Three new platform specific routines are introduced, spu_enumerate_spus(),
> spu_create_spu() and spu_destroy_spu(). The underlying design uses a new
> type, struct spu_management_ops, to hold function pointers that the platform
> setup code is expected to initialize to instances appropriate to that platform.
>
> For the IBM Cell Blade support, I put the hypervisor only resources that were
> in struct spu into a platform specific data structure struct spu_pdata.
>
>
> Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com>
>
> ---
>
> Michael,
>
> Unfortunately, for your xmon spu support, your DUMP_FIELD is setup in such a
> way that it is not easy to change to use from inside spu_priv1_mmio.c, so I
> left of_dump_pdata_fields() empty. We'll need to work on something usable
> there, or make some other way to abstract those platform specific spu
> variables.
OK, back to the task at hand :) For the moment I'd rather see you leave
out dump_data_fields(), as neither HV or baremetal implementations do
anything. Just put the offending xmon code inside an #ifdef
CONFIG_PPC_CELL_NATIVE. eg:
Index: cell/arch/powerpc/xmon/xmon.c
===================================================================
--- cell.orig/arch/powerpc/xmon/xmon.c 2006-11-14 14:43:11.000000000 +1100
+++ cell/arch/powerpc/xmon/xmon.c 2006-11-14 14:42:35.000000000 +1100
@@ -2807,12 +2807,11 @@ static void dump_spu_fields(struct spu *
in_be32(&spu->problem->spu_status_R));
DUMP_VALUE("0x%x", problem->spu_npc_RW,
in_be32(&spu->problem->spu_npc_RW));
+#ifdef CONFIG_PPC_CELL_NATIVE
DUMP_FIELD(spu, "0x%p", priv1);
-
- if (spu->priv1) {
- DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
- in_be64(&spu->priv1->mfc_sr1_RW));
- }
+ DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
+ in_be64(&spu->priv1->mfc_sr1_RW));
+#endif
DUMP_FIELD(spu, "0x%p", priv2);
}
cheers
--
Michael Ellerman
OzLabs, IBM Australia Development Lab
wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)
We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 3:44 ` Michael Ellerman
@ 2006-11-14 9:55 ` Arnd Bergmann
2006-11-14 10:50 ` Geoff Levand
0 siblings, 1 reply; 12+ messages in thread
From: Arnd Bergmann @ 2006-11-14 9:55 UTC (permalink / raw)
To: michael; +Cc: linuxppc-dev, Paul Mackerras
On Tuesday 14 November 2006 04:44, Michael Ellerman wrote:
> OK, back to the task at hand :)  For the moment I'd rather see you leave
> out dump_data_fields(), as neither HV or baremetal implementations do
> anything. Just put the offending xmon code inside an #ifdef
> CONFIG_PPC_CELL_NATIVE. eg:
>
> Index: cell/arch/powerpc/xmon/xmon.c
> ===================================================================
> --- cell.orig/arch/powerpc/xmon/xmon.c  2006-11-14 14:43:11.000000000 +1100
> +++ cell/arch/powerpc/xmon/xmon.c       2006-11-14 14:42:35.000000000 +1100
> @@ -2807,12 +2807,11 @@ static void dump_spu_fields(struct spu *
>                         in_be32(&spu->problem->spu_status_R));
>         DUMP_VALUE("0x%x", problem->spu_npc_RW,
>                         in_be32(&spu->problem->spu_npc_RW));
> +#ifdef CONFIG_PPC_CELL_NATIVE
>         DUMP_FIELD(spu, "0x%p", priv1);
> -
> -       if (spu->priv1) {
> -               DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
> -                               in_be64(&spu->priv1->mfc_sr1_RW));
> -       }
> +       DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
> +                       in_be64(&spu->priv1->mfc_sr1_RW));
> +#endif
Oops. Null pointer dereference.
You can't do this if you want to compile in both native and PS3PF
support in a single kernel. I think your original code that prints
priv1 and the sr1 only if priv1 exists is fine on both ways.
Arnd <><
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 9:55 ` Arnd Bergmann
@ 2006-11-14 10:50 ` Geoff Levand
2006-11-15 0:07 ` Michael Ellerman
0 siblings, 1 reply; 12+ messages in thread
From: Geoff Levand @ 2006-11-14 10:50 UTC (permalink / raw)
To: Arnd Bergmann; +Cc: linuxppc-dev, Paul Mackerras
Arnd Bergmann wrote:
> On Tuesday 14 November 2006 04:44, Michael Ellerman wrote:
>> OK, back to the task at hand :)  For the moment I'd rather see you leave
>> out dump_data_fields(), as neither HV or baremetal implementations do
>> anything. Just put the offending xmon code inside an #ifdef
>> CONFIG_PPC_CELL_NATIVE. eg:
>>
>> Index: cell/arch/powerpc/xmon/xmon.c
>> ===================================================================
>> --- cell.orig/arch/powerpc/xmon/xmon.c  2006-11-14 14:43:11.000000000 +1100
>> +++ cell/arch/powerpc/xmon/xmon.c       2006-11-14 14:42:35.000000000 +1100
>> @@ -2807,12 +2807,11 @@ static void dump_spu_fields(struct spu *
>>                         in_be32(&spu->problem->spu_status_R));
>>         DUMP_VALUE("0x%x", problem->spu_npc_RW,
>>                         in_be32(&spu->problem->spu_npc_RW));
>> +#ifdef CONFIG_PPC_CELL_NATIVE
>>         DUMP_FIELD(spu, "0x%p", priv1);
>> -
>> -       if (spu->priv1) {
>> -               DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
>> -                               in_be64(&spu->priv1->mfc_sr1_RW));
>> -       }
>> +       DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
>> +                       in_be64(&spu->priv1->mfc_sr1_RW));
>> +#endif
>
> Oops. Null pointer dereference.
>
> You can't do this if you want to compile in both native and PS3PF
> support in a single kernel. I think your original code that prints
> priv1 and the sr1 only if priv1 exists is fine on both ways.

No, that won't work either since I moved priv1 to struct spu_pdata.

+struct spu_pdata {
+       int nid;
+       struct device_node *devnode;
+       struct spu_priv1 __iomem *priv1;
+};

The only way I see this working is to have a platform specific
routine to dump those spu_pdata variables.  The problem is that
only the spu platform code knows about the variables, and only the
xmon code knows how to do the dump.  That is why I suggested earlier
to re-work the xmon code to make a dump routine with global scope
that can be called from the various spu platform
spu_dump_pdata_fields() routines.

Note that this is not specific to the native support, since I want
to hook my platform code into xmon also, and I need to somehow
dump my variables.  So the solution is not to add priv1 back into
struct spu.

-Geoff
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 3:13 ` Michael Ellerman
@ 2006-11-14 11:32 ` Geoff Levand
0 siblings, 0 replies; 12+ messages in thread
From: Geoff Levand @ 2006-11-14 11:32 UTC (permalink / raw)
To: michael; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
Michael Ellerman wrote:
> On Mon, 2006-11-13 at 18:56 -0800, Geoff Levand wrote:
>> Michael Ellerman wrote:
>> >> > Why can't your PS3 platform code fake-up device nodes for SPUs? It seems
>> >> > that would simplify this quite a lot.
>> >>
>> >>
>> >> Seems like a hack to me. My concern is that I just have to keep adding some
>> >> extra hack for every new spu feature that comes out. I would prefer to make
>> >> a proper design from the start, but if anyone can be more convincing I am
>> >> open to suggestions.
>> >
>> > Well the whole thrust of the flattened-device-tree model, is that we do
>> > as much platform-specific hackery in a boot-loader/early-init, and
>> > present the hardware in as standard a way as possible to the kernel via
>>
>>
>> The thing is that the spus are virtualized, so to create one takes up
>> HV resources, mainly HV memory. Creating spus in the bootloader has
>> several problems. One is that you could be allocating HV memory that would
>> never be used if the kernel is not configured for spu support, and this
>> memory could be used for other HV support. Another problem is the
>> management of those HV resources across kernel reloads, with kexec for
>> example. If the management is split then both entities need to have
>> knowledge of the other, which complicates things.
>
> Yeah I knew you were going to say that :) How much memory does it take
> in the HV to create a "logical spu"?
I don't know the exact amount. I guess with the current implementation it
is minimal though, since it now only supports one-to-one physical to logical.
> Kexec might complicate things, is it really high on your feature list?
Yes, the bootloader needs USB HID for the menu, plus the capability to
load the kernel with tftp or from HD, usb storage, etc. The only
way currently is with kboot. No other bootloader has been ported,
and there is no plan to do any other.
>> > The hope is that this isolates most of the kernel from platform specific
>> > details, as far as is possible - there will always be some things that
>> > need to be abstracted out - for that we have ppc_md and a few other
>> > callbacks.
>> >
>> > The priv1_ops serve that purpose, providing callbacks, and there's
>> > really no way around that - you can't tap the priv1 area when you're
>> > running under a HV - fine. But for just finding the spus it strikes me
>> > that it would be _nicer_, perhaps not easier :), to have your
>> > "enumerate_spus" populate the flat device tree early on - which would
>> > leave more of the spu code untouched by the hv/bare-metal issue.
>>
>>
>> And how many would you like to find? 1? 5? 400? Although there is
>> a current limitation in the HV implementation, these are logical
>> spus. It would seem the kernel could create spus based on the need,
>> and thus better balance resource usage, but this is not at all how
>> the current spu code works though. I don't plan to do any work on
>> this, but it would be nice to keep it open.
>
> Actually I'd like 8, or is it 7. I don't see why having more "logical
> spus" than "physical spus" is useful - the kernel can already schedule
> many spu contexts over a smaller number of physical spus. As far as
> giving unused spus back to the HV .. I'll believe it when I see it :)
The way it works is that the policy module reserves a number of the
physical spus for the partition. So for example, on a machine that
has eight spus, it might reserve five for a partition. Now that
would mean the guest could have 100% of the five, and the potential to
share some of the others with another partition, but the current
implementation does not support the sharing part, so you only get
the reserved ones. If the guest creates and destroys spus on demand,
the unused ones can be used to run shared contexts in other
partitions, at least that is what the design allows for.
-Geoff
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-14 10:50 ` Geoff Levand
@ 2006-11-15 0:07 ` Michael Ellerman
2006-11-15 0:47 ` Geoff Levand
0 siblings, 1 reply; 12+ messages in thread
From: Michael Ellerman @ 2006-11-15 0:07 UTC (permalink / raw)
To: Geoff Levand; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
On Tue, 2006-11-14 at 02:50 -0800, Geoff Levand wrote:
> Arnd Bergmann wrote:
> > On Tuesday 14 November 2006 04:44, Michael Ellerman wrote:
> >> OK, back to the task at hand :)  For the moment I'd rather see you leave
> >> out dump_data_fields(), as neither HV or baremetal implementations do
> >> anything. Just put the offending xmon code inside an #ifdef
> >> CONFIG_PPC_CELL_NATIVE. eg:
> >>
> >> Index: cell/arch/powerpc/xmon/xmon.c
> >> ===================================================================
> >> --- cell.orig/arch/powerpc/xmon/xmon.c  2006-11-14 14:43:11.000000000 +1100
> >> +++ cell/arch/powerpc/xmon/xmon.c       2006-11-14 14:42:35.000000000 +1100
> >> @@ -2807,12 +2807,11 @@ static void dump_spu_fields(struct spu *
> >>                         in_be32(&spu->problem->spu_status_R));
> >>         DUMP_VALUE("0x%x", problem->spu_npc_RW,
> >>                         in_be32(&spu->problem->spu_npc_RW));
> >> +#ifdef CONFIG_PPC_CELL_NATIVE
> >>         DUMP_FIELD(spu, "0x%p", priv1);
> >> -
> >> -       if (spu->priv1) {
> >> -               DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
> >> -                               in_be64(&spu->priv1->mfc_sr1_RW));
> >> -       }
> >> +       DUMP_VALUE("0x%lx", priv1->mfc_sr1_RW,
> >> +                       in_be64(&spu->priv1->mfc_sr1_RW));
> >> +#endif
> >
> > Oops. Null pointer dereference.
> >
> > You can't do this if you want to compile in both native and PS3PF
> > support in a single kernel. I think your original code that prints
> > priv1 and the sr1 only if priv1 exists is fine either way.
>
> No, that won't work either since I moved priv1 to struct spu_pdata.
>
> +struct spu_pdata {
> + int nid;
> + struct device_node *devnode;
> + struct spu_priv1 __iomem *priv1;
> +};
OK, I wasn't very clear. I didn't mean that patch was the final solution
- obviously it needs to take into account the movement of priv1 etc.
> The only way I see this working is to have a platform specific
> routine to dump those spu_pdata variables. The problem is that
> only the spu platform code knows about the variables, and only the
> xmon code knows how to do the dump. That is why I suggested earlier
> to re-work the xmon code to make a dump routine with global scope
> that can be called from the various spu platform
> spu_dump_pdata_fields() routines.
>
> Note that this is not specific to the native support, since I want
> to hook my platform code into xmon also, and I need to somehow
> dump my variables. So the solution is not to add priv1 back into
> struct spu.
I'm not that bothered, but I really don't think we want to spend too
much effort engineering interfaces between xmon and the spu code - at
the end of the day xmon is just a hackish debugging aid for _kernel
developers_.
The simplest solution might just be to dump the address of the pdata
pointer, and leave it up to the xmon user to dump that and interpret it
as they see fit.
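
As a rough illustration of that suggestion, this is roughly what the xmon
side could reduce to, reusing the DUMP_FIELD macro quoted earlier in the
thread; the generic "pdata" member of struct spu is an assumption here,
since only the platform code knows the layout behind the pointer:

static void dump_spu_pdata(struct spu *spu)
{
	/* Sketch only: print just the address of the platform private
	 * data.  The xmon user can then dump that memory by hand and
	 * interpret the platform-specific fields (nid, devnode, priv1,
	 * ...) themselves, so xmon needs no knowledge of the layout. */
	DUMP_FIELD(spu, "0x%p", pdata);
}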
cheers
--
Michael Ellerman
OzLabs, IBM Australia Development Lab
wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)
We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
[-- Attachment #2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 189 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 6/16] cell: abstract spu management routines
2006-11-15 0:07 ` Michael Ellerman
@ 2006-11-15 0:47 ` Geoff Levand
0 siblings, 0 replies; 12+ messages in thread
From: Geoff Levand @ 2006-11-15 0:47 UTC (permalink / raw)
To: michael; +Cc: linuxppc-dev, Paul Mackerras, Arnd Bergmann
Michael Ellerman wrote:
> I'm not that bothered, but I really don't think we want to spend too
> much effort engineering interfaces between xmon and the spu code - at
> the end of the day xmon is just a hackish debugging aid for _kernel
> developers_.
>
> The simplest solution might just be to dump the address of the pdata
> pointer, and leave it up to the xmon user to dump that and interpret it
> as they see fit.

OK, that's a good solution for now.  I'll set it up that way in my patches.

-Geoff
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2006-11-15 0:47 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-11-10 20:01 [PATCH 6/16] cell: abstract spu management routines Geoff Levand
2006-11-13 4:11 ` Michael Ellerman
2006-11-13 4:34 ` Geoff Levand
2006-11-14 2:01 ` Michael Ellerman
2006-11-14 2:56 ` Geoff Levand
2006-11-14 3:13 ` Michael Ellerman
2006-11-14 11:32 ` Geoff Levand
2006-11-14 3:44 ` Michael Ellerman
2006-11-14 9:55 ` Arnd Bergmann
2006-11-14 10:50 ` Geoff Levand
2006-11-15 0:07 ` Michael Ellerman
2006-11-15 0:47 ` Geoff Levand