X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fplatforms%2Fcell%2Fspu_base.c;h=bd7bffc3ddd08be19caf0a15572b5e727c6f0cd2;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=d75ae03df68679e9bd05d9d97c5ef00d0b18d174;hpb=64ba3f394c830ec48a1c31b53dcae312c56f1604;p=linux-2.6.git

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d75ae03df..bd7bffc3d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,37 +25,39 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-
-#include
-#include
-#include
+#include
+#include
+#include
 #include
-#include
+#include
+#include
+
+const struct spu_management_ops *spu_management_ops;
+const struct spu_priv1_ops *spu_priv1_ops;
 
-#include "interrupt.h"
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
        pr_debug("%s\n", __FUNCTION__);
-       force_sig(SIGBUS, /* info, */ current);
+       spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
        return 0;
 }
 
 static int __spu_trap_dma_align(struct spu *spu)
 {
        pr_debug("%s\n", __FUNCTION__);
-       force_sig(SIGBUS, /* info, */ current);
+       spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
        return 0;
 }
 
 static int __spu_trap_error(struct spu *spu)
 {
        pr_debug("%s\n", __FUNCTION__);
-       force_sig(SIGILL, /* info, */ current);
+       spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
        return 0;
 }
 
@@ -71,7 +73,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
-       u64 esid, vsid;
+       u64 esid, vsid, llp;
 
        pr_debug("%s\n", __FUNCTION__);
 
@@ -82,7 +84,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
-       if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+       esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+       switch(REGION_ID(ea)) {
+       case USER_REGION_ID:
+#ifdef CONFIG_HUGETLB_PAGE
+               if (in_hugepage_area(mm->context, ea))
+                       llp = mmu_psize_defs[mmu_huge_psize].sllp;
+               else
+#endif
+                       llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+               vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+                               SLB_VSID_USER | llp;
+               break;
+       case VMALLOC_REGION_ID:
+               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+               vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+                       SLB_VSID_KERNEL | llp;
+               break;
+       case KERNEL_REGION_ID:
+               llp = mmu_psize_defs[mmu_linear_psize].sllp;
+               vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+                       SLB_VSID_KERNEL | llp;
+               break;
+       default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
@@ -90,11 +115,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                return 1;
        }
 
-       esid = (ea & ESID_MASK) | SLB_ESID_V;
-       vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
-       if (in_hugepage_area(mm->context, ea))
-               vsid |= SLB_VSID_L;
-
        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
        out_be64(&priv2->slb_esid_RW, esid);
@@ -111,7 +131,7 @@
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);
 
        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
@@ -130,69 +150,18 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
-       if (spu->stop_callback)
-               spu->stop_callback(spu);
-       return 0;
-}
-
-static int __spu_trap_mailbox(struct spu *spu)
-{
-       if (spu->ibox_callback)
-               spu->ibox_callback(spu);
-
-       /* atomically disable SPU mailbox interrupts */
-       spin_lock(&spu->register_lock);
-       spu_int_mask_and(spu, 2, ~0x1);
-       spin_unlock(&spu->register_lock);
-       return 0;
-}
-
-static int __spu_trap_stop(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       spu->stop_code = in_be32(&spu->problem->spu_status_R);
-       if (spu->stop_callback)
-               spu->stop_callback(spu);
-       return 0;
-}
-
-static int __spu_trap_halt(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       spu->stop_code = in_be32(&spu->problem->spu_status_R);
-       if (spu->stop_callback)
-               spu->stop_callback(spu);
-       return 0;
-}
-
-static int __spu_trap_tag_group(struct spu *spu)
-{
-       pr_debug("%s\n", __FUNCTION__);
-       /* wake_up(&spu->dma_wq); */
-       return 0;
-}
-
-static int __spu_trap_spubox(struct spu *spu)
-{
-       if (spu->wbox_callback)
-               spu->wbox_callback(spu);
-
-       /* atomically disable SPU mailbox interrupts */
-       spin_lock(&spu->register_lock);
-       spu_int_mask_and(spu, 2, ~0x10);
-       spin_unlock(&spu->register_lock);
+       spu->stop_callback(spu);
        return 0;
 }
 
 static irqreturn_t
-spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_0(int irq, void *data)
 {
        struct spu *spu;
 
        spu = data;
        spu->class_0_pending = 1;
-       if (spu->stop_callback)
-               spu->stop_callback(spu);
+       spu->stop_callback(spu);
 
        return IRQ_HANDLED;
 }
 
@@ -209,12 +178,12 @@ spu_irq_class_0_bottom(struct spu *spu)
 
        stat &= mask;
 
-       if (stat & 1) /* invalid MFC DMA */
-               __spu_trap_invalid_dma(spu);
-
-       if (stat & 2) /* invalid DMA alignment */
+       if (stat & 1) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);
 
+       if (stat & 2) /* invalid MFC DMA */
+               __spu_trap_invalid_dma(spu);
+
        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);
 
@@ -225,7 +194,7 @@ spu_irq_class_0_bottom(struct spu *spu)
 EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
 
 static irqreturn_t
-spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_1(int irq, void *data)
 {
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;
@@ -242,6 +211,8 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
        spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
+       pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+                       dar, dsisr);
 
        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);
@@ -261,88 +232,104 @@
 EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
-spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_2(int irq, void *data)
 {
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
 
        spu = data;
+       spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
+       /* ignore interrupts we're not waiting for */
+       stat &= mask;
+
+       /*
+        * mailbox interrupts (0x1 and 0x10) are level triggered.
+        * mask them now before acknowledging.
+        */
+       if (stat & 0x11)
+               spu_int_mask_and(spu, 2, ~(stat & 0x11));
+       /* acknowledge all interrupts before the callbacks */
+       spu_int_stat_clear(spu, 2, stat);
+       spin_unlock(&spu->register_lock);
 
        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
-       stat &= mask;
-
        if (stat & 1)  /* PPC core mailbox */
-               __spu_trap_mailbox(spu);
+               spu->ibox_callback(spu);
 
        if (stat & 2) /* SPU stop-and-signal */
-               __spu_trap_stop(spu);
+               spu->stop_callback(spu);
 
        if (stat & 4) /* SPU halted */
-               __spu_trap_halt(spu);
+               spu->stop_callback(spu);
 
        if (stat & 8) /* DMA tag group complete */
-               __spu_trap_tag_group(spu);
+               spu->mfc_callback(spu);
 
        if (stat & 0x10) /* SPU mailbox threshold */
-               __spu_trap_spubox(spu);
+               spu->wbox_callback(spu);
 
-       spu_int_stat_clear(spu, 2, stat);
        return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
-static int
-spu_request_irqs(struct spu *spu)
+static int spu_request_irqs(struct spu *spu)
 {
-       int ret;
-       int irq_base;
-
-       irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-       snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
-       ret = request_irq(irq_base + spu->isrc,
-                spu_irq_class_0, 0, spu->irq_c0, spu);
-       if (ret)
-               goto out;
-
-       snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
-       ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
-                spu_irq_class_1, 0, spu->irq_c1, spu);
-       if (ret)
-               goto out1;
+       int ret = 0;
 
-       snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
-       ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
-                spu_irq_class_2, 0, spu->irq_c2, spu);
-       if (ret)
-               goto out2;
-       goto out;
+       if (spu->irqs[0] != NO_IRQ) {
+               snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
+                        spu->number);
+               ret = request_irq(spu->irqs[0], spu_irq_class_0,
+                                 IRQF_DISABLED,
+                                 spu->irq_c0, spu);
+               if (ret)
+                       goto bail0;
+       }
+       if (spu->irqs[1] != NO_IRQ) {
+               snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
+                        spu->number);
+               ret = request_irq(spu->irqs[1], spu_irq_class_1,
+                                 IRQF_DISABLED,
+                                 spu->irq_c1, spu);
+               if (ret)
+                       goto bail1;
+       }
+       if (spu->irqs[2] != NO_IRQ) {
+               snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
+                        spu->number);
+               ret = request_irq(spu->irqs[2], spu_irq_class_2,
+                                 IRQF_DISABLED,
+                                 spu->irq_c2, spu);
+               if (ret)
+                       goto bail2;
+       }
+       return 0;
 
-out2:
-       free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-out1:
-       free_irq(irq_base + spu->isrc, spu);
-out:
+bail2:
+       if (spu->irqs[1] != NO_IRQ)
+               free_irq(spu->irqs[1], spu);
+bail1:
+       if (spu->irqs[0] != NO_IRQ)
+               free_irq(spu->irqs[0], spu);
+bail0:
        return ret;
 }
 
-static void
-spu_free_irqs(struct spu *spu)
+static void spu_free_irqs(struct spu *spu)
 {
-       int irq_base;
-
-       irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-       free_irq(irq_base + spu->isrc, spu);
-       free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-       free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+       if (spu->irqs[0] != NO_IRQ)
+               free_irq(spu->irqs[0], spu);
+       if (spu->irqs[1] != NO_IRQ)
+               free_irq(spu->irqs[1], spu);
+       if (spu->irqs[2] != NO_IRQ)
+               free_irq(spu->irqs[2], spu);
 }
 
-static LIST_HEAD(spu_list);
-static DECLARE_MUTEX(spu_mutex);
+static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
+static DEFINE_MUTEX(spu_mutex);
 
 static void spu_init_channels(struct spu *spu)
 {
@@ -378,33 +365,42 @@ static void spu_init_channels(struct spu *spu)
        }
 }
 
-struct spu *spu_alloc(void)
+struct spu *spu_alloc_node(int node)
 {
-       struct spu *spu;
+       struct spu *spu = NULL;
 
-       down(&spu_mutex);
-       if (!list_empty(&spu_list)) {
-               spu = list_entry(spu_list.next, struct spu, list);
+       mutex_lock(&spu_mutex);
+       if (!list_empty(&spu_list[node])) {
+               spu = list_entry(spu_list[node].next, struct spu, list);
                list_del_init(&spu->list);
-               pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
-       } else {
-               pr_debug("No SPU left\n");
-               spu = NULL;
+               pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+               spu_init_channels(spu);
        }
-       up(&spu_mutex);
+       mutex_unlock(&spu_mutex);
 
-       if (spu)
-               spu_init_channels(spu);
+       return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_node);
+
+struct spu *spu_alloc(void)
+{
+       struct spu *spu = NULL;
+       int node;
+
+       for (node = 0; node < MAX_NUMNODES; node++) {
+               spu = spu_alloc_node(node);
+               if (spu)
+                       break;
+       }
 
        return spu;
 }
-EXPORT_SYMBOL_GPL(spu_alloc);
 
 void spu_free(struct spu *spu)
 {
-       down(&spu_mutex);
-       list_add_tail(&spu->list, &spu_list);
-       up(&spu_mutex);
+       mutex_lock(&spu_mutex);
+       list_add_tail(&spu->list, &spu_list[spu->node]);
+       mutex_unlock(&spu_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
@@ -484,14 +480,17 @@ int spu_irq_class_1_bottom(struct spu *spu)
        ea = spu->dar;
        dsisr = spu->dsisr;
-       if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
+       if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
+               u64 flags;
+
                access = (_PAGE_PRESENT | _PAGE_USER);
                access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+               local_irq_save(flags);
                if (hash_page(ea, access, 0x300) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+               local_irq_restore(flags);
        }
-       if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
-           (dsisr & MFC_DSISR_ACCESS_DENIED)) {
+       if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
                if ((ret = spu_handle_mm_fault(spu)) != 0)
                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
                else
@@ -502,154 +501,132 @@ int spu_irq_class_1_bottom(struct spu *spu)
        if (!error) {
                spu_restart_dma(spu);
        } else {
-               __spu_trap_invalid_dma(spu);
+               spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
        }
 
        return ret;
 }
 
-void spu_irq_setaffinity(struct spu *spu, int cpu)
-{
-       u64 target = iic_get_target_id(cpu);
-       u64 route = target << 48 | target << 32 | target << 16;
-       spu_int_route_set(spu, route);
-}
-EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+struct sysdev_class spu_sysdev_class = {
+       set_kset_name("spu")
+};
 
-static void __iomem * __init map_spe_prop(struct device_node *n,
-               const char *name)
+int spu_add_sysdev_attr(struct sysdev_attribute *attr)
 {
-       struct address_prop {
-               unsigned long address;
-               unsigned int len;
-       } __attribute__((packed)) *prop;
-
-       void *p;
-       int proplen;
-
-       p = get_property(n, name, &proplen);
-       if (proplen != sizeof (struct address_prop))
-               return NULL;
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-       prop = p;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysdev_create_file(&spu->sysdev, attr);
 
-       return ioremap(prop->address, prop->len);
+       mutex_unlock(&spu_mutex);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 
-static void spu_unmap(struct spu *spu)
+int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
-       iounmap(spu->priv2);
-       iounmap(spu->priv1);
-       iounmap(spu->problem);
-       iounmap((u8 __iomem *)spu->local_store);
-}
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-static int __init spu_map_device(struct spu *spu, struct device_node *spe)
-{
-       char *prop;
-       int ret;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysfs_create_group(&spu->sysdev.kobj, attrs);
 
-       ret = -ENODEV;
-       prop = get_property(spe, "isrc", NULL);
-       if (!prop)
-               goto out;
-       spu->isrc = *(unsigned int *)prop;
+       mutex_unlock(&spu_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
 
-       spu->name = get_property(spe, "name", NULL);
-       if (!spu->name)
-               goto out;
 
-       prop = get_property(spe, "local-store", NULL);
-       if (!prop)
-               goto out;
-       spu->local_store_phys = *(unsigned long *)prop;
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-       /* we use local store as ram, not io memory */
-       spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
-       if (!spu->local_store)
-               goto out;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysdev_remove_file(&spu->sysdev, attr);
 
-       spu->problem= map_spe_prop(spe, "problem");
-       if (!spu->problem)
-               goto out_unmap;
+       mutex_unlock(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
 
-       spu->priv1= map_spe_prop(spe, "priv1");
-       /* priv1 is not available on a hypervisor */
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-       spu->priv2= map_spe_prop(spe, "priv2");
-       if (!spu->priv2)
-               goto out_unmap;
-       ret = 0;
-       goto out;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysfs_remove_group(&spu->sysdev.kobj, attrs);
 
-out_unmap:
-       spu_unmap(spu);
-out:
-       return ret;
+       mutex_unlock(&spu_mutex);
 }
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
 
-static int __init find_spu_node_id(struct device_node *spe)
+static int spu_create_sysdev(struct spu *spu)
 {
-       unsigned int *id;
-       struct device_node *cpu;
+       int ret;
+
+       spu->sysdev.id = spu->number;
+       spu->sysdev.cls = &spu_sysdev_class;
+       ret = sysdev_register(&spu->sysdev);
+       if (ret) {
+               printk(KERN_ERR "Can't register SPU %d with sysfs\n",
+                               spu->number);
+               return ret;
+       }
 
-       cpu = spe->parent->parent;
-       id = (unsigned int *)get_property(cpu, "node-id", NULL);
+       sysfs_add_device_to_node(&spu->sysdev, spu->node);
 
-       return id ? *id : 0;
+       return 0;
 }
 
-static int __init create_spu(struct device_node *spe)
+static void spu_destroy_sysdev(struct spu *spu)
+{
+       sysfs_remove_device_from_node(&spu->sysdev, spu->node);
+       sysdev_unregister(&spu->sysdev);
+}
+
+static int __init create_spu(void *data)
 {
        struct spu *spu;
        int ret;
        static int number;
 
        ret = -ENOMEM;
-       spu = kmalloc(sizeof (*spu), GFP_KERNEL);
+       spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;
 
-       ret = spu_map_device(spu, spe);
-       if (ret)
-               goto out_free;
-
-       spu->node = find_spu_node_id(spe);
-       spu->stop_code = 0;
-       spu->slb_replace = 0;
-       spu->mm = NULL;
-       spu->ctx = NULL;
-       spu->rq = NULL;
-       spu->pid = 0;
-       spu->class_0_pending = 0;
-       spu->flags = 0UL;
-       spu->dar = 0UL;
-       spu->dsisr = 0UL;
        spin_lock_init(&spu->register_lock);
+       mutex_lock(&spu_mutex);
+       spu->number = number++;
+       mutex_unlock(&spu_mutex);
 
-       spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
-       spu_mfc_sr1_set(spu, 0x33);
+       ret = spu_create_spu(spu, data);
 
-       spu->ibox_callback = NULL;
-       spu->wbox_callback = NULL;
-       spu->stop_callback = NULL;
+       if (ret)
+               goto out_free;
 
-       down(&spu_mutex);
-       spu->number = number++;
+       spu_mfc_sdr_setup(spu);
+       spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
-               goto out_unmap;
+               goto out_destroy;
 
-       list_add(&spu->list, &spu_list);
-       up(&spu_mutex);
+       ret = spu_create_sysdev(spu);
+       if (ret)
+               goto out_free_irqs;
+
+       mutex_lock(&spu_mutex);
+       list_add(&spu->list, &spu_list[spu->node]);
+       list_add(&spu->full_list, &spu_full_list);
+       mutex_unlock(&spu_mutex);
 
-       pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
-               spu->name, spu->isrc, spu->local_store,
-               spu->problem, spu->priv1, spu->priv2, spu->number);
        goto out;
 
-out_unmap:
-       up(&spu_mutex);
-       spu_unmap(spu);
+out_free_irqs:
+       spu_free_irqs(spu);
+out_destroy:
+       spu_destroy_spu(spu);
 out_free:
        kfree(spu);
 out:
@@ -659,50 +636,55 @@ out:
 static void destroy_spu(struct spu *spu)
 {
        list_del_init(&spu->list);
+       list_del_init(&spu->full_list);
+
+       spu_destroy_sysdev(spu);
        spu_free_irqs(spu);
-       spu_unmap(spu);
+       spu_destroy_spu(spu);
        kfree(spu);
 }
 
 static void cleanup_spu_base(void)
 {
        struct spu *spu, *tmp;
-       down(&spu_mutex);
-       list_for_each_entry_safe(spu, tmp, &spu_list, list)
-               destroy_spu(spu);
-       up(&spu_mutex);
+       int node;
+
+       mutex_lock(&spu_mutex);
+       for (node = 0; node < MAX_NUMNODES; node++) {
+               list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
+                       destroy_spu(spu);
+       }
+       mutex_unlock(&spu_mutex);
+       sysdev_class_unregister(&spu_sysdev_class);
 }
 module_exit(cleanup_spu_base);
 
 static int __init init_spu_base(void)
 {
-       struct device_node *node;
-       int ret;
+       int i, ret;
 
-       ret = -ENODEV;
-       for (node = of_find_node_by_type(NULL, "spe");
-                       node; node = of_find_node_by_type(node, "spe")) {
-               ret = create_spu(node);
-               if (ret) {
-                       printk(KERN_WARNING "%s: Error initializing %s\n",
-                               __FUNCTION__, node->name);
-                       cleanup_spu_base();
-                       break;
-               }
-       }
-       /* in some old firmware versions, the spe is called 'spc', so we
-          look for that as well */
-       for (node = of_find_node_by_type(NULL, "spc");
-                       node; node = of_find_node_by_type(node, "spc")) {
-               ret = create_spu(node);
-               if (ret) {
-                       printk(KERN_WARNING "%s: Error initializing %s\n",
-                               __FUNCTION__, node->name);
-                       cleanup_spu_base();
-                       break;
-               }
+       if (!spu_management_ops)
+               return 0;
+
+       /* create sysdev class for spus */
+       ret = sysdev_class_register(&spu_sysdev_class);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < MAX_NUMNODES; i++)
+               INIT_LIST_HEAD(&spu_list[i]);
+
+       ret = spu_enumerate_spus(create_spu);
+
+       if (ret) {
"%s: Error initializing spus\n", + __FUNCTION__); + cleanup_spu_base(); + return ret; } + + xmon_register_spus(&spu_full_list); + return ret; } module_init(init_spu_base);