From 871480933a1c28f8a9fed4c4d34d06c439a7a422 Mon Sep 17 00:00:00 2001
From: Srikant Patnaik
Date: Sun, 11 Jan 2015 12:28:04 +0530
Subject: Moved, renamed, and deleted files

The original directory structure was scattered and unorganized.
The changes rearrange it to match the upstream kernel source layout.
---
arch/powerpc/platforms/cell/Kconfig | 163 ++
arch/powerpc/platforms/cell/Makefile | 49 +
arch/powerpc/platforms/cell/axon_msi.c | 502 ++++
arch/powerpc/platforms/cell/beat.c | 264 ++
arch/powerpc/platforms/cell/beat.h | 39 +
arch/powerpc/platforms/cell/beat_htab.c | 440 ++++
arch/powerpc/platforms/cell/beat_hvCall.S | 287 ++
arch/powerpc/platforms/cell/beat_interrupt.c | 253 ++
arch/powerpc/platforms/cell/beat_interrupt.h | 30 +
arch/powerpc/platforms/cell/beat_iommu.c | 115 +
arch/powerpc/platforms/cell/beat_spu_priv1.c | 205 ++
arch/powerpc/platforms/cell/beat_syscall.h | 164 ++
arch/powerpc/platforms/cell/beat_udbg.c | 98 +
arch/powerpc/platforms/cell/beat_wrapper.h | 290 ++
arch/powerpc/platforms/cell/cbe_cpufreq.c | 209 ++
arch/powerpc/platforms/cell/cbe_cpufreq.h | 24 +
.../powerpc/platforms/cell/cbe_cpufreq_pervasive.c | 115 +
arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c | 156 ++
arch/powerpc/platforms/cell/cbe_powerbutton.c | 118 +
arch/powerpc/platforms/cell/cbe_regs.c | 281 ++
arch/powerpc/platforms/cell/cbe_thermal.c | 399 +++
arch/powerpc/platforms/cell/celleb_pci.c | 500 ++++
arch/powerpc/platforms/cell/celleb_pci.h | 46 +
arch/powerpc/platforms/cell/celleb_scc.h | 232 ++
arch/powerpc/platforms/cell/celleb_scc_epci.c | 429 +++
arch/powerpc/platforms/cell/celleb_scc_pciex.c | 541 ++++
arch/powerpc/platforms/cell/celleb_scc_sio.c | 101 +
arch/powerpc/platforms/cell/celleb_scc_uhc.c | 95 +
arch/powerpc/platforms/cell/celleb_setup.c | 243 ++
arch/powerpc/platforms/cell/cpufreq_spudemand.c | 171 ++
arch/powerpc/platforms/cell/interrupt.c | 411 +++
arch/powerpc/platforms/cell/interrupt.h | 89 +
arch/powerpc/platforms/cell/iommu.c | 1237 +++++++++
arch/powerpc/platforms/cell/pervasive.c | 133 +
arch/powerpc/platforms/cell/pervasive.h | 42 +
arch/powerpc/platforms/cell/pmu.c | 424 +++
arch/powerpc/platforms/cell/qpace_setup.c | 148 ++
arch/powerpc/platforms/cell/ras.c | 355 +++
arch/powerpc/platforms/cell/ras.h | 9 +
arch/powerpc/platforms/cell/setup.c | 281 ++
arch/powerpc/platforms/cell/smp.c | 187 ++
arch/powerpc/platforms/cell/spider-pci.c | 184 ++
arch/powerpc/platforms/cell/spider-pic.c | 362 +++
arch/powerpc/platforms/cell/spu_base.c | 852 ++++++
arch/powerpc/platforms/cell/spu_callbacks.c | 74 +
arch/powerpc/platforms/cell/spu_fault.c | 94 +
arch/powerpc/platforms/cell/spu_manage.c | 556 ++++
arch/powerpc/platforms/cell/spu_notify.c | 68 +
arch/powerpc/platforms/cell/spu_priv1_mmio.c | 180 ++
arch/powerpc/platforms/cell/spu_priv1_mmio.h | 26 +
arch/powerpc/platforms/cell/spu_syscalls.c | 178 ++
arch/powerpc/platforms/cell/spufs/Makefile | 61 +
arch/powerpc/platforms/cell/spufs/backing_ops.c | 413 +++
arch/powerpc/platforms/cell/spufs/context.c | 188 ++
arch/powerpc/platforms/cell/spufs/coredump.c | 250 ++
arch/powerpc/platforms/cell/spufs/fault.c | 191 ++
arch/powerpc/platforms/cell/spufs/file.c | 2775 ++++++++++++++++++++
arch/powerpc/platforms/cell/spufs/gang.c | 87 +
arch/powerpc/platforms/cell/spufs/hw_ops.c | 349 +++
arch/powerpc/platforms/cell/spufs/inode.c | 866 ++++++
arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | 183 ++
arch/powerpc/platforms/cell/spufs/run.c | 454 ++++
arch/powerpc/platforms/cell/spufs/sched.c | 1174 +++++++++
arch/powerpc/platforms/cell/spufs/spu_restore.c | 336 +++
.../platforms/cell/spufs/spu_restore_crt0.S | 116 +
.../cell/spufs/spu_restore_dump.h_shipped | 935 +++++++
arch/powerpc/platforms/cell/spufs/spu_save.c | 195 ++
arch/powerpc/platforms/cell/spufs/spu_save_crt0.S | 102 +
.../platforms/cell/spufs/spu_save_dump.h_shipped | 743 ++++++
arch/powerpc/platforms/cell/spufs/spu_utils.h | 160 ++
arch/powerpc/platforms/cell/spufs/spufs.h | 376 +++
arch/powerpc/platforms/cell/spufs/sputrace.h | 39 +
arch/powerpc/platforms/cell/spufs/switch.c | 2222 ++++++++++++++++
arch/powerpc/platforms/cell/spufs/syscalls.c | 86 +
74 files changed, 24750 insertions(+)
create mode 100644 arch/powerpc/platforms/cell/Kconfig
create mode 100644 arch/powerpc/platforms/cell/Makefile
create mode 100644 arch/powerpc/platforms/cell/axon_msi.c
create mode 100644 arch/powerpc/platforms/cell/beat.c
create mode 100644 arch/powerpc/platforms/cell/beat.h
create mode 100644 arch/powerpc/platforms/cell/beat_htab.c
create mode 100644 arch/powerpc/platforms/cell/beat_hvCall.S
create mode 100644 arch/powerpc/platforms/cell/beat_interrupt.c
create mode 100644 arch/powerpc/platforms/cell/beat_interrupt.h
create mode 100644 arch/powerpc/platforms/cell/beat_iommu.c
create mode 100644 arch/powerpc/platforms/cell/beat_spu_priv1.c
create mode 100644 arch/powerpc/platforms/cell/beat_syscall.h
create mode 100644 arch/powerpc/platforms/cell/beat_udbg.c
create mode 100644 arch/powerpc/platforms/cell/beat_wrapper.h
create mode 100644 arch/powerpc/platforms/cell/cbe_cpufreq.c
create mode 100644 arch/powerpc/platforms/cell/cbe_cpufreq.h
create mode 100644 arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
create mode 100644 arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
create mode 100644 arch/powerpc/platforms/cell/cbe_powerbutton.c
create mode 100644 arch/powerpc/platforms/cell/cbe_regs.c
create mode 100644 arch/powerpc/platforms/cell/cbe_thermal.c
create mode 100644 arch/powerpc/platforms/cell/celleb_pci.c
create mode 100644 arch/powerpc/platforms/cell/celleb_pci.h
create mode 100644 arch/powerpc/platforms/cell/celleb_scc.h
create mode 100644 arch/powerpc/platforms/cell/celleb_scc_epci.c
create mode 100644 arch/powerpc/platforms/cell/celleb_scc_pciex.c
create mode 100644 arch/powerpc/platforms/cell/celleb_scc_sio.c
create mode 100644 arch/powerpc/platforms/cell/celleb_scc_uhc.c
create mode 100644 arch/powerpc/platforms/cell/celleb_setup.c
create mode 100644 arch/powerpc/platforms/cell/cpufreq_spudemand.c
create mode 100644 arch/powerpc/platforms/cell/interrupt.c
create mode 100644 arch/powerpc/platforms/cell/interrupt.h
create mode 100644 arch/powerpc/platforms/cell/iommu.c
create mode 100644 arch/powerpc/platforms/cell/pervasive.c
create mode 100644 arch/powerpc/platforms/cell/pervasive.h
create mode 100644 arch/powerpc/platforms/cell/pmu.c
create mode 100644 arch/powerpc/platforms/cell/qpace_setup.c
create mode 100644 arch/powerpc/platforms/cell/ras.c
create mode 100644 arch/powerpc/platforms/cell/ras.h
create mode 100644 arch/powerpc/platforms/cell/setup.c
create mode 100644 arch/powerpc/platforms/cell/smp.c
create mode 100644 arch/powerpc/platforms/cell/spider-pci.c
create mode 100644 arch/powerpc/platforms/cell/spider-pic.c
create mode 100644 arch/powerpc/platforms/cell/spu_base.c
create mode 100644 arch/powerpc/platforms/cell/spu_callbacks.c
create mode 100644 arch/powerpc/platforms/cell/spu_fault.c
create mode 100644 arch/powerpc/platforms/cell/spu_manage.c
create mode 100644 arch/powerpc/platforms/cell/spu_notify.c
create mode 100644 arch/powerpc/platforms/cell/spu_priv1_mmio.c
create mode 100644 arch/powerpc/platforms/cell/spu_priv1_mmio.h
create mode 100644 arch/powerpc/platforms/cell/spu_syscalls.c
create mode 100644 arch/powerpc/platforms/cell/spufs/Makefile
create mode 100644 arch/powerpc/platforms/cell/spufs/backing_ops.c
create mode 100644 arch/powerpc/platforms/cell/spufs/context.c
create mode 100644 arch/powerpc/platforms/cell/spufs/coredump.c
create mode 100644 arch/powerpc/platforms/cell/spufs/fault.c
create mode 100644 arch/powerpc/platforms/cell/spufs/file.c
create mode 100644 arch/powerpc/platforms/cell/spufs/gang.c
create mode 100644 arch/powerpc/platforms/cell/spufs/hw_ops.c
create mode 100644 arch/powerpc/platforms/cell/spufs/inode.c
create mode 100644 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
create mode 100644 arch/powerpc/platforms/cell/spufs/run.c
create mode 100644 arch/powerpc/platforms/cell/spufs/sched.c
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore.c
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save.c
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
create mode 100644 arch/powerpc/platforms/cell/spufs/spu_utils.h
create mode 100644 arch/powerpc/platforms/cell/spufs/spufs.h
create mode 100644 arch/powerpc/platforms/cell/spufs/sputrace.h
create mode 100644 arch/powerpc/platforms/cell/spufs/switch.c
create mode 100644 arch/powerpc/platforms/cell/spufs/syscalls.c
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644
index 00000000..2e7ff0c5
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -0,0 +1,163 @@
+config PPC_CELL
+ bool
+ default n
+
+config PPC_CELL_COMMON
+ bool
+ select PPC_CELL
+ select PPC_DCR_MMIO
+ select PPC_INDIRECT_PIO
+ select PPC_INDIRECT_MMIO
+ select PPC_NATIVE
+ select PPC_RTAS
+ select IRQ_EDGE_EOI_HANDLER
+
+config PPC_CELL_NATIVE
+ bool
+ select PPC_CELL_COMMON
+ select MPIC
+ select PPC_IO_WORKAROUNDS
+ select IBM_EMAC_EMAC4
+ select IBM_EMAC_RGMII
+ select IBM_EMAC_ZMII #test only
+ select IBM_EMAC_TAH #test only
+ default n
+
+config PPC_IBM_CELL_BLADE
+ bool "IBM Cell Blade"
+ depends on PPC64 && PPC_BOOK3S
+ select PPC_CELL_NATIVE
+ select PPC_OF_PLATFORM_PCI
+ select PCI
+ select MMIO_NVRAM
+ select PPC_UDBG_16550
+ select UDBG_RTAS_CONSOLE
+
+config PPC_CELLEB
+ bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
+ depends on PPC64 && PPC_BOOK3S
+ select PPC_CELL_NATIVE
+ select PPC_OF_PLATFORM_PCI
+ select PCI
+ select HAS_TXX9_SERIAL
+ select PPC_UDBG_BEAT
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_MMIO
+
+config PPC_CELL_QPACE
+ bool "IBM Cell - QPACE"
+ depends on PPC64 && PPC_BOOK3S
+ select PPC_CELL_COMMON
+
+config AXON_MSI
+ bool
+ depends on PPC_IBM_CELL_BLADE && PCI_MSI
+ default y
+
+menu "Cell Broadband Engine options"
+ depends on PPC_CELL
+
+config SPU_FS
+ tristate "SPU file system"
+ default m
+ depends on PPC_CELL
+ select SPU_BASE
+ select MEMORY_HOTPLUG
+ help
+ The SPU file system is used to access Synergistic Processing
+ Units on machines implementing the Broadband Processor
+ Architecture.
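+	  To use it, mount the filesystem somewhere, e.g.:
+	      mount -t spufs none /spu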
+
+config SPU_FS_64K_LS
+ bool "Use 64K pages to map SPE local store"
+ # we depend on PPC_MM_SLICES for now rather than selecting
+ # it because we depend on hugetlbfs hooks being present. We
+ # will fix that when the generic code has been improved to
+ # not require hijacking hugetlbfs hooks.
+ depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
+ default y
+ select PPC_HAS_HASH_64K
+ help
+ This option causes SPE local stores to be mapped in process
+ address spaces using 64K pages while the rest of the kernel
+	  uses 4K pages. This can improve the performance of applications
+	  using multiple SPEs by lowering the TLB pressure on them.
+
+config SPU_BASE
+ bool
+ default n
+
+config CBE_RAS
+ bool "RAS features for bare metal Cell BE"
+ depends on PPC_CELL_NATIVE
+ default y
+
+config PPC_IBM_CELL_RESETBUTTON
+ bool "IBM Cell Blade Pinhole reset button"
+ depends on CBE_RAS && PPC_IBM_CELL_BLADE
+ default y
+ help
+	  Support the pinhole reset button on IBM Cell blades.
+	  This adds a method to trigger a system reset via the front-panel pinhole button.
+
+config PPC_IBM_CELL_POWERBUTTON
+ tristate "IBM Cell Blade power button"
+ depends on PPC_IBM_CELL_BLADE && INPUT_EVDEV
+ default y
+ help
+	  Support the power button on IBM Cell blades.
+	  This will enable the power button as an input device.
+
+config CBE_THERM
+ tristate "CBE thermal support"
+ default m
+ depends on CBE_RAS && SPU_BASE
+
+config CBE_CPUFREQ
+ tristate "CBE frequency scaling"
+ depends on CBE_RAS && CPU_FREQ
+ default m
+ help
+ This adds the cpufreq driver for Cell BE processors.
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N.
+
+config CBE_CPUFREQ_PMI_ENABLE
+ bool "CBE frequency scaling using PMI interface"
+ depends on CBE_CPUFREQ && EXPERIMENTAL
+ default n
+ help
+	  Select this if you want to use the PMI interface
+	  to switch frequencies. Using PMI, the processor will
+	  not only be able to run at a lower speed, but also at
+	  a lower core voltage.
+
+config CBE_CPUFREQ_PMI
+ tristate
+ depends on CBE_CPUFREQ_PMI_ENABLE
+ default CBE_CPUFREQ
+
+config PPC_PMI
+ tristate
+ default y
+ depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON
+ help
+ PMI (Platform Management Interrupt) is a way to
+ communicate with the BMC (Baseboard Management Controller).
+ It is used in some IBM Cell blades.
+
+config CBE_CPUFREQ_SPU_GOVERNOR
+ tristate "CBE frequency scaling based on SPU usage"
+ depends on SPU_FS && CPU_FREQ
+ default m
+ help
+	  This governor checks SPU usage to adjust the CPU frequency.
+	  If no SPU is running on a given CPU, that CPU will be throttled
+	  to the lowest possible frequency.
+
+endmenu
+
+config OPROFILE_CELL
+ def_bool y
+ depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
+
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
new file mode 100644
index 00000000..a4a89350
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -0,0 +1,49 @@
+obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
+
+obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
+ pmu.o spider-pci.o
+obj-$(CONFIG_CBE_RAS) += ras.o
+
+obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o
+obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o
+cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o
+obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
+
+obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
+
+ifeq ($(CONFIG_SMP),y)
+obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
+obj-$(CONFIG_PPC_CELL_QPACE) += smp.o
+endif
+
+# needed only when building loadable spufs.ko
+spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
+spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
+
+obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
+ spu_notify.o \
+ spu_syscalls.o spu_fault.o \
+ $(spu-priv1-y) \
+ $(spu-manage-y) \
+ spufs/
+
+obj-$(CONFIG_AXON_MSI) += axon_msi.o
+
+# qpace setup
+obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
+
+# celleb stuff
+ifeq ($(CONFIG_PPC_CELLEB),y)
+obj-y += celleb_setup.o \
+ celleb_pci.o celleb_scc_epci.o \
+ celleb_scc_pciex.o \
+ celleb_scc_uhc.o \
+ spider-pci.o beat.o beat_htab.o \
+ beat_hvCall.o beat_interrupt.o \
+ beat_iommu.o
+
+obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
+obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
+obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
+endif
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
new file mode 100644
index 00000000..85825b54
--- /dev/null
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2007, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/export.h>
+#include <linux/of_platform.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/dcr.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+
+
+/*
+ * MSIC registers, specified as offsets from dcr_base
+ */
+#define MSIC_CTRL_REG 0x0
+
+/* Base Address registers specify FIFO location in BE memory */
+#define MSIC_BASE_ADDR_HI_REG 0x3
+#define MSIC_BASE_ADDR_LO_REG 0x4
+
+/* Hold the read/write offsets into the FIFO */
+#define MSIC_READ_OFFSET_REG 0x5
+#define MSIC_WRITE_OFFSET_REG 0x6
+
+
+/* MSIC control register flags */
+#define MSIC_CTRL_ENABLE 0x0001
+#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002
+#define MSIC_CTRL_IRQ_ENABLE 0x0008
+#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010
+
+/*
+ * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
+ * Currently we're using a 64KB FIFO size.
+ */
+#define MSIC_FIFO_SIZE_SHIFT 16
+#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT)
+
+/*
+ * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
+ * 8-9 of the MSIC control reg.
+ */
+#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
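+/*
+ * Worked example for the 64KB FIFO used here: n = MSIC_FIFO_SIZE_SHIFT = 16,
+ * so (16 - 15) << 8 = 0x100 ends up in bits 8-9 of the control register.
+ */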
+
+/*
+ * We need to mask the read/write offsets to make sure they stay within
+ * the bounds of the FIFO. Also they should always be 16-byte aligned.
+ */
+#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
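+/* With the 64KB FIFO this evaluates to 0xFFF0: offsets wrap at 64KB and stay 16-byte aligned. */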
+
+/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
+#define MSIC_FIFO_ENTRY_SIZE 0x10
+
+
+struct axon_msic {
+ struct irq_domain *irq_domain;
+ __le32 *fifo_virt;
+ dma_addr_t fifo_phys;
+ dcr_host_t dcr_host;
+ u32 read_offset;
+#ifdef DEBUG
+ u32 __iomem *trigger;
+#endif
+};
+
+#ifdef DEBUG
+void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
+#else
+static inline void axon_msi_debug_setup(struct device_node *dn,
+ struct axon_msic *msic) { }
+#endif
+
+
+static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
+{
+ pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
+
+ dcr_write(msic->dcr_host, dcr_n, val);
+}
+
+static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct axon_msic *msic = irq_get_handler_data(irq);
+ u32 write_offset, msi;
+ int idx;
+ int retry = 0;
+
+ write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
+ pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);
+
+ /* write_offset doesn't wrap properly, so we have to mask it */
+ write_offset &= MSIC_FIFO_SIZE_MASK;
+
+ while (msic->read_offset != write_offset && retry < 100) {
+ idx = msic->read_offset / sizeof(__le32);
+ msi = le32_to_cpu(msic->fifo_virt[idx]);
+ msi &= 0xFFFF;
+
+ pr_devel("axon_msi: woff %x roff %x msi %x\n",
+ write_offset, msic->read_offset, msi);
+
+ if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
+ generic_handle_irq(msi);
+ msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+ } else {
+ /*
+ * Reading the MSIC_WRITE_OFFSET_REG does not
+ * reliably flush the outstanding DMA to the
+ * FIFO buffer. Here we were reading stale
+ * data, so we need to retry.
+ */
+ udelay(1);
+ retry++;
+ pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
+ continue;
+ }
+
+ if (retry) {
+ pr_devel("axon_msi: late irq 0x%x, retry %d\n",
+ msi, retry);
+ retry = 0;
+ }
+
+ msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+ msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+ }
+
+ if (retry) {
+ printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+ msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+ msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+ }
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
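+/*
+ * Walk up the device tree from the PCI device until a node carrying an
+ * "msi-translator" phandle is found, then resolve that phandle to the
+ * irq_domain (and thus the axon_msic) that serves this device.
+ */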
+static struct axon_msic *find_msi_translator(struct pci_dev *dev)
+{
+ struct irq_domain *irq_domain;
+ struct device_node *dn, *tmp;
+ const phandle *ph;
+ struct axon_msic *msic = NULL;
+
+ dn = of_node_get(pci_device_to_OF_node(dev));
+ if (!dn) {
+ dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
+ return NULL;
+ }
+
+ for (; dn; dn = of_get_next_parent(dn)) {
+ ph = of_get_property(dn, "msi-translator", NULL);
+ if (ph)
+ break;
+ }
+
+ if (!ph) {
+ dev_dbg(&dev->dev,
+ "axon_msi: no msi-translator property found\n");
+ goto out_error;
+ }
+
+ tmp = dn;
+ dn = of_find_node_by_phandle(*ph);
+ of_node_put(tmp);
+ if (!dn) {
+ dev_dbg(&dev->dev,
+ "axon_msi: msi-translator doesn't point to a node\n");
+ goto out_error;
+ }
+
+ irq_domain = irq_find_host(dn);
+ if (!irq_domain) {
+ dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
+ dn->full_name);
+ goto out_error;
+ }
+
+ msic = irq_domain->host_data;
+
+out_error:
+ of_node_put(dn);
+
+ return msic;
+}
+
+static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
+{
+ if (!find_msi_translator(dev))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
+{
+ struct device_node *dn;
+ struct msi_desc *entry;
+ int len;
+ const u32 *prop;
+
+ dn = of_node_get(pci_device_to_OF_node(dev));
+ if (!dn) {
+ dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
+ return -ENODEV;
+ }
+
+ entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
+
+ for (; dn; dn = of_get_next_parent(dn)) {
+ if (entry->msi_attrib.is_64) {
+ prop = of_get_property(dn, "msi-address-64", &len);
+ if (prop)
+ break;
+ }
+
+ prop = of_get_property(dn, "msi-address-32", &len);
+ if (prop)
+ break;
+ }
+
+ if (!prop) {
+ dev_dbg(&dev->dev,
+ "axon_msi: no msi-address-(32|64) properties found\n");
+ return -ENOENT;
+ }
+
+ switch (len) {
+ case 8:
+ msg->address_hi = prop[0];
+ msg->address_lo = prop[1];
+ break;
+ case 4:
+ msg->address_hi = 0;
+ msg->address_lo = prop[0];
+ break;
+ default:
+ dev_dbg(&dev->dev,
+ "axon_msi: malformed msi-address-(32|64) property\n");
+ of_node_put(dn);
+ return -EINVAL;
+ }
+
+ of_node_put(dn);
+
+ return 0;
+}
+
+static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ unsigned int virq, rc;
+ struct msi_desc *entry;
+ struct msi_msg msg;
+ struct axon_msic *msic;
+
+ msic = find_msi_translator(dev);
+ if (!msic)
+ return -ENODEV;
+
+ rc = setup_msi_msg_address(dev, &msg);
+ if (rc)
+ return rc;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ virq = irq_create_direct_mapping(msic->irq_domain);
+ if (virq == NO_IRQ) {
+ dev_warn(&dev->dev,
+ "axon_msi: virq allocation failed!\n");
+ return -1;
+ }
+ dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
+
+ irq_set_msi_desc(virq, entry);
+ msg.data = virq;
+ write_msi_msg(virq, &msg);
+ }
+
+ return 0;
+}
+
+static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq == NO_IRQ)
+ continue;
+
+ irq_set_msi_desc(entry->irq, NULL);
+ irq_dispose_mapping(entry->irq);
+ }
+}
+
+static struct irq_chip msic_irq_chip = {
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_shutdown = mask_msi_irq,
+ .name = "AXON-MSI",
+};
+
+static int msic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msic_host_ops = {
+ .map = msic_host_map,
+};
+
+static void axon_msi_shutdown(struct platform_device *device)
+{
+ struct axon_msic *msic = dev_get_drvdata(&device->dev);
+ u32 tmp;
+
+ pr_devel("axon_msi: disabling %s\n",
+ msic->irq_domain->of_node->full_name);
+ tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
+ tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
+ msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
+}
+
+static int axon_msi_probe(struct platform_device *device)
+{
+ struct device_node *dn = device->dev.of_node;
+ struct axon_msic *msic;
+ unsigned int virq;
+ int dcr_base, dcr_len;
+
+ pr_devel("axon_msi: setting up dn %s\n", dn->full_name);
+
+ msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
+ if (!msic) {
+ printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
+ dn->full_name);
+ goto out;
+ }
+
+ dcr_base = dcr_resource_start(dn, 0);
+ dcr_len = dcr_resource_len(dn, 0);
+
+ if (dcr_base == 0 || dcr_len == 0) {
+ printk(KERN_ERR
+ "axon_msi: couldn't parse dcr properties on %s\n",
+ dn->full_name);
+ goto out_free_msic;
+ }
+
+ msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(msic->dcr_host)) {
+ printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
+ dn->full_name);
+ goto out_free_msic;
+ }
+
+ msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
+ &msic->fifo_phys, GFP_KERNEL);
+ if (!msic->fifo_virt) {
+ printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
+ dn->full_name);
+ goto out_free_msic;
+ }
+
+ virq = irq_of_parse_and_map(dn, 0);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
+ dn->full_name);
+ goto out_free_fifo;
+ }
+ memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
+
+ /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
+ if (!msic->irq_domain) {
+ printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
+ dn->full_name);
+ goto out_free_fifo;
+ }
+
+ irq_set_handler_data(virq, msic);
+ irq_set_chained_handler(virq, axon_msi_cascade);
+ pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
+
+ /* Enable the MSIC hardware */
+ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
+ msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
+ msic->fifo_phys & 0xFFFFFFFF);
+ msic_dcr_write(msic, MSIC_CTRL_REG,
+ MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
+ MSIC_CTRL_FIFO_SIZE);
+
+ msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
+ & MSIC_FIFO_SIZE_MASK;
+
+ dev_set_drvdata(&device->dev, msic);
+
+ ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
+ ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
+ ppc_md.msi_check_device = axon_msi_check_device;
+
+ axon_msi_debug_setup(dn, msic);
+
+ printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
+
+ return 0;
+
+out_free_fifo:
+ dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
+ msic->fifo_phys);
+out_free_msic:
+ kfree(msic);
+out:
+
+ return -1;
+}
+
+static const struct of_device_id axon_msi_device_id[] = {
+ {
+ .compatible = "ibm,axon-msic"
+ },
+ {}
+};
+
+static struct platform_driver axon_msi_driver = {
+ .probe = axon_msi_probe,
+ .shutdown = axon_msi_shutdown,
+ .driver = {
+ .name = "axon-msi",
+ .owner = THIS_MODULE,
+ .of_match_table = axon_msi_device_id,
+ },
+};
+
+static int __init axon_msi_init(void)
+{
+ return platform_driver_register(&axon_msi_driver);
+}
+subsys_initcall(axon_msi_init);
+
+
+#ifdef DEBUG
+static int msic_set(void *data, u64 val)
+{
+ struct axon_msic *msic = data;
+ out_le32(msic->trigger, val);
+ return 0;
+}
+
+static int msic_get(void *data, u64 *val)
+{
+ *val = 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
+
+void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
+{
+ char name[8];
+ u64 addr;
+
+ addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
+ if (addr == OF_BAD_ADDR) {
+ pr_devel("axon_msi: couldn't translate reg property\n");
+ return;
+ }
+
+ msic->trigger = ioremap(addr, 0x4);
+ if (!msic->trigger) {
+ pr_devel("axon_msi: ioremap failed\n");
+ return;
+ }
+
+ snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
+
+ if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
+ msic, &fops_msic)) {
+ pr_devel("axon_msi: debugfs_create_file failed!\n");
+ return;
+ }
+}
+#endif /* DEBUG */
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
new file mode 100644
index 00000000..852592b2
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -0,0 +1,264 @@
+/*
+ * Simple routines for Celleb/Beat
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/reboot.h>
+
+#include <asm/hvconsole.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+#include "beat_wrapper.h"
+#include "beat.h"
+#include "beat_interrupt.h"
+
+static int beat_pm_poweroff_flag;
+
+void beat_restart(char *cmd)
+{
+ beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
+}
+
+void beat_power_off(void)
+{
+ beat_shutdown_logical_partition(0);
+}
+
+u64 beat_halt_code = 0x1000000000000000UL;
+EXPORT_SYMBOL(beat_halt_code);
+
+void beat_halt(void)
+{
+ beat_shutdown_logical_partition(beat_halt_code);
+}
+
+int beat_set_rtc_time(struct rtc_time *rtc_time)
+{
+ u64 tim;
+ tim = mktime(rtc_time->tm_year+1900,
+ rtc_time->tm_mon+1, rtc_time->tm_mday,
+ rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
+ if (beat_rtc_write(tim))
+ return -1;
+ return 0;
+}
+
+void beat_get_rtc_time(struct rtc_time *rtc_time)
+{
+ u64 tim;
+
+ if (beat_rtc_read(&tim))
+ tim = 0;
+ to_tm(tim, rtc_time);
+ rtc_time->tm_year -= 1900;
+ rtc_time->tm_mon -= 1;
+}
+
+#define BEAT_NVRAM_SIZE 4096
+
+ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
+{
+ unsigned int i;
+ unsigned long len;
+ char *p = buf;
+
+ if (*index >= BEAT_NVRAM_SIZE)
+ return -ENODEV;
+ i = *index;
+ if (i + count > BEAT_NVRAM_SIZE)
+ count = BEAT_NVRAM_SIZE - i;
+
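+	/*
+	 * The hypervisor EEPROM interface moves at most BEAT_NVRW_CNT bytes
+	 * per call, so split the request into chunks of that size.
+	 */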
+ for (; count != 0; count -= len) {
+ len = count;
+ if (len > BEAT_NVRW_CNT)
+ len = BEAT_NVRW_CNT;
+ if (beat_eeprom_read(i, len, p))
+ return -EIO;
+
+ p += len;
+ i += len;
+ }
+ *index = i;
+ return p - buf;
+}
+
+ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
+{
+ unsigned int i;
+ unsigned long len;
+ char *p = buf;
+
+ if (*index >= BEAT_NVRAM_SIZE)
+ return -ENODEV;
+ i = *index;
+ if (i + count > BEAT_NVRAM_SIZE)
+ count = BEAT_NVRAM_SIZE - i;
+
+ for (; count != 0; count -= len) {
+ len = count;
+ if (len > BEAT_NVRW_CNT)
+ len = BEAT_NVRW_CNT;
+ if (beat_eeprom_write(i, len, p))
+ return -EIO;
+
+ p += len;
+ i += len;
+ }
+ *index = i;
+ return p - buf;
+}
+
+ssize_t beat_nvram_get_size(void)
+{
+ return BEAT_NVRAM_SIZE;
+}
+
+int beat_set_xdabr(unsigned long dabr)
+{
+ if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER))
+ return -1;
+ return 0;
+}
+
+int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
+{
+ u64 db[2];
+ s64 ret;
+
+ ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
+ if (ret == 0) {
+ *t1 = db[0];
+ *t2 = db[1];
+ }
+ return ret;
+}
+EXPORT_SYMBOL(beat_get_term_char);
+
+int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
+{
+ u64 db[2];
+
+ db[0] = t1;
+ db[1] = t2;
+ return beat_put_characters_to_console(vterm, len, (u8 *)db);
+}
+EXPORT_SYMBOL(beat_put_term_char);
+
+void beat_power_save(void)
+{
+ beat_pause(0);
+}
+
+#ifdef CONFIG_KEXEC
+void beat_kexec_cpu_down(int crash, int secondary)
+{
+ beatic_deinit_IRQ();
+}
+#endif
+
+static irqreturn_t beat_power_event(int virq, void *arg)
+{
+ printk(KERN_DEBUG "Beat: power button pressed\n");
+ beat_pm_poweroff_flag = 1;
+ ctrl_alt_del();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t beat_reset_event(int virq, void *arg)
+{
+ printk(KERN_DEBUG "Beat: reset button pressed\n");
+ beat_pm_poweroff_flag = 0;
+ ctrl_alt_del();
+ return IRQ_HANDLED;
+}
+
+static struct beat_event_list {
+ const char *typecode;
+ irq_handler_t handler;
+ unsigned int virq;
+} beat_event_list[] = {
+ { "power", beat_power_event, 0 },
+ { "reset", beat_reset_event, 0 },
+};
+
+static int __init beat_register_event(void)
+{
+ u64 path[4], data[2];
+ int rc, i;
+ unsigned int virq;
+
+ for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
+ struct beat_event_list *ev = &beat_event_list[i];
+
+ if (beat_construct_event_receive_port(data) != 0) {
+ printk(KERN_ERR "Beat: "
+ "cannot construct event receive port for %s\n",
+ ev->typecode);
+ return -EINVAL;
+ }
+
+ virq = irq_create_mapping(NULL, data[0]);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Beat: failed to get virtual IRQ"
+ " for event receive port for %s\n",
+ ev->typecode);
+ beat_destruct_event_receive_port(data[0]);
+ return -EIO;
+ }
+ ev->virq = virq;
+
+ rc = request_irq(virq, ev->handler, 0,
+ ev->typecode, NULL);
+ if (rc != 0) {
+ printk(KERN_ERR "Beat: failed to request virtual IRQ"
+ " for event receive port for %s\n",
+ ev->typecode);
+ beat_destruct_event_receive_port(data[0]);
+ return rc;
+ }
+
+ path[0] = 0x1000000065780000ul; /* 1,ex */
+ path[1] = 0x627574746f6e0000ul; /* button */
+ path[2] = 0;
+ strncpy((char *)&path[2], ev->typecode, 8);
+ path[3] = 0;
+ data[1] = 0;
+
+ beat_create_repository_node(path, data);
+ }
+ return 0;
+}
+
+static int __init beat_event_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_BEAT))
+ return -EINVAL;
+
+ beat_pm_poweroff_flag = 0;
+ return beat_register_event();
+}
+
+device_initcall(beat_event_init);
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
new file mode 100644
index 00000000..32c8efce
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat.h
@@ -0,0 +1,39 @@
+/*
+ * Guest OS Interfaces.
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CELLEB_BEAT_H
+#define _CELLEB_BEAT_H
+
+int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
+int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
+int64_t beat_repository_encode(int, const char *, uint64_t[4]);
+void beat_restart(char *);
+void beat_power_off(void);
+void beat_halt(void);
+int beat_set_rtc_time(struct rtc_time *);
+void beat_get_rtc_time(struct rtc_time *);
+ssize_t beat_nvram_get_size(void);
+ssize_t beat_nvram_read(char *, size_t, loff_t *);
+ssize_t beat_nvram_write(char *, size_t, loff_t *);
+int beat_set_xdabr(unsigned long);
+void beat_power_save(void);
+void beat_kexec_cpu_down(int, int);
+
+#endif /* _CELLEB_BEAT_H */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
new file mode 100644
index 00000000..943c9d39
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -0,0 +1,440 @@
+/*
+ * "Cell Reference Set" HTAB support.
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This code is based on arch/powerpc/platforms/pseries/lpar.c:
+ * Copyright (C) 2001 Todd Inglett, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG_LOW
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+
+#include "beat_wrapper.h"
+
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
+#else
+#define DBG_LOW(fmt...) do { } while (0)
+#endif
+
+static DEFINE_RAW_SPINLOCK(beat_htab_lock);
+
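+/*
+ * Build a 16-bit mask of the slots that are not bolted, and hence are
+ * candidates for insertion: bits 15-8 cover the eight entries of the
+ * primary bucket, bits 7-0 the eight entries of the secondary bucket.
+ */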
+static inline unsigned int beat_read_mask(unsigned hpte_group)
+{
+ unsigned long rmask = 0;
+ u64 hpte_v[5];
+
+ beat_read_htab_entries(0, hpte_group + 0, hpte_v);
+ if (!(hpte_v[0] & HPTE_V_BOLTED))
+ rmask |= 0x8000;
+ if (!(hpte_v[1] & HPTE_V_BOLTED))
+ rmask |= 0x4000;
+ if (!(hpte_v[2] & HPTE_V_BOLTED))
+ rmask |= 0x2000;
+ if (!(hpte_v[3] & HPTE_V_BOLTED))
+ rmask |= 0x1000;
+ beat_read_htab_entries(0, hpte_group + 4, hpte_v);
+ if (!(hpte_v[0] & HPTE_V_BOLTED))
+ rmask |= 0x0800;
+ if (!(hpte_v[1] & HPTE_V_BOLTED))
+ rmask |= 0x0400;
+ if (!(hpte_v[2] & HPTE_V_BOLTED))
+ rmask |= 0x0200;
+ if (!(hpte_v[3] & HPTE_V_BOLTED))
+ rmask |= 0x0100;
+ hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
+ beat_read_htab_entries(0, hpte_group + 0, hpte_v);
+ if (!(hpte_v[0] & HPTE_V_BOLTED))
+ rmask |= 0x80;
+ if (!(hpte_v[1] & HPTE_V_BOLTED))
+ rmask |= 0x40;
+ if (!(hpte_v[2] & HPTE_V_BOLTED))
+ rmask |= 0x20;
+ if (!(hpte_v[3] & HPTE_V_BOLTED))
+ rmask |= 0x10;
+ beat_read_htab_entries(0, hpte_group + 4, hpte_v);
+ if (!(hpte_v[0] & HPTE_V_BOLTED))
+ rmask |= 0x08;
+ if (!(hpte_v[1] & HPTE_V_BOLTED))
+ rmask |= 0x04;
+ if (!(hpte_v[2] & HPTE_V_BOLTED))
+ rmask |= 0x02;
+ if (!(hpte_v[3] & HPTE_V_BOLTED))
+ rmask |= 0x01;
+ return rmask;
+}
+
+static long beat_lpar_hpte_insert(unsigned long hpte_group,
+ unsigned long va, unsigned long pa,
+ unsigned long rflags, unsigned long vflags,
+ int psize, int ssize)
+{
+ unsigned long lpar_rc;
+ u64 hpte_v, hpte_r, slot;
+
+ if (vflags & HPTE_V_SECONDARY)
+ return -1;
+
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+ "rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, va, pa, rflags, vflags, psize);
+
+ hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+ vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize) | rflags;
+
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
+
+ if (rflags & _PAGE_NO_CACHE)
+ hpte_r &= ~_PAGE_COHERENT;
+
+ raw_spin_lock(&beat_htab_lock);
+ lpar_rc = beat_read_mask(hpte_group);
+ if (lpar_rc == 0) {
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" full\n");
+ raw_spin_unlock(&beat_htab_lock);
+ return -1;
+ }
+
+ lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
+ hpte_v, hpte_r, &slot);
+ raw_spin_unlock(&beat_htab_lock);
+
+ /*
+ * Since we try and ioremap PHBs we don't own, the pte insert
+ * will fail. However we must catch the failure in hash_page
+ * or we will loop forever, so return -2 in this case.
+ */
+ if (unlikely(lpar_rc != 0)) {
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" lpar err %lx\n", lpar_rc);
+ return -2;
+ }
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" -> slot: %lx\n", slot);
+
+ /* We have to pass down the secondary bucket bit here as well */
+ return (slot ^ hpte_group) & 15;
+}
+
+static long beat_lpar_hpte_remove(unsigned long hpte_group)
+{
+ DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
+ return -1;
+}
+
+static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
+{
+ unsigned long dword0;
+ unsigned long lpar_rc;
+ u64 dword[5];
+
+ lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
+
+ dword0 = dword[slot&3];
+
+ BUG_ON(lpar_rc != 0);
+
+ return dword0;
+}
+
+static void beat_lpar_hptab_clear(void)
+{
+ unsigned long size_bytes = 1UL << ppc64_pft_size;
+ unsigned long hpte_count = size_bytes >> 4;
+ int i;
+ u64 dummy0, dummy1;
+
+ /* TODO: Use bulk call */
+ for (i = 0; i < hpte_count; i++)
+ beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
+}
+
+/*
+ * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
+ * the low 3 bits of flags happen to line up. So no transform is needed.
+ * We can probably optimize here and assume the high bits of newpp are
+ * already zero. For now I am paranoid.
+ */
+static long beat_lpar_hpte_updatepp(unsigned long slot,
+ unsigned long newpp,
+ unsigned long va,
+ int psize, int ssize, int local)
+{
+ unsigned long lpar_rc;
+ u64 dummy0, dummy1;
+ unsigned long want_v;
+
+ want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+
+ DBG_LOW(" update: "
+ "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
+ want_v & HPTE_V_AVPN, slot, psize, newpp);
+
+ raw_spin_lock(&beat_htab_lock);
+ dummy0 = beat_lpar_hpte_getword0(slot);
+ if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
+ DBG_LOW("not found !\n");
+ raw_spin_unlock(&beat_htab_lock);
+ return -1;
+ }
+
+ lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
+ &dummy1);
+ raw_spin_unlock(&beat_htab_lock);
+ if (lpar_rc != 0 || dummy0 == 0) {
+ DBG_LOW("not found !\n");
+ return -1;
+ }
+
+ DBG_LOW("ok %lx %lx\n", dummy0, dummy1);
+
+ BUG_ON(lpar_rc != 0);
+
+ return 0;
+}
+
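+/*
+ * Look for a valid HPTE matching want_v, first in the primary bucket
+ * (j == 0) and then in the secondary bucket (j == 1, hash inverted);
+ * a hit in the secondary bucket is returned as a negated slot number.
+ */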
+static long beat_lpar_hpte_find(unsigned long va, int psize)
+{
+ unsigned long hash;
+ unsigned long i, j;
+ long slot;
+ unsigned long want_v, hpte_v;
+
+ hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
+ want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+
+ for (j = 0; j < 2; j++) {
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ hpte_v = beat_lpar_hpte_getword0(slot);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v)
+ && (hpte_v & HPTE_V_VALID)
+ && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
+ /* HPTE matches */
+ if (j)
+ slot = -slot;
+ return slot;
+ }
+ ++slot;
+ }
+ hash = ~hash;
+ }
+
+ return -1;
+}
+
+static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
+ unsigned long ea,
+ int psize, int ssize)
+{
+ unsigned long lpar_rc, slot, vsid, va;
+ u64 dummy0, dummy1;
+
+ vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
+ va = (vsid << 28) | (ea & 0x0fffffff);
+
+ raw_spin_lock(&beat_htab_lock);
+ slot = beat_lpar_hpte_find(va, psize);
+ BUG_ON(slot == -1);
+
+ lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
+ &dummy0, &dummy1);
+ raw_spin_unlock(&beat_htab_lock);
+
+ BUG_ON(lpar_rc != 0);
+}
+
+static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
+ int psize, int ssize, int local)
+{
+ unsigned long want_v;
+ unsigned long lpar_rc;
+ u64 dummy1, dummy2;
+ unsigned long flags;
+
+ DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
+ slot, va, psize, local);
+ want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+
+ raw_spin_lock_irqsave(&beat_htab_lock, flags);
+ dummy1 = beat_lpar_hpte_getword0(slot);
+
+ if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
+ DBG_LOW("not found !\n");
+ raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
+ return;
+ }
+
+ lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
+ &dummy1, &dummy2);
+ raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
+
+ BUG_ON(lpar_rc != 0);
+}
+
+void __init hpte_init_beat(void)
+{
+ ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
+ ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
+ ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
+ ppc_md.hpte_insert = beat_lpar_hpte_insert;
+ ppc_md.hpte_remove = beat_lpar_hpte_remove;
+ ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
+}
+
+static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
+ unsigned long va, unsigned long pa,
+ unsigned long rflags, unsigned long vflags,
+ int psize, int ssize)
+{
+ unsigned long lpar_rc;
+ u64 hpte_v, hpte_r, slot;
+
+ if (vflags & HPTE_V_SECONDARY)
+ return -1;
+
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+ "rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, va, pa, rflags, vflags, psize);
+
+ hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+ vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize) | rflags;
+
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
+
+ if (rflags & _PAGE_NO_CACHE)
+ hpte_r &= ~_PAGE_COHERENT;
+
+	/* insert into a non-bolted entry */
+ lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
+ HPTE_V_BOLTED, 0, &slot);
+ /*
+ * Since we try and ioremap PHBs we don't own, the pte insert
+ * will fail. However we must catch the failure in hash_page
+ * or we will loop forever, so return -2 in this case.
+ */
+ if (unlikely(lpar_rc != 0)) {
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" lpar err %lx\n", lpar_rc);
+ return -2;
+ }
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" -> slot: %lx\n", slot);
+
+ /* We have to pass down the secondary bucket bit here as well */
+ return (slot ^ hpte_group) & 15;
+}
+
+/*
+ * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
+ * the low 3 bits of flags happen to line up. So no transform is needed.
+ * We can probably optimize here and assume the high bits of newpp are
+ * already zero. For now I am paranoid.
+ */
+static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
+ unsigned long newpp,
+ unsigned long va,
+ int psize, int ssize, int local)
+{
+ unsigned long lpar_rc;
+ unsigned long want_v;
+ unsigned long pss;
+
+ want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+
+ DBG_LOW(" update: "
+ "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
+ want_v & HPTE_V_AVPN, slot, psize, newpp);
+
+ lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);
+
+ if (lpar_rc == 0xfffffff7) {
+ DBG_LOW("not found !\n");
+ return -1;
+ }
+
+ DBG_LOW("ok\n");
+
+ BUG_ON(lpar_rc != 0);
+
+ return 0;
+}
+
+static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
+ int psize, int ssize, int local)
+{
+ unsigned long want_v;
+ unsigned long lpar_rc;
+ unsigned long pss;
+
+ DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
+ slot, va, psize, local);
+ want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+ pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
+
+ lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
+
+ /* E_busy can be valid output: page may be already replaced */
+ BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
+}
+
+static int64_t _beat_lpar_hptab_clear_v3(void)
+{
+ return beat_clear_htab3(0);
+}
+
+static void beat_lpar_hptab_clear_v3(void)
+{
+ _beat_lpar_hptab_clear_v3();
+}
+
+void __init hpte_init_beat_v3(void)
+{
+ if (_beat_lpar_hptab_clear_v3() == 0) {
+ ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate_v3;
+ ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp_v3;
+ ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
+ ppc_md.hpte_insert = beat_lpar_hpte_insert_v3;
+ ppc_md.hpte_remove = beat_lpar_hpte_remove;
+ ppc_md.hpte_clear_all = beat_lpar_hptab_clear_v3;
+ } else {
+ ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
+ ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
+ ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
+ ppc_md.hpte_insert = beat_lpar_hpte_insert;
+ ppc_md.hpte_remove = beat_lpar_hpte_remove;
+ ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
+ }
+}
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
new file mode 100644
index 00000000..74c81744
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_hvCall.S
@@ -0,0 +1,287 @@
+/*
+ * Beat hypervisor call I/F
+ *
+ * (C) Copyright 2007 TOSHIBA CORPORATION
+ *
+ * This code is based on arch/powerpc/platforms/pseries/hvCall.S.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <asm/ppc_asm.h>
+
+#define STK_PARM(i) (48 + ((i)-3)*8)
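+/*
+ * e.g. STK_PARM(r10) = 48 + (10-3)*8 = 104: the 64-bit ELF ABI parameter
+ * save area starts at offset 48 from r1, one doubleword per GPR from r3 up.
+ */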
+
+/* Not implemented on Beat for now */
+#define HCALL_INST_PRECALL
+#define HCALL_INST_POSTCALL
+
+ .text
+
+#define HVSC .long 0x44000022
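+/* 0x44000022 is "sc 1": the system-call instruction with LEV=1, which traps to the hypervisor. */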
+
+/* Note: takes only 7 input parameters at maximum */
+_GLOBAL(beat_hcall_norets)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ mr r11,r3
+ mr r3,r4
+ mr r4,r5
+ mr r5,r6
+ mr r6,r7
+ mr r7,r8
+ mr r8,r9
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes 8 input parameters at maximum */
+_GLOBAL(beat_hcall_norets8)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ mr r11,r3
+ mr r3,r4
+ mr r4,r5
+ mr r5,r6
+ mr r6,r7
+ mr r7,r8
+ mr r8,r9
+ ld r10,STK_PARM(r10)(r1)
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters and 1 output parameter at maximum */
+_GLOBAL(beat_hcall1)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters, 2 output parameters at maximum */
+_GLOBAL(beat_hcall2)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters, 3 output parameters at maximum */
+_GLOBAL(beat_hcall3)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters, 4 output parameters at maximum */
+_GLOBAL(beat_hcall4)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters, 5 output parameters at maximum */
+_GLOBAL(beat_hcall5)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+ std r8, 32(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
+/* Note: takes only 6 input parameters, 6 output parameters at maximum */
+_GLOBAL(beat_hcall6)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ HCALL_INST_PRECALL
+
+ std r4,STK_PARM(r4)(r1) /* save ret buffer */
+
+ mr r11,r3
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+
+ HVSC /* invoke the hypervisor */
+
+ HCALL_INST_POSTCALL
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+ std r8, 32(r12)
+ std r9, 40(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
new file mode 100644
index 00000000..8c6dc42e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -0,0 +1,253 @@
+/*
+ * Celleb/Beat Interrupt controller
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+#include <asm/machdep.h>
+
+#include "beat_interrupt.h"
+#include "beat_wrapper.h"
+
+#define MAX_IRQS NR_IRQS
+static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
+static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
+static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
+
+static struct irq_domain *beatic_host;
+
+/*
+ * In this implementation, "virq" == "IRQ plug number",
+ * "(irq_hw_number_t)hwirq" == "IRQ outlet number".
+ */
+
+/* assumption: locked */
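+/*
+ * The hypervisor takes the mask 256 IRQs at a time, as four 64-bit words;
+ * an interrupt is deliverable only while it is both enabled and acked.
+ */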
+static inline void beatic_update_irq_mask(unsigned int irq_plug)
+{
+ int off;
+ unsigned long masks[4];
+
+ off = (irq_plug / 256) * 4;
+ masks[0] = beatic_irq_mask_enable[off + 0]
+ & beatic_irq_mask_ack[off + 0];
+ masks[1] = beatic_irq_mask_enable[off + 1]
+ & beatic_irq_mask_ack[off + 1];
+ masks[2] = beatic_irq_mask_enable[off + 2]
+ & beatic_irq_mask_ack[off + 2];
+ masks[3] = beatic_irq_mask_enable[off + 3]
+ & beatic_irq_mask_ack[off + 3];
+ if (beat_set_interrupt_mask(irq_plug&~255UL,
+ masks[0], masks[1], masks[2], masks[3]) != 0)
+ panic("Failed to set mask IRQ!");
+}
+
+static void beatic_mask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
+ beatic_update_irq_mask(d->irq);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+}
+
+static void beatic_unmask_irq(struct irq_data *d)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
+ beatic_update_irq_mask(d->irq);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+}
+
+static void beatic_ack_irq(struct irq_data *d)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
+ beatic_update_irq_mask(d->irq);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+}
+
+static void beatic_end_irq(struct irq_data *d)
+{
+ s64 err;
+ unsigned long flags;
+
+ err = beat_downcount_of_interrupt(d->irq);
+ if (err != 0) {
+ if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
+ panic("Failed to downcount IRQ! Error = %16llx", err);
+
+ printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
+ }
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
+ beatic_update_irq_mask(d->irq);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+}
+
+static struct irq_chip beatic_pic = {
+ .name = "CELL-BEAT",
+ .irq_unmask = beatic_unmask_irq,
+ .irq_mask = beatic_mask_irq,
+ .irq_eoi = beatic_end_irq,
+};
+
+/*
+ * Dispose of the binding between the hardware IRQ number (hw) and the
+ * virtual IRQ number (virq), and update flags.
+ *
+ * Note that the number (virq) is already assigned at upper layer.
+ */
+static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
+{
+ beat_destruct_irq_plug(virq);
+}
+
+/*
+ * Create or update the binding between the hardware IRQ number (hw) and
+ * the virtual IRQ number (virq). This is called only once for a given mapping.
+ *
+ * Note that the number (virq) is already assigned at upper layer.
+ */
+static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ int64_t err;
+
+ err = beat_construct_and_connect_irq_plug(virq, hw);
+ if (err < 0)
+ return -EIO;
+
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
+ return 0;
+}
+
+/*
+ * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
+ * to pass on to irq_create_mapping().
+ *
+ * Called from irq_create_of_mapping() only.
+ * Note: We have only 1 entry to translate.
+ */
+static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_flags)
+{
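+ /* The Beat outlet number is a single 64-bit cell, so reinterpret the
+ * u32 intspec pointer as a pointer to u64. */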
+ const u64 *intspec2 = (const u64 *)intspec;
+
+ *out_hwirq = *intspec2;
+ *out_flags |= IRQ_TYPE_LEVEL_LOW;
+ return 0;
+}
+
+static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
+{
+ /* Match all */
+ return 1;
+}
+
+static const struct irq_domain_ops beatic_pic_host_ops = {
+ .map = beatic_pic_host_map,
+ .unmap = beatic_pic_host_unmap,
+ .xlate = beatic_pic_host_xlate,
+ .match = beatic_pic_host_match,
+};
+
+/*
+ * Find a pending IRQ.
+ * Note: returns a VIRQ (plug number), or NO_IRQ if none is pending.
+ */
+static inline unsigned int beatic_get_irq_plug(void)
+{
+ int i;
+ uint64_t pending[4], ub;
+
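+ /*
+ * Scan 256 plugs per iteration. Plugs are numbered MSB-first, so
+ * cntlzd (count leading zeros) yields the lowest pending, enabled,
+ * acked plug within each 64-bit word, or 64 if no bit is set.
+ */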
+ for (i = 0; i < MAX_IRQS; i += 256) {
+ beat_detect_pending_interrupts(i, pending);
+ __asm__ ("cntlzd %0,%1":"=r"(ub):
+ "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
+ & beatic_irq_mask_ack[i/64+0]));
+ if (ub != 64)
+ return i + ub + 0;
+ __asm__ ("cntlzd %0,%1":"=r"(ub):
+ "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
+ & beatic_irq_mask_ack[i/64+1]));
+ if (ub != 64)
+ return i + ub + 64;
+ __asm__ ("cntlzd %0,%1":"=r"(ub):
+ "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
+ & beatic_irq_mask_ack[i/64+2]));
+ if (ub != 64)
+ return i + ub + 128;
+ __asm__ ("cntlzd %0,%1":"=r"(ub):
+ "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
+ & beatic_irq_mask_ack[i/64+3]));
+ if (ub != 64)
+ return i + ub + 192;
+ }
+
+ return NO_IRQ;
+}
+unsigned int beatic_get_irq(void)
+{
+ unsigned int ret;
+
+ ret = beatic_get_irq_plug();
+ if (ret != NO_IRQ)
+ beatic_ack_irq(irq_get_irq_data(ret));
+ return ret;
+}
+
+/*
+ * Initialize the Beat interrupt controller: mask all plugs, install the
+ * get_irq hook, and register the irq domain.
+ */
+void __init beatic_init_IRQ(void)
+{
+ int i;
+
+ memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
+ memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
+ for (i = 0; i < MAX_IRQS; i += 256)
+ beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);
+
+ /* Set our get_irq function */
+ ppc_md.get_irq = beatic_get_irq;
+
+ /* Allocate an irq host */
+ beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
+ BUG_ON(beatic_host == NULL);
+ irq_set_default_host(beatic_host);
+}
+
+void beatic_deinit_IRQ(void)
+{
+ int i;
+
+ for (i = 1; i < nr_irqs; i++)
+ beat_destruct_irq_plug(i);
+}
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
new file mode 100644
index 00000000..a7e52f91
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
@@ -0,0 +1,30 @@
+/*
+ * Celleb/Beat Interrupt controller
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef ASM_BEAT_PIC_H
+#define ASM_BEAT_PIC_H
+#ifdef __KERNEL__
+
+extern void beatic_init_IRQ(void);
+extern unsigned int beatic_get_irq(void);
+extern void beatic_deinit_IRQ(void);
+
+#endif
+#endif /* ASM_BEAT_PIC_H */
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
new file mode 100644
index 00000000..3ce68556
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -0,0 +1,115 @@
+/*
+ * Support for IOMMU on Celleb platform.
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+
+#include "beat_wrapper.h"
+
+#define DMA_FLAGS 0xf800000000000000UL /* r/w permitted, coherency required,
+ strongest order */
+
+static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
+ u64 *base, u64 *size, u64 *io_page_size)
+{
+ struct device_node *dn;
+ const unsigned long *dma_window;
+
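+ /*
+ * "toshiba,dma-window" layout, as decoded below: cell 0 carries the
+ * I/O space id in its upper 32 bits and the ioid in its low 11 bits;
+ * cells 1 and 2 are the window base and size; cell 3 is the log2 of
+ * the I/O page size.
+ */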
+ for_each_node_by_type(dn, "ioif") {
+ dma_window = of_get_property(dn, "toshiba,dma-window", NULL);
+ if (dma_window) {
+ *io_space_id = (dma_window[0] >> 32) & 0xffffffffUL;
+ *ioid = dma_window[0] & 0x7ffUL;
+ *base = dma_window[1];
+ *size = dma_window[2];
+ *io_page_size = 1 << dma_window[3];
+ of_node_put(dn);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static unsigned long celleb_dma_direct_offset;
+
+static void __init celleb_init_direct_mapping(void)
+{
+ u64 lpar_addr, io_addr;
+ u64 io_space_id, ioid, dma_base, dma_size, io_page_size;
+
+ if (!find_dma_window(&io_space_id, &ioid, &dma_base, &dma_size,
+ &io_page_size)) {
+ pr_info("No dma window found !\n");
+ return;
+ }
+
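+ /*
+ * Linearly map the whole window: LPAR address N maps to I/O address
+ * dma_base + N, one IOPTE per I/O page. As a rough illustration, a
+ * hypothetical 16 MB window with 4 KB I/O pages would take 4096
+ * beat_put_iopte() calls here.
+ */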
+ for (lpar_addr = 0; lpar_addr < dma_size; lpar_addr += io_page_size) {
+ io_addr = lpar_addr + dma_base;
+ (void)beat_put_iopte(io_space_id, io_addr, lpar_addr,
+ ioid, DMA_FLAGS);
+ }
+
+ celleb_dma_direct_offset = dma_base;
+}
+
+static void celleb_dma_dev_setup(struct device *dev)
+{
+ set_dma_ops(dev, &dma_direct_ops);
+ set_dma_offset(dev, celleb_dma_direct_offset);
+}
+
+static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
+{
+ celleb_dma_dev_setup(&pdev->dev);
+}
+
+static int celleb_of_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ /* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ celleb_dma_dev_setup(dev);
+
+ return 0;
+}
+
+static struct notifier_block celleb_of_bus_notifier = {
+ .notifier_call = celleb_of_bus_notify
+};
+
+static int __init celleb_init_iommu(void)
+{
+ celleb_init_direct_mapping();
+ ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
+ bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
+
+ return 0;
+}
+
+machine_arch_initcall(celleb_beat, celleb_init_iommu);
diff --git a/arch/powerpc/platforms/cell/beat_spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
new file mode 100644
index 00000000..13f52589
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_spu_priv1.c
@@ -0,0 +1,205 @@
+/*
+ * spu hypervisor abstraction for Beat
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <asm/types.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+
+#include "beat_wrapper.h"
+
+static inline void _int_mask_set(struct spu *spu, int class, u64 mask)
+{
+ spu->shadow_int_mask_RW[class] = mask;
+ beat_set_irq_mask_for_spe(spu->spe_id, class, mask);
+}
+
+static inline u64 _int_mask_get(struct spu *spu, int class)
+{
+ return spu->shadow_int_mask_RW[class];
+}
+
+static void int_mask_set(struct spu *spu, int class, u64 mask)
+{
+ _int_mask_set(spu, class, mask);
+}
+
+static u64 int_mask_get(struct spu *spu, int class)
+{
+ return _int_mask_get(spu, class);
+}
+
+static void int_mask_and(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+ old_mask = _int_mask_get(spu, class);
+ _int_mask_set(spu, class, old_mask & mask);
+}
+
+static void int_mask_or(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+ old_mask = _int_mask_get(spu, class);
+ _int_mask_set(spu, class, old_mask | mask);
+}
+
+static void int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+ beat_clear_interrupt_status_of_spe(spu->spe_id, class, stat);
+}
+
+static u64 int_stat_get(struct spu *spu, int class)
+{
+ u64 int_stat;
+ beat_get_interrupt_status_of_spe(spu->spe_id, class, &int_stat);
+ return int_stat;
+}
+
+static void cpu_affinity_set(struct spu *spu, int cpu)
+{
+ return;
+}
+
+static u64 mfc_dar_get(struct spu *spu)
+{
+ u64 dar;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_dar_RW), &dar);
+ return dar;
+}
+
+static u64 mfc_dsisr_get(struct spu *spu)
+{
+ u64 dsisr;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_dsisr_RW), &dsisr);
+ return dsisr;
+}
+
+static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_dsisr_RW), dsisr);
+}
+
+static void mfc_sdr_setup(struct spu *spu)
+{
+ return;
+}
+
+static void mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_sr1_RW), sr1);
+}
+
+static u64 mfc_sr1_get(struct spu *spu)
+{
+ u64 sr1;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_sr1_RW), &sr1);
+ return sr1;
+}
+
+static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_tclass_id_RW), tclass_id);
+}
+
+static u64 mfc_tclass_id_get(struct spu *spu)
+{
+ u64 tclass_id;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, mfc_tclass_id_RW), &tclass_id);
+ return tclass_id;
+}
+
+static void tlb_invalidate(struct spu *spu)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, tlb_invalidate_entry_W), 0ul);
+}
+
+static void resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, resource_allocation_groupID_RW),
+ id);
+}
+
+static u64 resource_allocation_groupID_get(struct spu *spu)
+{
+ u64 id;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, resource_allocation_groupID_RW),
+ &id);
+ return id;
+}
+
+static void resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+ beat_set_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, resource_allocation_enable_RW),
+ enable);
+}
+
+static u64 resource_allocation_enable_get(struct spu *spu)
+{
+ u64 enable;
+ beat_get_spe_privileged_state_1_registers(
+ spu->spe_id,
+ offsetof(struct spu_priv1, resource_allocation_enable_RW),
+ &enable);
+ return enable;
+}
+
+const struct spu_priv1_ops spu_priv1_beat_ops = {
+ .int_mask_and = int_mask_and,
+ .int_mask_or = int_mask_or,
+ .int_mask_set = int_mask_set,
+ .int_mask_get = int_mask_get,
+ .int_stat_clear = int_stat_clear,
+ .int_stat_get = int_stat_get,
+ .cpu_affinity_set = cpu_affinity_set,
+ .mfc_dar_get = mfc_dar_get,
+ .mfc_dsisr_get = mfc_dsisr_get,
+ .mfc_dsisr_set = mfc_dsisr_set,
+ .mfc_sdr_setup = mfc_sdr_setup,
+ .mfc_sr1_set = mfc_sr1_set,
+ .mfc_sr1_get = mfc_sr1_get,
+ .mfc_tclass_id_set = mfc_tclass_id_set,
+ .mfc_tclass_id_get = mfc_tclass_id_get,
+ .tlb_invalidate = tlb_invalidate,
+ .resource_allocation_groupID_set = resource_allocation_groupID_set,
+ .resource_allocation_groupID_get = resource_allocation_groupID_get,
+ .resource_allocation_enable_set = resource_allocation_enable_set,
+ .resource_allocation_enable_get = resource_allocation_enable_get,
+};
diff --git a/arch/powerpc/platforms/cell/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
new file mode 100644
index 00000000..8580dc7e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_syscall.h
@@ -0,0 +1,164 @@
+/*
+ * Beat hypervisor call numbers
+ *
+ * (C) Copyright 2004-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef BEAT_BEAT_syscall_H
+#define BEAT_BEAT_syscall_H
+
+#ifdef __ASSEMBLY__
+#define __BEAT_ADD_VENDOR_ID(__x, __v) ((__v)<<60|(__x))
+#else
+#define __BEAT_ADD_VENDOR_ID(__x, __v) ((u64)(__v)<<60|(__x))
+#endif
+#define HV_allocate_memory __BEAT_ADD_VENDOR_ID(0, 0)
+#define HV_construct_virtual_address_space __BEAT_ADD_VENDOR_ID(2, 0)
+#define HV_destruct_virtual_address_space __BEAT_ADD_VENDOR_ID(10, 0)
+#define HV_get_virtual_address_space_id_of_ppe __BEAT_ADD_VENDOR_ID(4, 0)
+#define HV_query_logical_partition_address_region_info \
+ __BEAT_ADD_VENDOR_ID(6, 0)
+#define HV_release_memory __BEAT_ADD_VENDOR_ID(13, 0)
+#define HV_select_virtual_address_space __BEAT_ADD_VENDOR_ID(7, 0)
+#define HV_load_range_registers __BEAT_ADD_VENDOR_ID(68, 0)
+#define HV_set_ppe_l2cache_rmt_entry __BEAT_ADD_VENDOR_ID(70, 0)
+#define HV_set_ppe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(71, 0)
+#define HV_set_spe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(72, 0)
+#define HV_get_io_address_translation_fault_info __BEAT_ADD_VENDOR_ID(14, 0)
+#define HV_get_iopte __BEAT_ADD_VENDOR_ID(16, 0)
+#define HV_preload_iopt_cache __BEAT_ADD_VENDOR_ID(17, 0)
+#define HV_put_iopte __BEAT_ADD_VENDOR_ID(15, 0)
+#define HV_connect_event_ports __BEAT_ADD_VENDOR_ID(21, 0)
+#define HV_construct_event_receive_port __BEAT_ADD_VENDOR_ID(18, 0)
+#define HV_destruct_event_receive_port __BEAT_ADD_VENDOR_ID(19, 0)
+#define HV_destruct_event_send_port __BEAT_ADD_VENDOR_ID(22, 0)
+#define HV_get_state_of_event_send_port __BEAT_ADD_VENDOR_ID(25, 0)
+#define HV_request_to_connect_event_ports __BEAT_ADD_VENDOR_ID(20, 0)
+#define HV_send_event_externally __BEAT_ADD_VENDOR_ID(23, 0)
+#define HV_send_event_locally __BEAT_ADD_VENDOR_ID(24, 0)
+#define HV_construct_and_connect_irq_plug __BEAT_ADD_VENDOR_ID(28, 0)
+#define HV_destruct_irq_plug __BEAT_ADD_VENDOR_ID(29, 0)
+#define HV_detect_pending_interrupts __BEAT_ADD_VENDOR_ID(26, 0)
+#define HV_end_of_interrupt __BEAT_ADD_VENDOR_ID(27, 0)
+#define HV_assign_control_signal_notification_port __BEAT_ADD_VENDOR_ID(45, 0)
+#define HV_end_of_control_signal_processing __BEAT_ADD_VENDOR_ID(48, 0)
+#define HV_get_control_signal __BEAT_ADD_VENDOR_ID(46, 0)
+#define HV_set_irq_mask_for_spe __BEAT_ADD_VENDOR_ID(61, 0)
+#define HV_shutdown_logical_partition __BEAT_ADD_VENDOR_ID(44, 0)
+#define HV_connect_message_ports __BEAT_ADD_VENDOR_ID(35, 0)
+#define HV_destruct_message_port __BEAT_ADD_VENDOR_ID(36, 0)
+#define HV_receive_message __BEAT_ADD_VENDOR_ID(37, 0)
+#define HV_get_message_port_info __BEAT_ADD_VENDOR_ID(34, 0)
+#define HV_request_to_connect_message_ports __BEAT_ADD_VENDOR_ID(33, 0)
+#define HV_send_message __BEAT_ADD_VENDOR_ID(32, 0)
+#define HV_get_logical_ppe_id __BEAT_ADD_VENDOR_ID(69, 0)
+#define HV_pause __BEAT_ADD_VENDOR_ID(9, 0)
+#define HV_destruct_shared_memory_handle __BEAT_ADD_VENDOR_ID(51, 0)
+#define HV_get_shared_memory_info __BEAT_ADD_VENDOR_ID(52, 0)
+#define HV_permit_sharing_memory __BEAT_ADD_VENDOR_ID(50, 0)
+#define HV_request_to_attach_shared_memory __BEAT_ADD_VENDOR_ID(49, 0)
+#define HV_enable_logical_spe_execution __BEAT_ADD_VENDOR_ID(55, 0)
+#define HV_construct_logical_spe __BEAT_ADD_VENDOR_ID(53, 0)
+#define HV_disable_logical_spe_execution __BEAT_ADD_VENDOR_ID(56, 0)
+#define HV_destruct_logical_spe __BEAT_ADD_VENDOR_ID(54, 0)
+#define HV_sense_spe_execution_status __BEAT_ADD_VENDOR_ID(58, 0)
+#define HV_insert_htab_entry __BEAT_ADD_VENDOR_ID(101, 0)
+#define HV_read_htab_entries __BEAT_ADD_VENDOR_ID(95, 0)
+#define HV_write_htab_entry __BEAT_ADD_VENDOR_ID(94, 0)
+#define HV_assign_io_address_translation_fault_port \
+ __BEAT_ADD_VENDOR_ID(100, 0)
+#define HV_set_interrupt_mask __BEAT_ADD_VENDOR_ID(73, 0)
+#define HV_get_logical_partition_id __BEAT_ADD_VENDOR_ID(74, 0)
+#define HV_create_repository_node2 __BEAT_ADD_VENDOR_ID(90, 0)
+#define HV_create_repository_node __BEAT_ADD_VENDOR_ID(90, 0) /* alias */
+#define HV_get_repository_node_value2 __BEAT_ADD_VENDOR_ID(91, 0)
+#define HV_get_repository_node_value __BEAT_ADD_VENDOR_ID(91, 0) /* alias */
+#define HV_modify_repository_node_value2 __BEAT_ADD_VENDOR_ID(92, 0)
+#define HV_modify_repository_node_value __BEAT_ADD_VENDOR_ID(92, 0) /* alias */
+#define HV_remove_repository_node2 __BEAT_ADD_VENDOR_ID(93, 0)
+#define HV_remove_repository_node __BEAT_ADD_VENDOR_ID(93, 0) /* alias */
+#define HV_cancel_shared_memory __BEAT_ADD_VENDOR_ID(104, 0)
+#define HV_clear_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(206, 0)
+#define HV_construct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(80, 0)
+#define HV_destruct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(81, 0)
+#define HV_disconnect_ipspc_service __BEAT_ADD_VENDOR_ID(88, 0)
+#define HV_execute_ipspc_command __BEAT_ADD_VENDOR_ID(86, 0)
+#define HV_get_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(205, 0)
+#define HV_get_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(208, 0)
+#define HV_permit_use_of_ipspc_service __BEAT_ADD_VENDOR_ID(85, 0)
+#define HV_reinitialize_logical_spe __BEAT_ADD_VENDOR_ID(82, 0)
+#define HV_request_ipspc_service __BEAT_ADD_VENDOR_ID(84, 0)
+#define HV_stop_ipspc_command __BEAT_ADD_VENDOR_ID(87, 0)
+#define HV_set_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(204, 0)
+#define HV_get_status_of_ipspc_service __BEAT_ADD_VENDOR_ID(203, 0)
+#define HV_put_characters_to_console __BEAT_ADD_VENDOR_ID(0x101, 1)
+#define HV_get_characters_from_console __BEAT_ADD_VENDOR_ID(0x102, 1)
+#define HV_get_base_clock __BEAT_ADD_VENDOR_ID(0x111, 1)
+#define HV_set_base_clock __BEAT_ADD_VENDOR_ID(0x112, 1)
+#define HV_get_frame_cycle __BEAT_ADD_VENDOR_ID(0x114, 1)
+#define HV_disable_console __BEAT_ADD_VENDOR_ID(0x115, 1)
+#define HV_disable_all_console __BEAT_ADD_VENDOR_ID(0x116, 1)
+#define HV_oneshot_timer __BEAT_ADD_VENDOR_ID(0x117, 1)
+#define HV_set_dabr __BEAT_ADD_VENDOR_ID(0x118, 1)
+#define HV_get_dabr __BEAT_ADD_VENDOR_ID(0x119, 1)
+#define HV_start_hv_stats __BEAT_ADD_VENDOR_ID(0x21c, 1)
+#define HV_stop_hv_stats __BEAT_ADD_VENDOR_ID(0x21d, 1)
+#define HV_get_hv_stats __BEAT_ADD_VENDOR_ID(0x21e, 1)
+#define HV_get_hv_error_stats __BEAT_ADD_VENDOR_ID(0x221, 1)
+#define HV_get_stats __BEAT_ADD_VENDOR_ID(0x224, 1)
+#define HV_get_heap_stats __BEAT_ADD_VENDOR_ID(0x225, 1)
+#define HV_get_memory_stats __BEAT_ADD_VENDOR_ID(0x227, 1)
+#define HV_get_memory_detail __BEAT_ADD_VENDOR_ID(0x228, 1)
+#define HV_set_priority_of_irq_outlet __BEAT_ADD_VENDOR_ID(0x122, 1)
+#define HV_get_physical_spe_by_reservation_id __BEAT_ADD_VENDOR_ID(0x128, 1)
+#define HV_get_spe_context __BEAT_ADD_VENDOR_ID(0x129, 1)
+#define HV_set_spe_context __BEAT_ADD_VENDOR_ID(0x12a, 1)
+#define HV_downcount_of_interrupt __BEAT_ADD_VENDOR_ID(0x12e, 1)
+#define HV_peek_spe_context __BEAT_ADD_VENDOR_ID(0x12f, 1)
+#define HV_read_bpa_register __BEAT_ADD_VENDOR_ID(0x131, 1)
+#define HV_write_bpa_register __BEAT_ADD_VENDOR_ID(0x132, 1)
+#define HV_map_context_table_of_spe __BEAT_ADD_VENDOR_ID(0x137, 1)
+#define HV_get_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x138, 1)
+#define HV_set_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x139, 1)
+#define HV_init_pm __BEAT_ADD_VENDOR_ID(0x150, 1)
+#define HV_set_pm_signal __BEAT_ADD_VENDOR_ID(0x151, 1)
+#define HV_get_pm_signal __BEAT_ADD_VENDOR_ID(0x152, 1)
+#define HV_set_pm_config __BEAT_ADD_VENDOR_ID(0x153, 1)
+#define HV_get_pm_config __BEAT_ADD_VENDOR_ID(0x154, 1)
+#define HV_get_inner_trace_data __BEAT_ADD_VENDOR_ID(0x155, 1)
+#define HV_set_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x156, 1)
+#define HV_get_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x157, 1)
+#define HV_set_pm_interrupt __BEAT_ADD_VENDOR_ID(0x158, 1)
+#define HV_get_pm_interrupt __BEAT_ADD_VENDOR_ID(0x159, 1)
+#define HV_kick_pm __BEAT_ADD_VENDOR_ID(0x160, 1)
+#define HV_construct_pm_context __BEAT_ADD_VENDOR_ID(0x164, 1)
+#define HV_destruct_pm_context __BEAT_ADD_VENDOR_ID(0x165, 1)
+#define HV_be_slow __BEAT_ADD_VENDOR_ID(0x170, 1)
+#define HV_assign_ipspc_server_connection_status_notification_port \
+ __BEAT_ADD_VENDOR_ID(0x173, 1)
+#define HV_get_raid_of_physical_spe __BEAT_ADD_VENDOR_ID(0x174, 1)
+#define HV_set_physical_spe_to_rag __BEAT_ADD_VENDOR_ID(0x175, 1)
+#define HV_release_physical_spe_from_rag __BEAT_ADD_VENDOR_ID(0x176, 1)
+#define HV_rtc_read __BEAT_ADD_VENDOR_ID(0x190, 1)
+#define HV_rtc_write __BEAT_ADD_VENDOR_ID(0x191, 1)
+#define HV_eeprom_read __BEAT_ADD_VENDOR_ID(0x192, 1)
+#define HV_eeprom_write __BEAT_ADD_VENDOR_ID(0x193, 1)
+#define HV_insert_htab_entry3 __BEAT_ADD_VENDOR_ID(0x104, 1)
+#define HV_invalidate_htab_entry3 __BEAT_ADD_VENDOR_ID(0x105, 1)
+#define HV_update_htab_permission3 __BEAT_ADD_VENDOR_ID(0x106, 1)
+#define HV_clear_htab3 __BEAT_ADD_VENDOR_ID(0x107, 1)
+#endif
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
new file mode 100644
index 00000000..350735bc
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_udbg.c
@@ -0,0 +1,98 @@
+/*
+ * udbg function for Beat
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+
+#include "beat.h"
+
+#define celleb_vtermno 0
+
+static void udbg_putc_beat(char c)
+{
+ unsigned long rc;
+
+ if (c == '\n')
+ udbg_putc_beat('\r');
+
+ rc = beat_put_term_char(celleb_vtermno, 1, (uint64_t)c << 56, 0);
+}
+
+/* Buffered chars getc */
+static u64 inbuflen;
+static u64 inbuf[2]; /* must be 2 u64s */
+
+static int udbg_getc_poll_beat(void)
+{
+ /* The interface is tricky because it may return up to 16 chars.
+ * We save them statically for future calls to udbg_getc().
+ */
+ char ch, *buf = (char *)inbuf;
+ int i;
+ long rc;
+ if (inbuflen == 0) {
+ /* get some more chars. */
+ inbuflen = 0;
+ rc = beat_get_term_char(celleb_vtermno, &inbuflen,
+ inbuf+0, inbuf+1);
+ if (rc != 0)
+ inbuflen = 0; /* otherwise inbuflen is garbage */
+ }
+ if (inbuflen <= 0 || inbuflen > 16) {
+ /* Catch error case as well as other oddities (corruption) */
+ inbuflen = 0;
+ return -1;
+ }
+ ch = buf[0];
+ for (i = 1; i < inbuflen; i++) /* shuffle them down. */
+ buf[i-1] = buf[i];
+ inbuflen--;
+ return ch;
+}
+
+static int udbg_getc_beat(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_getc_poll_beat();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+ for (delay = 0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
+ }
+ }
+}
+
+/* call this from early_init() for a working debug console on
+ * vterm capable LPAR machines
+ */
+void __init udbg_init_debug_beat(void)
+{
+ udbg_putc = udbg_putc_beat;
+ udbg_getc = udbg_getc_beat;
+ udbg_getc_poll = udbg_getc_poll_beat;
+}
diff --git a/arch/powerpc/platforms/cell/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
new file mode 100644
index 00000000..c1109969
--- /dev/null
+++ b/arch/powerpc/platforms/cell/beat_wrapper.h
@@ -0,0 +1,290 @@
+/*
+ * Beat hypervisor call I/F
+ *
+ * (C) Copyright 2007 TOSHIBA CORPORATION
+ *
+ * This code is based on arch/powerpc/platforms/pseries/plpar_wrapper.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef BEAT_HCALL
+#define BEAT_HCALL
+#include <linux/string.h>
+#include "beat_syscall.h"
+
+/* defined in hvCall.S */
+extern s64 beat_hcall_norets(u64 opcode, ...);
+extern s64 beat_hcall_norets8(u64 opcode, u64 arg1, u64 arg2, u64 arg3,
+ u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8);
+extern s64 beat_hcall1(u64 opcode, u64 retbuf[1], ...);
+extern s64 beat_hcall2(u64 opcode, u64 retbuf[2], ...);
+extern s64 beat_hcall3(u64 opcode, u64 retbuf[3], ...);
+extern s64 beat_hcall4(u64 opcode, u64 retbuf[4], ...);
+extern s64 beat_hcall5(u64 opcode, u64 retbuf[5], ...);
+extern s64 beat_hcall6(u64 opcode, u64 retbuf[6], ...);
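+/*
+ * Convention for the wrappers below: the hypervisor status is returned
+ * as the s64 result, and any output values are written into retbuf[],
+ * whose required length matches the numeric suffix of the beat_hcallN
+ * helper being used.
+ */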
+
+static inline s64 beat_downcount_of_interrupt(u64 plug_id)
+{
+ return beat_hcall_norets(HV_downcount_of_interrupt, plug_id);
+}
+
+static inline s64 beat_set_interrupt_mask(u64 index,
+ u64 val0, u64 val1, u64 val2, u64 val3)
+{
+ return beat_hcall_norets(HV_set_interrupt_mask, index,
+ val0, val1, val2, val3);
+}
+
+static inline s64 beat_destruct_irq_plug(u64 plug_id)
+{
+ return beat_hcall_norets(HV_destruct_irq_plug, plug_id);
+}
+
+static inline s64 beat_construct_and_connect_irq_plug(u64 plug_id,
+ u64 outlet_id)
+{
+ return beat_hcall_norets(HV_construct_and_connect_irq_plug, plug_id,
+ outlet_id);
+}
+
+static inline s64 beat_detect_pending_interrupts(u64 index, u64 *retbuf)
+{
+ return beat_hcall4(HV_detect_pending_interrupts, retbuf, index);
+}
+
+static inline s64 beat_pause(u64 style)
+{
+ return beat_hcall_norets(HV_pause, style);
+}
+
+static inline s64 beat_read_htab_entries(u64 htab_id, u64 index, u64 *retbuf)
+{
+ return beat_hcall5(HV_read_htab_entries, retbuf, htab_id, index);
+}
+
+static inline s64 beat_insert_htab_entry(u64 htab_id, u64 group,
+ u64 bitmask, u64 hpte_v, u64 hpte_r, u64 *slot)
+{
+ u64 dummy[3];
+ s64 ret;
+
+ ret = beat_hcall3(HV_insert_htab_entry, dummy, htab_id, group,
+ bitmask, hpte_v, hpte_r);
+ *slot = dummy[0];
+ return ret;
+}
+
+static inline s64 beat_write_htab_entry(u64 htab_id, u64 slot,
+ u64 hpte_v, u64 hpte_r, u64 mask_v, u64 mask_r,
+ u64 *ret_v, u64 *ret_r)
+{
+ u64 dummy[2];
+ s64 ret;
+
+ ret = beat_hcall2(HV_write_htab_entry, dummy, htab_id, slot,
+ hpte_v, hpte_r, mask_v, mask_r);
+ *ret_v = dummy[0];
+ *ret_r = dummy[1];
+ return ret;
+}
+
+static inline s64 beat_insert_htab_entry3(u64 htab_id, u64 group,
+ u64 hpte_v, u64 hpte_r, u64 mask_v, u64 value_v, u64 *slot)
+{
+ u64 dummy[1];
+ s64 ret;
+
+ ret = beat_hcall1(HV_insert_htab_entry3, dummy, htab_id, group,
+ hpte_v, hpte_r, mask_v, value_v);
+ *slot = dummy[0];
+ return ret;
+}
+
+static inline s64 beat_invalidate_htab_entry3(u64 htab_id, u64 group,
+ u64 va, u64 pss)
+{
+ return beat_hcall_norets(HV_invalidate_htab_entry3,
+ htab_id, group, va, pss);
+}
+
+static inline s64 beat_update_htab_permission3(u64 htab_id, u64 group,
+ u64 va, u64 pss, u64 ptel_mask, u64 ptel_value)
+{
+ return beat_hcall_norets(HV_update_htab_permission3,
+ htab_id, group, va, pss, ptel_mask, ptel_value);
+}
+
+static inline s64 beat_clear_htab3(u64 htab_id)
+{
+ return beat_hcall_norets(HV_clear_htab3, htab_id);
+}
+
+static inline void beat_shutdown_logical_partition(u64 code)
+{
+ (void)beat_hcall_norets(HV_shutdown_logical_partition, code);
+}
+
+static inline s64 beat_rtc_write(u64 time_from_epoch)
+{
+ return beat_hcall_norets(HV_rtc_write, time_from_epoch);
+}
+
+static inline s64 beat_rtc_read(u64 *time_from_epoch)
+{
+ u64 dummy[1];
+ s64 ret;
+
+ ret = beat_hcall1(HV_rtc_read, dummy);
+ *time_from_epoch = dummy[0];
+ return ret;
+}
+
+#define BEAT_NVRW_CNT (sizeof(u64) * 6)
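+/* i.e. at most six u64s (48 bytes) per EEPROM hypercall */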
+
+static inline s64 beat_eeprom_write(u64 index, u64 length, u8 *buffer)
+{
+ u64 b[6];
+
+ if (length > BEAT_NVRW_CNT)
+ return -1;
+ memcpy(b, buffer, sizeof(b));
+ return beat_hcall_norets8(HV_eeprom_write, index, length,
+ b[0], b[1], b[2], b[3], b[4], b[5]);
+}
+
+static inline s64 beat_eeprom_read(u64 index, u64 length, u8 *buffer)
+{
+ u64 b[6];
+ s64 ret;
+
+ if (length > BEAT_NVRW_CNT)
+ return -1;
+ ret = beat_hcall6(HV_eeprom_read, b, index, length);
+ memcpy(buffer, b, length);
+ return ret;
+}
+
+static inline s64 beat_set_dabr(u64 value, u64 style)
+{
+ return beat_hcall_norets(HV_set_dabr, value, style);
+}
+
+static inline s64 beat_get_characters_from_console(u64 termno, u64 *len,
+ u8 *buffer)
+{
+ u64 dummy[3];
+ s64 ret;
+
+ ret = beat_hcall3(HV_get_characters_from_console, dummy, termno, len);
+ *len = dummy[0];
+ memcpy(buffer, dummy + 1, *len);
+ return ret;
+}
+
+static inline s64 beat_put_characters_to_console(u64 termno, u64 len,
+ u8 *buffer)
+{
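+ /* Callers must ensure len <= 16: the payload is packed into two u64s. */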
+ u64 b[2];
+
+ memcpy(b, buffer, len);
+ return beat_hcall_norets(HV_put_characters_to_console, termno, len,
+ b[0], b[1]);
+}
+
+static inline s64 beat_get_spe_privileged_state_1_registers(
+ u64 id, u64 offsetof, u64 *value)
+{
+ u64 dummy[1];
+ s64 ret;
+
+ ret = beat_hcall1(HV_get_spe_privileged_state_1_registers, dummy, id,
+ offsetof);
+ *value = dummy[0];
+ return ret;
+}
+
+static inline s64 beat_set_irq_mask_for_spe(u64 id, u64 class, u64 mask)
+{
+ return beat_hcall_norets(HV_set_irq_mask_for_spe, id, class, mask);
+}
+
+static inline s64 beat_clear_interrupt_status_of_spe(u64 id, u64 class,
+ u64 mask)
+{
+ return beat_hcall_norets(HV_clear_interrupt_status_of_spe,
+ id, class, mask);
+}
+
+static inline s64 beat_set_spe_privileged_state_1_registers(
+ u64 id, u64 offsetof, u64 value)
+{
+ return beat_hcall_norets(HV_set_spe_privileged_state_1_registers,
+ id, offsetof, value);
+}
+
+static inline s64 beat_get_interrupt_status_of_spe(u64 id, u64 class, u64 *val)
+{
+ u64 dummy[1];
+ s64 ret;
+
+ ret = beat_hcall1(HV_get_interrupt_status_of_spe, dummy, id, class);
+ *val = dummy[0];
+ return ret;
+}
+
+static inline s64 beat_put_iopte(u64 ioas_id, u64 io_addr, u64 real_addr,
+ u64 ioid, u64 flags)
+{
+ return beat_hcall_norets(HV_put_iopte, ioas_id, io_addr, real_addr,
+ ioid, flags);
+}
+
+static inline s64 beat_construct_event_receive_port(u64 *port)
+{
+ u64 dummy[1];
+ s64 ret;
+
+ ret = beat_hcall1(HV_construct_event_receive_port, dummy);
+ *port = dummy[0];
+ return ret;
+}
+
+static inline s64 beat_destruct_event_receive_port(u64 port)
+{
+ s64 ret;
+
+ ret = beat_hcall_norets(HV_destruct_event_receive_port, port);
+ return ret;
+}
+
+static inline s64 beat_create_repository_node(u64 path[4], u64 data[2])
+{
+ s64 ret;
+
+ ret = beat_hcall_norets(HV_create_repository_node2,
+ path[0], path[1], path[2], path[3], data[0], data[1]);
+ return ret;
+}
+
+static inline s64 beat_get_repository_node_value(u64 lpid, u64 path[4],
+ u64 data[2])
+{
+ s64 ret;
+
+ ret = beat_hcall2(HV_get_repository_node_value2, data,
+ lpid, path[0], path[1], path[2], path[3]);
+ return ret;
+}
+
+#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644
index 00000000..d4c39e32
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -0,0 +1,209 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/cell-regs.h>
+#include "cbe_cpufreq.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8-step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+ {1, 0},
+ {2, 0},
+ {3, 0},
+ {4, 0},
+ {5, 0},
+ {6, 0},
+ {8, 0},
+ {10, 0},
+ {0, CPUFREQ_TABLE_END},
+};
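+/*
+ * The first field of each entry is the frequency divider for that pmode
+ * (1, 2, 3, 4, 5, 6, 8, 10); the second is a placeholder that
+ * cbe_cpufreq_cpu_init() fills in as max_freq / divider.
+ */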
+
+/*
+ * hardware specific functions
+ */
+
+static int set_pmode(unsigned int cpu, unsigned int slow_mode)
+{
+ int rc;
+
+ if (cbe_cpufreq_has_pmi)
+ rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
+ else
+ rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
+
+ pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
+
+ return rc;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ const u32 *max_freqp;
+ u32 max_freq;
+ int i, cur_pmode;
+ struct device_node *cpu;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ return -ENODEV;
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ /*
+ * Let's check we can actually get to the CELL regs
+ */
+ if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
+ !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
+ pr_info("invalid CBE regs pointers for cpufreq\n");
+ return -EINVAL;
+ }
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+
+ of_node_put(cpu);
+
+ if (!max_freqp)
+ return -EINVAL;
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ for (i = 0; cbe_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
+ cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
+ pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+ }
+
+ /* if DEBUG is enabled set_pmode() measures the latency
+ * of a transition */
+ policy->cpuinfo.transition_latency = 25000;
+
+ cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
+ pr_debug("current pmode is at %d\n",cur_pmode);
+
+ policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+ cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+
+ /* this ensures that policy->cpuinfo_min
+ * and policy->cpuinfo_max are set correctly */
+ return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int rc;
+ struct cpufreq_freqs freqs;
+ unsigned int cbe_pmode_new;
+
+ cpufreq_frequency_table_target(policy,
+ cbe_freqs,
+ target_freq,
+ relation,
+ &cbe_pmode_new);
+
+ freqs.old = policy->cur;
+ freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+ freqs.cpu = policy->cpu;
+
+ mutex_lock(&cbe_switch_mutex);
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ pr_debug("setting frequency for cpu %d to %d kHz, " \
+ "1/%d of max frequency\n",
+ policy->cpu,
+ cbe_freqs[cbe_pmode_new].frequency,
+ cbe_freqs[cbe_pmode_new].index);
+
+ rc = set_pmode(policy->cpu, cbe_pmode_new);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&cbe_switch_mutex);
+
+ return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+ .verify = cbe_cpufreq_verify,
+ .target = cbe_cpufreq_target,
+ .init = cbe_cpufreq_cpu_init,
+ .exit = cbe_cpufreq_cpu_exit,
+ .name = "cbe-cpufreq",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+ if (!machine_is(cell))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h
new file mode 100644
index 00000000..c1d86bfa
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.h
@@ -0,0 +1,24 @@
+/*
+ * cbe_cpufreq.h
+ *
+ * This file contains the definitions used by the cbe_cpufreq driver.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
+int cbe_cpufreq_get_pmode(int cpu);
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
+
+#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
+extern bool cbe_cpufreq_has_pmi;
+#else
+#define cbe_cpufreq_has_pmi (0)
+#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
new file mode 100644
index 00000000..20472e48
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
+/*
+ * pervasive backend for the cbe_cpufreq driver
+ *
+ * This driver makes use of the pervasive unit to
+ * engage the desired frequency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/cell-regs.h>
+
+#include "cbe_cpufreq.h"
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+ [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+ 0x0000240000000000ull,
+ 0x0000268000000000ull,
+ 0x000029C000000000ull,
+ 0x00002D0000000000ull,
+ 0x0000300000000000ull,
+ 0x0000334000000000ull,
+ 0x000039C000000000ull,
+ 0x00003FC000000000ull,
+};
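+/*
+ * Both tables are indexed by pmode (0..7), matching the eight frequency
+ * steps of the cbe_freqs table in cbe_cpufreq.c.
+ */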
+
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ unsigned long flags;
+ u64 value;
+#ifdef DEBUG
+ long time;
+#endif
+
+ local_irq_save(flags);
+
+ mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+
+ out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+ out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+ value = in_be64(&pmd_regs->pmcr);
+ /* clear the low three bits, which hold the current pmode */
+ value &= 0xFFFFFFFFFFFFFFF8ull;
+ /* set them to the new pmode */
+ value |= pmode;
+
+ out_be64(&pmd_regs->pmcr, value);
+
+#ifdef DEBUG
+ /* wait until new pmode appears in status register */
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ while (value != pmode) {
+ cpu_relax();
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ }
+
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "pervasive unit\n", time);
+#endif
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+int cbe_cpufreq_get_pmode(int cpu)
+{
+ int ret;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+ ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+ return ret;
+}
+
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
new file mode 100644
index 00000000..60a07a4f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
@@ -0,0 +1,156 @@
+/*
+ * pmi backend for the cbe_cpufreq driver
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/pmi.h>
+#include <asm/cell-regs.h>
+
+#ifdef DEBUG
+#include <asm/time.h>
+#endif
+
+#include "cbe_cpufreq.h"
+
+static u8 pmi_slow_mode_limit[MAX_CBE];
+
+bool cbe_cpufreq_has_pmi = false;
+EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
+
+/*
+ * hardware specific functions
+ */
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
+{
+ int ret;
+ pmi_message_t pmi_msg;
+#ifdef DEBUG
+ long time;
+#endif
+ pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+ pmi_msg.data1 = cbe_cpu_to_node(cpu);
+ pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+ pmi_send_message(pmi_msg);
+
+#ifdef DEBUG
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "PMI\n", time);
+#endif
+ ret = pmi_msg.data2;
+ pr_debug("PMI returned slow mode %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
+
+
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
+{
+ u8 node, slow_mode;
+
+ BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+ node = pmi_msg.data1;
+ slow_mode = pmi_msg.data2;
+
+ pmi_slow_mode_limit[node] = slow_mode;
+
+ pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_frequency_table *cbe_freqs;
+ u8 node;
+
+ /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
+ * and CPUFREQ_NOTIFY policy events?
+ */
+ if (event == CPUFREQ_START)
+ return 0;
+
+ cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
+ node = cbe_cpu_to_node(policy->cpu);
+
+ pr_debug("got notified, event=%lu, node=%u\n", event, node);
+
+ if (pmi_slow_mode_limit[node] != 0) {
+ pr_debug("limiting node %d to slow mode %d\n",
+ node, pmi_slow_mode_limit[node]);
+
+ cpufreq_verify_within_limits(policy, 0,
+ cbe_freqs[pmi_slow_mode_limit[node]].frequency);
+ }
+
+ return 0;
+}
+
+static struct notifier_block pmi_notifier_block = {
+ .notifier_call = pmi_notifier,
+};
+
+static struct pmi_handler cbe_pmi_handler = {
+ .type = PMI_TYPE_FREQ_CHANGE,
+ .handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
+
+
+
+static int __init cbe_cpufreq_pmi_init(void)
+{
+ cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
+
+ if (!cbe_cpufreq_has_pmi)
+ return -ENODEV;
+
+ cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
+ return 0;
+}
+
+static void __exit cbe_cpufreq_pmi_exit(void)
+{
+ cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+ pmi_unregister_handler(&cbe_pmi_handler);
+}
+
+module_init(cbe_cpufreq_pmi_init);
+module_exit(cbe_cpufreq_pmi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c
new file mode 100644
index 00000000..2bb80313
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_powerbutton.c
@@ -0,0 +1,118 @@
+/*
+ * driver for powerbutton on IBM cell blades
+ *
+ * (C) Copyright IBM Corp. 2005-2008
+ *
+ * Author: Christian Krafft
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <asm/pmi.h>
+#include <asm/prom.h>
+
+static struct input_dev *button_dev;
+static struct platform_device *button_pdev;
+
+static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg)
+{
+ BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON);
+
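+ /* PMI delivers a single event, so synthesize a full press/release pair. */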
+ input_report_key(button_dev, KEY_POWER, 1);
+ input_sync(button_dev);
+ input_report_key(button_dev, KEY_POWER, 0);
+ input_sync(button_dev);
+}
+
+static struct pmi_handler cbe_pmi_handler = {
+ .type = PMI_TYPE_POWER_BUTTON,
+ .handle_pmi_message = cbe_powerbutton_handle_pmi,
+};
+
+static int __init cbe_powerbutton_init(void)
+{
+ int ret = 0;
+ struct input_dev *dev;
+
+ if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) {
+ printk(KERN_ERR "%s: Not a cell blade.\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ dev = input_allocate_device();
+ if (!dev) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "%s: Not enough memory.\n", __func__);
+ goto out;
+ }
+
+ set_bit(EV_KEY, dev->evbit);
+ set_bit(KEY_POWER, dev->keybit);
+
+ dev->name = "Power Button";
+ dev->id.bustype = BUS_HOST;
+
+ /* this makes the button look like an ACPI power button;
+ * no clue whether anyone relies on that, though */
+ dev->id.product = 0x02;
+ dev->phys = "LNXPWRBN/button/input0";
+
+ button_pdev = platform_device_register_simple("power_button", 0, NULL, 0);
+ if (IS_ERR(button_pdev)) {
+ ret = PTR_ERR(button_pdev);
+ goto out_free_input;
+ }
+
+ dev->dev.parent = &button_pdev->dev;
+ ret = input_register_device(dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed to register device\n", __func__);
+ goto out_free_pdev;
+ }
+
+ button_dev = dev;
+
+ ret = pmi_register_handler(&cbe_pmi_handler);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__);
+ goto out_free_pdev;
+ }
+
+ goto out;
+
+out_free_pdev:
+ platform_device_unregister(button_pdev);
+out_free_input:
+ input_free_device(dev);
+out:
+ return ret;
+}
+
+static void __exit cbe_powerbutton_exit(void)
+{
+ pmi_unregister_handler(&cbe_pmi_handler);
+ platform_device_unregister(button_pdev);
+ input_free_device(button_dev);
+}
+
+module_init(cbe_powerbutton_init);
+module_exit(cbe_powerbutton_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
new file mode 100644
index 00000000..1428d583
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -0,0 +1,281 @@
+/*
+ * cbe_regs.c
+ *
+ * Accessor routines for the various MMIO register blocks of the CBE
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/cell-regs.h>
+
+/*
+ * The current implementation uses "cpu" nodes. We build our own mapping
+ * array of cpu numbers to cpu nodes locally for now, to give interrupt-
+ * time code a fast path rather than calling of_get_cpu_node(). If we
+ * implement cpu hotplug, we'll have to install an appropriate notifier
+ * in order to release references to a cpu going away.
+ */
+static struct cbe_regs_map
+{
+ struct device_node *cpu_node;
+ struct device_node *be_node;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_iic_regs __iomem *iic_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ struct cbe_pmd_shadow_regs pmd_shadow_regs;
+} cbe_regs_maps[MAX_CBE];
+static int cbe_regs_map_count;
+
+static struct cbe_thread_map
+{
+ struct device_node *cpu_node;
+ struct device_node *be_node;
+ struct cbe_regs_map *regs;
+ unsigned int thread_id;
+ unsigned int cbe_id;
+} cbe_thread_map[NR_CPUS];
+
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
+static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
+
+static struct cbe_regs_map *cbe_find_map(struct device_node *np)
+{
+ int i;
+ struct device_node *tmp_np;
+
+ if (strcasecmp(np->type, "spe")) {
+ for (i = 0; i < cbe_regs_map_count; i++)
+ if (cbe_regs_maps[i].cpu_node == np ||
+ cbe_regs_maps[i].be_node == np)
+ return &cbe_regs_maps[i];
+ return NULL;
+ }
+
+ if (np->data)
+ return np->data;
+
+ /* walk up the path until a cpu or be node is found */
+ tmp_np = np;
+ do {
+ tmp_np = tmp_np->parent;
+ /* on a correct device tree we won't get up to the root */
+ BUG_ON(!tmp_np);
+ } while (strcasecmp(tmp_np->type, "cpu") &&
+ strcasecmp(tmp_np->type, "be"));
+
+ np->data = cbe_find_map(tmp_np);
+
+ return np->data;
+}
+
+struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->pmd_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
+
+struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->pmd_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
+
+struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return &map->pmd_shadow_regs;
+}
+
+struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return &map->pmd_shadow_regs;
+}
+
+struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->iic_regs;
+}
+
+struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->iic_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->mic_tm_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->mic_tm_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
+
+u32 cbe_get_hw_thread_id(int cpu)
+{
+ return cbe_thread_map[cpu].thread_id;
+}
+EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
+
+u32 cbe_cpu_to_node(int cpu)
+{
+ return cbe_thread_map[cpu].cbe_id;
+}
+EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
+
+u32 cbe_node_to_cpu(int node)
+{
+ return cpumask_first(&cbe_local_mask[node]);
+}
+EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
+
+static struct device_node *cbe_get_be_node(int cpu_id)
+{
+ struct device_node *np;
+
+ for_each_node_by_type (np, "be") {
+ int len,i;
+ const phandle *cpu_handle;
+
+ cpu_handle = of_get_property(np, "cpus", &len);
+
+ /*
+ * the CAB SLOF tree is non-compliant, so we just assume
+ * there is only one node
+ */
+ if (WARN_ON_ONCE(!cpu_handle))
+ return np;
+
+ for (i = 0; i < len; i++)
+ if (of_find_node_by_phandle(cpu_handle[i]) ==
+ of_get_cpu_node(cpu_id, NULL))
+ return np;
+ }
+
+ return NULL;
+}
+
+static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
+{
+ if (map->be_node) {
+ struct device_node *be, *np;
+
+ be = map->be_node;
+
+ for_each_node_by_type(np, "pervasive")
+ if (of_get_parent(np) == be)
+ map->pmd_regs = of_iomap(np, 0);
+
+ for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
+ if (of_get_parent(np) == be)
+ map->iic_regs = of_iomap(np, 2);
+
+ for_each_node_by_type(np, "mic-tm")
+ if (of_get_parent(np) == be)
+ map->mic_tm_regs = of_iomap(np, 0);
+ } else {
+ struct device_node *cpu;
+ /* That hack must die die die ! */
+ const struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *prop;
+
+ cpu = map->cpu_node;
+
+ prop = of_get_property(cpu, "pervasive", NULL);
+ if (prop != NULL)
+ map->pmd_regs = ioremap(prop->address, prop->len);
+
+ prop = of_get_property(cpu, "iic", NULL);
+ if (prop != NULL)
+ map->iic_regs = ioremap(prop->address, prop->len);
+
+ prop = of_get_property(cpu, "mic-tm", NULL);
+ if (prop != NULL)
+ map->mic_tm_regs = ioremap(prop->address, prop->len);
+ }
+}
+
+
+void __init cbe_regs_init(void)
+{
+ int i;
+ unsigned int thread_id;
+ struct device_node *cpu;
+
+ /* Build local fast map of CPUs */
+ for_each_possible_cpu(i) {
+ cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
+ cbe_thread_map[i].be_node = cbe_get_be_node(i);
+ cbe_thread_map[i].thread_id = thread_id;
+ }
+
+ /* Find maps for each device tree CPU */
+ for_each_node_by_type(cpu, "cpu") {
+ struct cbe_regs_map *map;
+ unsigned int cbe_id;
+
+ cbe_id = cbe_regs_map_count++;
+ map = &cbe_regs_maps[cbe_id];
+
+ if (cbe_regs_map_count > MAX_CBE) {
+ printk(KERN_ERR "cbe_regs: More BE chips than supported"
+ "!\n");
+ cbe_regs_map_count--;
+ of_node_put(cpu);
+ return;
+ }
+ map->cpu_node = cpu;
+
+ for_each_possible_cpu(i) {
+ struct cbe_thread_map *thread = &cbe_thread_map[i];
+
+ if (thread->cpu_node == cpu) {
+ thread->regs = map;
+ thread->cbe_id = cbe_id;
+ map->be_node = thread->be_node;
+ cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
+ if(thread->thread_id == 0)
+ cpumask_set_cpu(i, &cbe_first_online_cpu);
+ }
+ }
+
+ cbe_fill_regs_map(map);
+ }
+}
+
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644
index 00000000..94560db7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -0,0 +1,399 @@
+/*
+ * thermal support for the cell processor
+ *
+ * This module adds some sysfs attributes to cpu and spu nodes.
+ * Base for measurements are the digital thermal sensors (DTS)
+ * located on the chip.
+ * The sensors are accurate to 2 degrees and cover the range from
+ * 65 to 125 degrees Celsius.
+ * The attributes can be found under
+ * /sys/devices/system/cpu/cpuX/thermal
+ * /sys/devices/system/spu/spuX/thermal
+ *
+ * The following attributes are added for each node:
+ * temperature:
+ * contains the current temperature measured by the DTS
+ * throttle_begin:
+ * throttling begins when temperature is greater or equal to
+ * throttle_begin. Setting this value to 125 prevents throttling.
+ * throttle_end:
+ * throttling is being ceased, if the temperature is lower than
+ * throttle_end. Due to a delay between applying throttling and
+ * a reduced temperature this value should be less than throttle_begin.
+ * A value equal to throttle_begin provides only a very little hysteresis.
+ * throttle_full_stop:
+ * If the temperatrue is greater or equal to throttle_full_stop,
+ * full throttling is applied to the cpu or spu. This value should be
+ * greater than throttle_begin and throttle_end. Setting this value to
+ * 65 prevents the unit from running code at all.
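+ *
+ * Example: reading /sys/devices/system/cpu/cpu0/thermal/temperature
+ * returns the decoded sensor value in degrees celsius, e.g. "73".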
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/stringify.h>
+#include <asm/spu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/cell-regs.h>
+
+#include "spu_priv1_mmio.h"
+
+#define TEMP_MIN 65
+#define TEMP_MAX 125
+
+#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \
+struct device_attribute attr_ ## _prefix ## _ ## _name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _prefix ## _show_ ## _name, \
+ .store = _prefix ## _store_ ## _name, \
+};
+
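+/*
+ * The DTS encodes its reading in the low 6 bits of the register, in
+ * 2 degree steps above TEMP_MIN: a raw value of 8 decodes to
+ * (8 << 1) + 65 = 81 degrees celsius. temp_to_reg() is the inverse.
+ */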
+static inline u8 reg_to_temp(u8 reg_value)
+{
+ return ((reg_value & 0x3f) << 1) + TEMP_MIN;
+}
+
+static inline u8 temp_to_reg(u8 temp)
+{
+ return ((temp - TEMP_MIN) >> 1) & 0x3f;
+}
+
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev)
+{
+ struct spu *spu;
+
+ spu = container_of(dev, struct spu, dev);
+
+ return cbe_get_pmd_regs(spu_devnode(spu));
+}
+
+/* returns the value for a given spu in a given register */
+static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg)
+{
+ union spe_reg value;
+ struct spu *spu;
+
+ spu = container_of(dev, struct spu, dev);
+	value.val = in_be64(&reg->val);
+
+ return value.spe[spu->spe_id];
+}
+
+static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u8 value;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = get_pmd_regs(dev);
+
+ value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1);
+
+ return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
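+/*
+ * The throttle set points are 6-bit fields packed into tm_tpr one
+ * byte apart; the pos argument selects the field (SPE end/begin/
+ * full_stop at bit offsets 0/8/16, PPE the same at 32/40/48).
+ */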
+static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos)
+{
+ u64 value;
+
+ value = in_be64(&pmd_regs->tm_tpr.val);
+ /* access the corresponding byte */
+ value >>= pos;
+ value &= 0x3F;
+
+ return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
+{
+ u64 reg_value;
+	unsigned int temp;
+ u64 new_value;
+ int ret;
+
+ ret = sscanf(buf, "%u", &temp);
+
+ if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX)
+ return -EINVAL;
+
+ new_value = temp_to_reg(temp);
+
+ reg_value = in_be64(&pmd_regs->tm_tpr.val);
+
+ /* zero out bits for new value */
+ reg_value &= ~(0xffull << pos);
+ /* set bits to new value */
+ reg_value |= new_value << pos;
+
+ out_be64(&pmd_regs->tm_tpr.val, reg_value);
+ return size;
+}
+
+static ssize_t spu_show_throttle_end(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(get_pmd_regs(dev), buf, 0);
+}
+
+static ssize_t spu_show_throttle_begin(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(get_pmd_regs(dev), buf, 8);
+}
+
+static ssize_t spu_show_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(get_pmd_regs(dev), buf, 16);
+}
+
+static ssize_t spu_store_throttle_end(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(get_pmd_regs(dev), buf, size, 0);
+}
+
+static ssize_t spu_store_throttle_begin(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(get_pmd_regs(dev), buf, size, 8);
+}
+
+static ssize_t spu_store_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(get_pmd_regs(dev), buf, size, 16);
+}
+
+static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ u64 value;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
+ value = in_be64(&pmd_regs->ts_ctsr2);
+
+ value = (value >> pos) & 0x3f;
+
+ return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+
+/* shows the temperature of the DTS on the PPE,
+ * located near the linear thermal sensor */
+static ssize_t ppe_show_temp0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ppe_show_temp(dev, buf, 32);
+}
+
+/* shows the temperature of the second DTS on the PPE */
+static ssize_t ppe_show_temp1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ppe_show_temp(dev, buf, 0);
+}
+
+static ssize_t ppe_show_throttle_end(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32);
+}
+
+static ssize_t ppe_show_throttle_begin(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40);
+}
+
+static ssize_t ppe_show_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48);
+}
+
+static ssize_t ppe_store_throttle_end(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32);
+}
+
+static ssize_t ppe_store_throttle_begin(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40);
+}
+
+static ssize_t ppe_store_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48);
+}
+
+
+static struct device_attribute attr_spu_temperature = {
+ .attr = {.name = "temperature", .mode = 0400 },
+ .show = spu_show_temp,
+};
+
+static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600);
+
+
+static struct attribute *spu_attributes[] = {
+ &attr_spu_temperature.attr,
+ &attr_spu_throttle_end.attr,
+ &attr_spu_throttle_begin.attr,
+ &attr_spu_throttle_full_stop.attr,
+ NULL,
+};
+
+static struct attribute_group spu_attribute_group = {
+ .name = "thermal",
+ .attrs = spu_attributes,
+};
+
+static struct device_attribute attr_ppe_temperature0 = {
+ .attr = {.name = "temperature0", .mode = 0400 },
+ .show = ppe_show_temp0,
+};
+
+static struct device_attribute attr_ppe_temperature1 = {
+ .attr = {.name = "temperature1", .mode = 0400 },
+ .show = ppe_show_temp1,
+};
+
+static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
+
+static struct attribute *ppe_attributes[] = {
+ &attr_ppe_temperature0.attr,
+ &attr_ppe_temperature1.attr,
+ &attr_ppe_throttle_end.attr,
+ &attr_ppe_throttle_begin.attr,
+ &attr_ppe_throttle_full_stop.attr,
+ NULL,
+};
+
+static struct attribute_group ppe_attribute_group = {
+ .name = "thermal",
+ .attrs = ppe_attributes,
+};
+
+/*
+ * initialize throttling with default values
+ */
+static int __init init_default_values(void)
+{
+ int cpu;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct device *dev;
+ union ppe_spe_reg tpr;
+ union spe_reg str1;
+ u64 str2;
+ union spe_reg cr1;
+ u64 cr2;
+
+ /* TPR defaults */
+ /* ppe
+ * 1F - no full stop
+ * 08 - dynamic throttling starts if over 80 degrees
+ * 03 - dynamic throttling ceases if below 70 degrees */
+ tpr.ppe = 0x1F0803;
+ /* spe
+ * 10 - full stopped when over 96 degrees
+ * 08 - dynamic throttling starts if over 80 degrees
+ * 03 - dynamic throttling ceases if below 70 degrees
+ */
+ tpr.spe = 0x100803;
+
+ /* STR defaults */
+ /* str1
+ * 10 - stop 16 of 32 cycles
+ */
+ str1.val = 0x1010101010101010ull;
+ /* str2
+ * 10 - stop 16 of 32 cycles
+ */
+ str2 = 0x10;
+
+ /* CR defaults */
+ /* cr1
+ * 4 - normal operation
+ */
+ cr1.val = 0x0404040404040404ull;
+ /* cr2
+ * 4 - normal operation
+ */
+ cr2 = 0x04;
+
+ for_each_possible_cpu (cpu) {
+ pr_debug("processing cpu %d\n", cpu);
+ dev = get_cpu_device(cpu);
+
+ if (!dev) {
+ pr_info("invalid dev pointer for cbe_thermal\n");
+ return -EINVAL;
+ }
+
+ pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
+
+ if (!pmd_regs) {
+ pr_info("invalid CBE regs pointer for cbe_thermal\n");
+ return -EINVAL;
+ }
+
+ out_be64(&pmd_regs->tm_str2, str2);
+ out_be64(&pmd_regs->tm_str1.val, str1.val);
+ out_be64(&pmd_regs->tm_tpr.val, tpr.val);
+ out_be64(&pmd_regs->tm_cr1.val, cr1.val);
+ out_be64(&pmd_regs->tm_cr2, cr2);
+ }
+
+ return 0;
+}
+
+
+static int __init thermal_init(void)
+{
+ int rc = init_default_values();
+
+ if (rc == 0) {
+ spu_add_dev_attr_group(&spu_attribute_group);
+ cpu_add_dev_attr_group(&ppe_attribute_group);
+ }
+
+ return rc;
+}
+module_init(thermal_init);
+
+static void __exit thermal_exit(void)
+{
+ spu_remove_dev_attr_group(&spu_attribute_group);
+ cpu_remove_dev_attr_group(&ppe_attribute_group);
+}
+module_exit(thermal_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
new file mode 100644
index 00000000..5822141a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -0,0 +1,500 @@
+/*
+ * Support for PCI on Celleb platform.
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This code is based on arch/powerpc/kernel/rtas_pci.c:
+ * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
+ * Copyright (C) 2003 Anton Blanchard , IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/pci_regs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+#include "celleb_pci.h"
+
+#define MAX_PCI_DEVICES 32
+#define MAX_PCI_FUNCTIONS 8
+#define MAX_PCI_BASE_ADDRS 3 /* use 64 bit address */
+
+/* definitions for the fake pci configuration area for GbE and other devices */
+
+struct celleb_pci_resource {
+ struct resource r[MAX_PCI_BASE_ADDRS];
+};
+
+struct celleb_pci_private {
+ unsigned char *fake_config[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
+ struct celleb_pci_resource *res[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
+};
+
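+/*
+ * The fake configuration area is kept in little-endian byte order,
+ * matching real PCI config space, so all accesses go through the
+ * explicit conversion helpers below.
+ */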
+static inline u8 celleb_fake_config_readb(void *addr)
+{
+ u8 *p = addr;
+ return *p;
+}
+
+static inline u16 celleb_fake_config_readw(void *addr)
+{
+ __le16 *p = addr;
+ return le16_to_cpu(*p);
+}
+
+static inline u32 celleb_fake_config_readl(void *addr)
+{
+ __le32 *p = addr;
+ return le32_to_cpu(*p);
+}
+
+static inline void celleb_fake_config_writeb(u32 val, void *addr)
+{
+ u8 *p = addr;
+ *p = val;
+}
+
+static inline void celleb_fake_config_writew(u32 val, void *addr)
+{
+ __le16 val16;
+ __le16 *p = addr;
+ val16 = cpu_to_le16(val);
+ *p = val16;
+}
+
+static inline void celleb_fake_config_writel(u32 val, void *addr)
+{
+ __le32 val32;
+ __le32 *p = addr;
+ val32 = cpu_to_le32(val);
+ *p = val32;
+}
+
+static unsigned char *get_fake_config_start(struct pci_controller *hose,
+ int devno, int fn)
+{
+ struct celleb_pci_private *private = hose->private_data;
+
+ if (private == NULL)
+ return NULL;
+
+ return private->fake_config[devno][fn];
+}
+
+static struct celleb_pci_resource *get_resource_start(
+ struct pci_controller *hose,
+ int devno, int fn)
+{
+ struct celleb_pci_private *private = hose->private_data;
+
+ if (private == NULL)
+ return NULL;
+
+ return private->res[devno][fn];
+}
+
+
+static void celleb_config_read_fake(unsigned char *config, int where,
+ int size, u32 *val)
+{
+ char *p = config + where;
+
+ switch (size) {
+ case 1:
+ *val = celleb_fake_config_readb(p);
+ break;
+ case 2:
+ *val = celleb_fake_config_readw(p);
+ break;
+ case 4:
+ *val = celleb_fake_config_readl(p);
+ break;
+ }
+}
+
+static void celleb_config_write_fake(unsigned char *config, int where,
+ int size, u32 val)
+{
+ char *p = config + where;
+
+ switch (size) {
+ case 1:
+ celleb_fake_config_writeb(val, p);
+ break;
+ case 2:
+ celleb_fake_config_writew(val, p);
+ break;
+ case 4:
+ celleb_fake_config_writel(val, p);
+ break;
+ }
+}
+
+static int celleb_fake_pci_read_config(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ char *config;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ unsigned int devno = devfn >> 3;
+ unsigned int fn = devfn & 0x7;
+
+	/* alignment check */
+ BUG_ON(where % size);
+
+ pr_debug(" fake read: bus=0x%x, ", bus->number);
+ config = get_fake_config_start(hose, devno, fn);
+
+ pr_debug("devno=0x%x, where=0x%x, size=0x%x, ", devno, where, size);
+ if (!config) {
+ pr_debug("failed\n");
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ celleb_config_read_fake(config, where, size, val);
+ pr_debug("val=0x%x\n", *val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int celleb_fake_pci_write_config(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val)
+{
+ char *config;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct celleb_pci_resource *res;
+ unsigned int devno = devfn >> 3;
+ unsigned int fn = devfn & 0x7;
+
+	/* alignment check */
+ BUG_ON(where % size);
+
+ config = get_fake_config_start(hose, devno, fn);
+
+ if (!config)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
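+	/*
+	 * A write of all ones to a BAR is the standard size probe:
+	 * answer with the region size, as real hardware would.
+	 */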
+ if (val == ~0) {
+ int i = (where - PCI_BASE_ADDRESS_0) >> 3;
+
+ switch (where) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_2:
+ if (size != 4)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ res = get_resource_start(hose, devno, fn);
+ if (!res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ celleb_config_write_fake(config, where, size,
+ (res->r[i].end - res->r[i].start));
+ return PCIBIOS_SUCCESSFUL;
+ case PCI_BASE_ADDRESS_1:
+ case PCI_BASE_ADDRESS_3:
+ case PCI_BASE_ADDRESS_4:
+ case PCI_BASE_ADDRESS_5:
+ break;
+ default:
+ break;
+ }
+ }
+
+ celleb_config_write_fake(config, where, size, val);
+ pr_debug(" fake write: where=%x, size=%d, val=%x\n",
+ where, size, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops celleb_fake_pci_ops = {
+ .read = celleb_fake_pci_read_config,
+ .write = celleb_fake_pci_write_config,
+};
+
+static inline void celleb_setup_pci_base_addrs(struct pci_controller *hose,
+ unsigned int devno, unsigned int fn,
+ unsigned int num_base_addr)
+{
+ u32 val;
+ unsigned char *config;
+ struct celleb_pci_resource *res;
+
+ config = get_fake_config_start(hose, devno, fn);
+ res = get_resource_start(hose, devno, fn);
+
+ if (!config || !res)
+ return;
+
+ switch (num_base_addr) {
+ case 3:
+ val = (res->r[2].start & 0xfffffff0)
+ | PCI_BASE_ADDRESS_MEM_TYPE_64;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_4, 4, val);
+ val = res->r[2].start >> 32;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_5, 4, val);
+ /* FALLTHROUGH */
+ case 2:
+ val = (res->r[1].start & 0xfffffff0)
+ | PCI_BASE_ADDRESS_MEM_TYPE_64;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_2, 4, val);
+ val = res->r[1].start >> 32;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_3, 4, val);
+ /* FALLTHROUGH */
+ case 1:
+ val = (res->r[0].start & 0xfffffff0)
+ | PCI_BASE_ADDRESS_MEM_TYPE_64;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_0, 4, val);
+ val = res->r[0].start >> 32;
+ celleb_config_write_fake(config, PCI_BASE_ADDRESS_1, 4, val);
+ break;
+ }
+
+ val = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ celleb_config_write_fake(config, PCI_COMMAND, 2, val);
+}
+
+static int __init celleb_setup_fake_pci_device(struct device_node *node,
+ struct pci_controller *hose)
+{
+ unsigned int rlen;
+ int num_base_addr = 0;
+ u32 val;
+ const u32 *wi0, *wi1, *wi2, *wi3, *wi4;
+ unsigned int devno, fn;
+ struct celleb_pci_private *private = hose->private_data;
+ unsigned char **config = NULL;
+ struct celleb_pci_resource **res = NULL;
+ const char *name;
+ const unsigned long *li;
+ int size, result;
+
+ if (private == NULL) {
+ printk(KERN_ERR "PCI: "
+ "memory space for pci controller is not assigned\n");
+ goto error;
+ }
+
+ name = of_get_property(node, "model", &rlen);
+ if (!name) {
+ printk(KERN_ERR "PCI: model property not found.\n");
+ goto error;
+ }
+
+ wi4 = of_get_property(node, "reg", &rlen);
+ if (wi4 == NULL)
+ goto error;
+
+ devno = ((wi4[0] >> 8) & 0xff) >> 3;
+ fn = (wi4[0] >> 8) & 0x7;
+
+ pr_debug("PCI: celleb_setup_fake_pci() %s devno=%x fn=%x\n", name,
+ devno, fn);
+
+ size = 256;
+ config = &private->fake_config[devno][fn];
+ *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
+ if (*config == NULL) {
+ printk(KERN_ERR "PCI: "
+ "not enough memory for fake configuration space\n");
+ goto error;
+ }
+ pr_debug("PCI: fake config area assigned 0x%016lx\n",
+ (unsigned long)*config);
+
+ size = sizeof(struct celleb_pci_resource);
+ res = &private->res[devno][fn];
+ *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
+ if (*res == NULL) {
+ printk(KERN_ERR
+ "PCI: not enough memory for resource data space\n");
+ goto error;
+ }
+ pr_debug("PCI: res assigned 0x%016lx\n", (unsigned long)*res);
+
+ wi0 = of_get_property(node, "device-id", NULL);
+ wi1 = of_get_property(node, "vendor-id", NULL);
+ wi2 = of_get_property(node, "class-code", NULL);
+ wi3 = of_get_property(node, "revision-id", NULL);
+ if (!wi0 || !wi1 || !wi2 || !wi3) {
+ printk(KERN_ERR "PCI: Missing device tree properties.\n");
+ goto error;
+ }
+
+ celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
+ celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
+ pr_debug("class-code = 0x%08x\n", wi2[0]);
+
+ celleb_config_write_fake(*config, PCI_CLASS_PROG, 1, wi2[0] & 0xff);
+ celleb_config_write_fake(*config, PCI_CLASS_DEVICE, 2,
+ (wi2[0] >> 8) & 0xffff);
+ celleb_config_write_fake(*config, PCI_REVISION_ID, 1, wi3[0]);
+
+ while (num_base_addr < MAX_PCI_BASE_ADDRS) {
+ result = of_address_to_resource(node,
+ num_base_addr, &(*res)->r[num_base_addr]);
+ if (result)
+ break;
+ num_base_addr++;
+ }
+
+ celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
+
+ li = of_get_property(node, "interrupts", &rlen);
+ if (!li) {
+ printk(KERN_ERR "PCI: interrupts not found.\n");
+ goto error;
+ }
+ val = li[0];
+ celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
+ celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
+
+#ifdef DEBUG
+ pr_debug("PCI: %s irq=%ld\n", name, li[0]);
+ for (i = 0; i < 6; i++) {
+ celleb_config_read_fake(*config,
+ PCI_BASE_ADDRESS_0 + 0x4 * i, 4,
+ &val);
+ pr_debug("PCI: %s fn=%d base_address_%d=0x%x\n",
+ name, fn, i, val);
+ }
+#endif
+
+ celleb_config_write_fake(*config, PCI_HEADER_TYPE, 1,
+ PCI_HEADER_TYPE_NORMAL);
+
+ return 0;
+
+error:
+ if (mem_init_done) {
+ if (config && *config)
+ kfree(*config);
+ if (res && *res)
+ kfree(*res);
+
+ } else {
+ if (config && *config) {
+ size = 256;
+ free_bootmem((unsigned long)(*config), size);
+ }
+ if (res && *res) {
+ size = sizeof(struct celleb_pci_resource);
+ free_bootmem((unsigned long)(*res), size);
+ }
+ }
+
+ return 1;
+}
+
+static int __init phb_set_bus_ranges(struct device_node *dev,
+ struct pci_controller *phb)
+{
+ const int *bus_range;
+ unsigned int len;
+
+ bus_range = of_get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int))
+ return 1;
+
+ phb->first_busno = bus_range[0];
+ phb->last_busno = bus_range[1];
+
+ return 0;
+}
+
+static void __init celleb_alloc_private_mem(struct pci_controller *hose)
+{
+ hose->private_data =
+ zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
+ GFP_KERNEL);
+}
+
+static int __init celleb_setup_fake_pci(struct device_node *dev,
+ struct pci_controller *phb)
+{
+ struct device_node *node;
+
+ phb->ops = &celleb_fake_pci_ops;
+ celleb_alloc_private_mem(phb);
+
+ for (node = of_get_next_child(dev, NULL);
+ node != NULL; node = of_get_next_child(dev, node))
+ celleb_setup_fake_pci_device(node, phb);
+
+ return 0;
+}
+
+static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
+ .setup = celleb_setup_fake_pci,
+};
+
+static struct of_device_id celleb_phb_match[] __initdata = {
+ {
+ .name = "pci-pseudo",
+ .data = &celleb_fake_pci_spec,
+ }, {
+ .name = "epci",
+ .data = &celleb_epci_spec,
+ }, {
+ .name = "pcie",
+ .data = &celleb_pciex_spec,
+ }, {
+ },
+};
+
+int __init celleb_setup_phb(struct pci_controller *phb)
+{
+ struct device_node *dev = phb->dn;
+ const struct of_device_id *match;
+ struct celleb_phb_spec *phb_spec;
+ int rc;
+
+ match = of_match_node(celleb_phb_match, dev);
+ if (!match)
+ return 1;
+
+ phb_set_bus_ranges(dev, phb);
+ phb->buid = 1;
+
+ phb_spec = match->data;
+ rc = (*phb_spec->setup)(dev, phb);
+ if (rc)
+ return 1;
+
+ if (phb_spec->ops)
+ iowa_register_bus(phb, phb_spec->ops,
+ phb_spec->iowa_init,
+ phb_spec->iowa_data);
+ return 0;
+}
+
+int celleb_pci_probe_mode(struct pci_bus *bus)
+{
+ return PCI_PROBE_DEVTREE;
+}
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
new file mode 100644
index 00000000..a801fcc5
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_pci.h
@@ -0,0 +1,46 @@
+/*
+ * pci prototypes for Celleb platform
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CELLEB_PCI_H
+#define _CELLEB_PCI_H
+
+#include <linux/pci.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/ppc-pci.h>
+#include <asm/io-workarounds.h>
+
+struct iowa_bus;
+
+struct celleb_phb_spec {
+ int (*setup)(struct device_node *, struct pci_controller *);
+ struct ppc_pci_io *ops;
+ int (*iowa_init)(struct iowa_bus *, void *);
+ void *iowa_data;
+};
+
+extern int celleb_setup_phb(struct pci_controller *);
+extern int celleb_pci_probe_mode(struct pci_bus *);
+
+extern struct celleb_phb_spec celleb_epci_spec;
+extern struct celleb_phb_spec celleb_pciex_spec;
+
+#endif /* _CELLEB_PCI_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
new file mode 100644
index 00000000..b596a711
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc.h
@@ -0,0 +1,232 @@
+/*
+ * SCC (Super Companion Chip) definitions
+ *
+ * (C) Copyright 2004-2006 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CELLEB_SCC_H
+#define _CELLEB_SCC_H
+
+#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
+#define PCI_DEVICE_ID_TOSHIBA_SCC_PCIEXC_BRIDGE 0x01b0
+#define PCI_DEVICE_ID_TOSHIBA_SCC_EPCI_BRIDGE 0x01b1
+#define PCI_DEVICE_ID_TOSHIBA_SCC_BRIDGE 0x01b2
+#define PCI_DEVICE_ID_TOSHIBA_SCC_GBE 0x01b3
+#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
+#define PCI_DEVICE_ID_TOSHIBA_SCC_USB2 0x01b5
+#define PCI_DEVICE_ID_TOSHIBA_SCC_USB 0x01b6
+#define PCI_DEVICE_ID_TOSHIBA_SCC_ENCDEC 0x01b7
+
+#define SCC_EPCI_REG 0x0000d000
+
+/* EPCI registers */
+#define SCC_EPCI_CNF10_REG 0x010
+#define SCC_EPCI_CNF14_REG 0x014
+#define SCC_EPCI_CNF18_REG 0x018
+#define SCC_EPCI_PVBAT 0x100
+#define SCC_EPCI_VPMBAT 0x104
+#define SCC_EPCI_VPIBAT 0x108
+#define SCC_EPCI_VCSR 0x110
+#define SCC_EPCI_VIENAB 0x114
+#define SCC_EPCI_VISTAT 0x118
+#define SCC_EPCI_VRDCOUNT 0x124
+#define SCC_EPCI_BAM0 0x12c
+#define SCC_EPCI_BAM1 0x134
+#define SCC_EPCI_BAM2 0x13c
+#define SCC_EPCI_IADR 0x164
+#define SCC_EPCI_CLKRST 0x800
+#define SCC_EPCI_INTSET 0x804
+#define SCC_EPCI_STATUS 0x808
+#define SCC_EPCI_ABTSET 0x80c
+#define SCC_EPCI_WATRP 0x810
+#define SCC_EPCI_DUMYRADR 0x814
+#define SCC_EPCI_SWRESP 0x818
+#define SCC_EPCI_CNTOPT 0x81c
+#define SCC_EPCI_ECMODE 0xf00
+#define SCC_EPCI_IOM_AC_NUM 5
+#define SCC_EPCI_IOM_ACTE(n) (0xf10 + (n) * 4)
+#define SCC_EPCI_IOT_AC_NUM 4
+#define SCC_EPCI_IOT_ACTE(n) (0xf30 + (n) * 4)
+#define SCC_EPCI_MAEA 0xf50
+#define SCC_EPCI_MAEC 0xf54
+#define SCC_EPCI_CKCTRL 0xff0
+
+/* bits for SCC_EPCI_VCSR */
+#define SCC_EPCI_VCSR_FRE 0x00020000
+#define SCC_EPCI_VCSR_FWE 0x00010000
+#define SCC_EPCI_VCSR_DR 0x00000400
+#define SCC_EPCI_VCSR_SR 0x00000008
+#define SCC_EPCI_VCSR_AT 0x00000004
+
+/* bits for SCC_EPCI_VIENAB/SCC_EPCI_VISTAT */
+#define SCC_EPCI_VISTAT_PMPE 0x00000008
+#define SCC_EPCI_VISTAT_PMFE 0x00000004
+#define SCC_EPCI_VISTAT_PRA 0x00000002
+#define SCC_EPCI_VISTAT_PRD 0x00000001
+#define SCC_EPCI_VISTAT_ALL 0x0000000f
+
+#define SCC_EPCI_VIENAB_PMPEE 0x00000008
+#define SCC_EPCI_VIENAB_PMFEE 0x00000004
+#define SCC_EPCI_VIENAB_PRA 0x00000002
+#define SCC_EPCI_VIENAB_PRD 0x00000001
+#define SCC_EPCI_VIENAB_ALL 0x0000000f
+
+/* bits for SCC_EPCI_CLKRST */
+#define SCC_EPCI_CLKRST_CKS_MASK 0x00030000
+#define SCC_EPCI_CLKRST_CKS_2 0x00000000
+#define SCC_EPCI_CLKRST_CKS_4 0x00010000
+#define SCC_EPCI_CLKRST_CKS_8 0x00020000
+#define SCC_EPCI_CLKRST_PCICRST 0x00000400
+#define SCC_EPCI_CLKRST_BC 0x00000200
+#define SCC_EPCI_CLKRST_PCIRST 0x00000100
+#define SCC_EPCI_CLKRST_PCKEN 0x00000001
+
+/* bits for SCC_EPCI_INTSET/SCC_EPCI_STATUS */
+#define SCC_EPCI_INT_2M 0x01000000
+#define SCC_EPCI_INT_RERR 0x00200000
+#define SCC_EPCI_INT_SERR 0x00100000
+#define SCC_EPCI_INT_PRTER 0x00080000
+#define SCC_EPCI_INT_SER 0x00040000
+#define SCC_EPCI_INT_PER 0x00020000
+#define SCC_EPCI_INT_PAI 0x00010000
+#define SCC_EPCI_INT_1M 0x00000100
+#define SCC_EPCI_INT_PME 0x00000010
+#define SCC_EPCI_INT_INTD 0x00000008
+#define SCC_EPCI_INT_INTC 0x00000004
+#define SCC_EPCI_INT_INTB 0x00000002
+#define SCC_EPCI_INT_INTA 0x00000001
+#define SCC_EPCI_INT_DEVINT 0x0000000f
+#define SCC_EPCI_INT_ALL 0x003f001f
+#define SCC_EPCI_INT_ALLERR 0x003f0000
+
+/* bits for SCC_EPCI_CKCTRL */
+#define SCC_EPCI_CKCTRL_CRST0 0x00010000
+#define SCC_EPCI_CKCTRL_CRST1 0x00020000
+#define SCC_EPCI_CKCTRL_OCLKEN 0x00000100
+#define SCC_EPCI_CKCTRL_LCLKEN 0x00000001
+
+#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad) ((ad) - 10)
+#define SCC_EPCI_MAX_DEVNU SCC_EPCI_IDSEL_AD_TO_SLOT(32)
+
+/* bits for SCC_EPCI_CNTOPT */
+#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
+
+/* SCC PCIEXC SMMIO registers */
+#define PEXCADRS 0x000
+#define PEXCWDATA 0x004
+#define PEXCRDATA 0x008
+#define PEXDADRS 0x010
+#define PEXDCMND 0x014
+#define PEXDWDATA 0x018
+#define PEXDRDATA 0x01c
+#define PEXREQID 0x020
+#define PEXTIDMAP 0x024
+#define PEXINTMASK 0x028
+#define PEXINTSTS 0x02c
+#define PEXAERRMASK 0x030
+#define PEXAERRSTS 0x034
+#define PEXPRERRMASK 0x040
+#define PEXPRERRSTS 0x044
+#define PEXPRERRID01 0x048
+#define PEXPRERRID23 0x04c
+#define PEXVDMASK 0x050
+#define PEXVDSTS 0x054
+#define PEXRCVCPLIDA 0x060
+#define PEXLENERRIDA 0x068
+#define PEXPHYPLLST 0x070
+#define PEXDMRDEN0 0x100
+#define PEXDMRDADR0 0x104
+#define PEXDMRDENX 0x110
+#define PEXDMRDADRX 0x114
+#define PEXECMODE 0xf00
+#define PEXMAEA(n) (0xf50 + (8 * n))
+#define PEXMAEC(n) (0xf54 + (8 * n))
+#define PEXCCRCTRL 0xff0
+
+/* SCC PCIEXC bits and shifts for PEXCADRS */
+#define PEXCADRS_BYTE_EN_SHIFT 20
+#define PEXCADRS_CMD_SHIFT 16
+#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
+#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
+
+/* SCC PCIEXC shifts for PEXDADRS */
+#define PEXDADRS_BUSNO_SHIFT 20
+#define PEXDADRS_DEVNO_SHIFT 15
+#define PEXDADRS_FUNCNO_SHIFT 12
+
+/* SCC PCIEXC bits and shifts for PEXDCMND */
+#define PEXDCMND_BYTE_EN_SHIFT 4
+#define PEXDCMND_IO_READ 0x2
+#define PEXDCMND_IO_WRITE 0x3
+#define PEXDCMND_CONFIG_READ 0xa
+#define PEXDCMND_CONFIG_WRITE 0xb
+
+/* SCC PCIEXC bits for PEXPHYPLLST */
+#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
+
+/* SCC PCIEXC bits for PEXECMODE */
+#define PEXECMODE_ALL_THROUGH 0x00000000
+#define PEXECMODE_ALL_8BIT 0x00550155
+#define PEXECMODE_ALL_16BIT 0x00aa02aa
+
+/* SCC PCIEXC bits for PEXCCRCTRL */
+#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
+#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
+#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
+#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
+
+/* SCC PCIEXC port configuration registers */
+#define PEXTCERRCHK 0x21c
+#define PEXTAMAPB0 0x220
+#define PEXTAMAPL0 0x224
+#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
+#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
+#define PEXCHVC0P 0x500
+#define PEXCHVC0NP 0x504
+#define PEXCHVC0C 0x508
+#define PEXCDVC0P 0x50c
+#define PEXCDVC0NP 0x510
+#define PEXCDVC0C 0x514
+#define PEXCHVCXP 0x518
+#define PEXCHVCXNP 0x51c
+#define PEXCHVCXC 0x520
+#define PEXCDVCXP 0x524
+#define PEXCDVCXNP 0x528
+#define PEXCDVCXC 0x52c
+#define PEXCTTRG 0x530
+#define PEXTSCTRL 0x700
+#define PEXTSSTS 0x704
+#define PEXSKPCTRL 0x708
+
+/* UHC registers */
+#define SCC_UHC_CKRCTRL 0xff0
+#define SCC_UHC_ECMODE 0xf00
+
+/* bits for SCC_UHC_CKRCTRL */
+#define SCC_UHC_F48MCKLEN 0x00000001
+#define SCC_UHC_P_SUSPEND 0x00000002
+#define SCC_UHC_PHY_SUSPEND_SEL 0x00000004
+#define SCC_UHC_HCLKEN 0x00000100
+#define SCC_UHC_USBEN 0x00010000
+#define SCC_UHC_USBCEN 0x00020000
+#define SCC_UHC_PHYEN 0x00040000
+
+/* bits for SCC_UHC_ECMODE */
+#define SCC_UHC_ECMODE_BY_BYTE 0x00000555
+#define SCC_UHC_ECMODE_BY_WORD 0x00000aaa
+
+#endif /* _CELLEB_SCC_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
new file mode 100644
index 00000000..844c0fac
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -0,0 +1,429 @@
+/*
+ * Support for SCC external PCI
+ *
+ * (C) Copyright 2004-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/pci_regs.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+#include "celleb_scc.h"
+#include "celleb_pci.h"
+
+#define MAX_PCI_DEVICES 32
+#define MAX_PCI_FUNCTIONS 8
+
+#define iob() __asm__ __volatile__("eieio; sync":::"memory")
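+/* "eieio; sync" orders the preceding MMIO stores and waits for them
+ * to complete before the abort status is sampled. */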
+
+static inline PCI_IO_ADDR celleb_epci_get_epci_base(
+ struct pci_controller *hose)
+{
+ /*
+ * Note:
+ * Celleb epci uses cfg_addr as a base address for
+ * epci control registers.
+ */
+
+ return hose->cfg_addr;
+}
+
+static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
+ struct pci_controller *hose)
+{
+ /*
+ * Note:
+ * Celleb epci uses cfg_data as a base address for
+ * configuration area for epci devices.
+ */
+
+ return hose->cfg_data;
+}
+
+static inline void clear_and_disable_master_abort_interrupt(
+ struct pci_controller *hose)
+{
+ PCI_IO_ADDR epci_base;
+ PCI_IO_ADDR reg;
+ epci_base = celleb_epci_get_epci_base(hose);
+ reg = epci_base + PCI_COMMAND;
+ out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16));
+}
+
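+/*
+ * After a config access, check whether the controller recorded a
+ * received master abort (no device answered); if so, clear the
+ * associated error state and report the device as absent.
+ */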
+static int celleb_epci_check_abort(struct pci_controller *hose,
+ PCI_IO_ADDR addr)
+{
+ PCI_IO_ADDR reg;
+ PCI_IO_ADDR epci_base;
+ u32 val;
+
+ iob();
+ epci_base = celleb_epci_get_epci_base(hose);
+
+ reg = epci_base + PCI_COMMAND;
+ val = in_be32(reg);
+
+ if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
+ out_be32(reg,
+ (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));
+
+ /* clear PCI Controller error, FRE, PMFE */
+ reg = epci_base + SCC_EPCI_STATUS;
+ out_be32(reg, SCC_EPCI_INT_PAI);
+
+ reg = epci_base + SCC_EPCI_VCSR;
+ val = in_be32(reg) & 0xffff;
+ val |= SCC_EPCI_VCSR_FRE;
+ out_be32(reg, val);
+
+ reg = epci_base + SCC_EPCI_VISTAT;
+ out_be32(reg, SCC_EPCI_VISTAT_PMFE);
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
+ struct pci_controller *hose, unsigned int devfn, int where)
+{
+ PCI_IO_ADDR addr;
+
+ if (bus != hose->bus)
+ addr = celleb_epci_get_epci_cfg(hose) +
+ (((bus->number & 0xff) << 16)
+ | ((devfn & 0xff) << 8)
+ | (where & 0xff)
+ | 0x01000000);
+ else
+ addr = celleb_epci_get_epci_cfg(hose) +
+ (((devfn & 0xff) << 8) | (where & 0xff));
+
+ pr_debug("EPCI: config_addr = 0x%p\n", addr);
+
+ return addr;
+}
+
+static int celleb_epci_read_config(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ PCI_IO_ADDR epci_base;
+ PCI_IO_ADDR addr;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+	/* alignment check */
+ BUG_ON(where % size);
+
+ if (!celleb_epci_get_epci_cfg(hose))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == hose->first_busno && devfn == 0) {
+ /* EPCI controller self */
+
+ epci_base = celleb_epci_get_epci_base(hose);
+ addr = epci_base + where;
+
+ switch (size) {
+ case 1:
+ *val = in_8(addr);
+ break;
+ case 2:
+ *val = in_be16(addr);
+ break;
+ case 4:
+ *val = in_be32(addr);
+ break;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ } else {
+
+ clear_and_disable_master_abort_interrupt(hose);
+ addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
+
+ switch (size) {
+ case 1:
+ *val = in_8(addr);
+ break;
+ case 2:
+ *val = in_le16(addr);
+ break;
+ case 4:
+ *val = in_le32(addr);
+ break;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ }
+
+ pr_debug("EPCI: "
+ "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n",
+ addr, devfn, where, size, *val);
+
+ return celleb_epci_check_abort(hose, NULL);
+}
+
+static int celleb_epci_write_config(struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val)
+{
+ PCI_IO_ADDR epci_base;
+ PCI_IO_ADDR addr;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+	/* alignment check */
+ BUG_ON(where % size);
+
+ if (!celleb_epci_get_epci_cfg(hose))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == hose->first_busno && devfn == 0) {
+ /* EPCI controller self */
+
+ epci_base = celleb_epci_get_epci_base(hose);
+ addr = epci_base + where;
+
+ switch (size) {
+ case 1:
+ out_8(addr, val);
+ break;
+ case 2:
+ out_be16(addr, val);
+ break;
+ case 4:
+ out_be32(addr, val);
+ break;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ } else {
+
+ clear_and_disable_master_abort_interrupt(hose);
+ addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
+
+ switch (size) {
+ case 1:
+ out_8(addr, val);
+ break;
+ case 2:
+ out_le16(addr, val);
+ break;
+ case 4:
+ out_le32(addr, val);
+ break;
+ default:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ }
+
+ return celleb_epci_check_abort(hose, addr);
+}
+
+struct pci_ops celleb_epci_ops = {
+ .read = celleb_epci_read_config,
+ .write = celleb_epci_write_config,
+};
+
+/* to be moved in FW */
+static int __init celleb_epci_init(struct pci_controller *hose)
+{
+ u32 val;
+ PCI_IO_ADDR reg;
+ PCI_IO_ADDR epci_base;
+ int hwres = 0;
+
+ epci_base = celleb_epci_get_epci_base(hose);
+
+ /* PCI core reset(Internal bus and PCI clock) */
+ reg = epci_base + SCC_EPCI_CKCTRL;
+ val = in_be32(reg);
+ if (val == 0x00030101)
+ hwres = 1;
+ else {
+ val &= ~(SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
+ out_be32(reg, val);
+
+ /* set PCI core clock */
+ val = in_be32(reg);
+ val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN);
+ out_be32(reg, val);
+
+ /* release PCI core reset (internal bus) */
+ val = in_be32(reg);
+ val |= SCC_EPCI_CKCTRL_CRST0;
+ out_be32(reg, val);
+
+ /* set PCI clock select */
+ reg = epci_base + SCC_EPCI_CLKRST;
+ val = in_be32(reg);
+ val &= ~SCC_EPCI_CLKRST_CKS_MASK;
+ val |= SCC_EPCI_CLKRST_CKS_2;
+ out_be32(reg, val);
+
+ /* set arbiter */
+ reg = epci_base + SCC_EPCI_ABTSET;
+ out_be32(reg, 0x0f1f001f); /* temporary value */
+
+ /* buffer on */
+ reg = epci_base + SCC_EPCI_CLKRST;
+ val = in_be32(reg);
+ val |= SCC_EPCI_CLKRST_BC;
+ out_be32(reg, val);
+
+ /* PCI clock enable */
+ val = in_be32(reg);
+ val |= SCC_EPCI_CLKRST_PCKEN;
+ out_be32(reg, val);
+
+ /* release PCI core reset (all) */
+ reg = epci_base + SCC_EPCI_CKCTRL;
+ val = in_be32(reg);
+ val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
+ out_be32(reg, val);
+
+ /* set base translation registers. (already set by Beat) */
+
+ /* set base address masks. (already set by Beat) */
+ }
+
+ /* release interrupt masks and clear all interrupts */
+ reg = epci_base + SCC_EPCI_INTSET;
+ out_be32(reg, 0x013f011f); /* all interrupts enable */
+ reg = epci_base + SCC_EPCI_VIENAB;
+ val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE;
+ out_be32(reg, val);
+ reg = epci_base + SCC_EPCI_STATUS;
+ out_be32(reg, 0xffffffff);
+ reg = epci_base + SCC_EPCI_VISTAT;
+ out_be32(reg, 0xffffffff);
+
+ /* disable PCI->IB address translation */
+ reg = epci_base + SCC_EPCI_VCSR;
+ val = in_be32(reg);
+ val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT);
+ out_be32(reg, val);
+
+ /* set base addresses. (no need to set?) */
+
+ /* memory space, bus master enable */
+ reg = epci_base + PCI_COMMAND;
+ val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ out_be32(reg, val);
+
+ /* endian mode setup */
+ reg = epci_base + SCC_EPCI_ECMODE;
+ val = 0x00550155;
+ out_be32(reg, val);
+
+ /* set control option */
+ reg = epci_base + SCC_EPCI_CNTOPT;
+ val = in_be32(reg);
+ val |= SCC_EPCI_CNTOPT_O2PMB;
+ out_be32(reg, val);
+
+	/* XXX: temporary: set registers for address conversion setup */
+ reg = epci_base + SCC_EPCI_CNF10_REG;
+ out_be32(reg, 0x80000008);
+ reg = epci_base + SCC_EPCI_CNF14_REG;
+ out_be32(reg, 0x40000008);
+
+ reg = epci_base + SCC_EPCI_BAM0;
+ out_be32(reg, 0x80000000);
+ reg = epci_base + SCC_EPCI_BAM1;
+ out_be32(reg, 0xe0000000);
+
+ reg = epci_base + SCC_EPCI_PVBAT;
+ out_be32(reg, 0x80000000);
+
+ if (!hwres) {
+ /* release external PCI reset */
+ reg = epci_base + SCC_EPCI_CLKRST;
+ val = in_be32(reg);
+ val |= SCC_EPCI_CLKRST_PCIRST;
+ out_be32(reg, val);
+ }
+
+ return 0;
+}
+
+static int __init celleb_setup_epci(struct device_node *node,
+ struct pci_controller *hose)
+{
+ struct resource r;
+
+ pr_debug("PCI: celleb_setup_epci()\n");
+
+ /*
+ * Note:
+ * Celleb epci uses cfg_addr and cfg_data member of
+ * pci_controller structure in irregular way.
+ *
+ * cfg_addr is used to map for control registers of
+ * celleb epci.
+ *
+ * cfg_data is used for configuration area of devices
+ * on Celleb epci buses.
+ */
+
+ if (of_address_to_resource(node, 0, &r))
+ goto error;
+ hose->cfg_addr = ioremap(r.start, resource_size(&r));
+ if (!hose->cfg_addr)
+ goto error;
+ pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
+ r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
+
+ if (of_address_to_resource(node, 2, &r))
+ goto error;
+ hose->cfg_data = ioremap(r.start, resource_size(&r));
+ if (!hose->cfg_data)
+ goto error;
+ pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
+ r.start, (unsigned long)hose->cfg_data, resource_size(&r));
+
+ hose->ops = &celleb_epci_ops;
+ celleb_epci_init(hose);
+
+ return 0;
+
+error:
+ if (hose->cfg_addr)
+ iounmap(hose->cfg_addr);
+
+ if (hose->cfg_data)
+ iounmap(hose->cfg_data);
+ return 1;
+}
+
+struct celleb_phb_spec celleb_epci_spec __initdata = {
+ .setup = celleb_setup_epci,
+ .ops = &spiderpci_ops,
+ .iowa_init = &spiderpci_iowa_init,
+ .iowa_data = (void *)0,
+};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
new file mode 100644
index 00000000..14be2bd3
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -0,0 +1,541 @@
+/*
+ * Support for Celleb PCI-Express.
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/iommu.h>
+#include <asm/byteorder.h>
+
+#include "celleb_scc.h"
+#include "celleb_pci.h"
+
+#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
+#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
+
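+/*
+ * scc_pciex_io_flush() reads back the DMA-read enable register so
+ * that earlier loads on the PCIEXC bus have completed before an
+ * accessor returns.
+ */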
+static void scc_pciex_io_flush(struct iowa_bus *bus)
+{
+ (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
+}
+
+/*
+ * Memory space access to device on PCIEX
+ */
+#define PCIEX_MMIO_READ(name, ret) \
+static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
+{ \
+ ret val = __do_##name(addr); \
+ scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
+ return val; \
+}
+
+#define PCIEX_MMIO_READ_STR(name) \
+static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
+ unsigned long count) \
+{ \
+ __do_##name(addr, buf, count); \
+ scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
+}
+
+PCIEX_MMIO_READ(readb, u8)
+PCIEX_MMIO_READ(readw, u16)
+PCIEX_MMIO_READ(readl, u32)
+PCIEX_MMIO_READ(readq, u64)
+PCIEX_MMIO_READ(readw_be, u16)
+PCIEX_MMIO_READ(readl_be, u32)
+PCIEX_MMIO_READ(readq_be, u64)
+PCIEX_MMIO_READ_STR(readsb)
+PCIEX_MMIO_READ_STR(readsw)
+PCIEX_MMIO_READ_STR(readsl)
+
+static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
+ unsigned long n)
+{
+ __do_memcpy_fromio(dest, src, n);
+ scc_pciex_io_flush(iowa_mem_find_bus(src));
+}
+
+/*
+ * I/O port access to devices on PCIEX.
+ */
+
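+/* An I/O "port" is an offset from _IO_BASE into this PHB's I/O
+ * window; subtracting the window's virtual base recovers the bus
+ * address to program into PEXDADRS. */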
+static inline unsigned long get_bus_address(struct pci_controller *phb,
+ unsigned long port)
+{
+ return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
+}
+
+static u32 scc_pciex_read_port(struct pci_controller *phb,
+ unsigned long port, int size)
+{
+ unsigned int byte_enable;
+ unsigned int cmd, shift;
+ unsigned long addr;
+ u32 data, ret;
+
+ BUG_ON(((port & 0x3ul) + size) > 4);
+
+ addr = get_bus_address(phb, port);
+ shift = addr & 0x3ul;
+ byte_enable = ((1 << size) - 1) << shift;
+ cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
+ PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
+ PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
+ data = PEX_IN(phb->cfg_addr, PEXDRDATA);
+ ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
+
+ pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
+ " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
+ cmd, data, ret);
+
+ return ret;
+}
+
+static void scc_pciex_write_port(struct pci_controller *phb,
+ unsigned long port, int size, u32 val)
+{
+ unsigned int byte_enable;
+ unsigned int cmd, shift;
+ unsigned long addr;
+ u32 data;
+
+ BUG_ON(((port & 0x3ul) + size) > 4);
+
+ addr = get_bus_address(phb, port);
+ shift = addr & 0x3ul;
+ byte_enable = ((1 << size) - 1) << shift;
+ cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
+ data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
+ PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
+ PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
+ PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
+
+ pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
+ " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
+ byte_enable, cmd, data);
+}
+
+static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
+{
+ return (u8)scc_pciex_read_port(phb, port, 1);
+}
+
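+/* 16- and 32-bit accesses that would cross a 4-byte boundary are
+ * split into two bus transactions and recombined. */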
+static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
+{
+ u32 data;
+ if ((port & 0x3ul) < 3)
+ data = scc_pciex_read_port(phb, port, 2);
+ else {
+ u32 d1 = scc_pciex_read_port(phb, port, 1);
+ u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
+ data = d1 | (d2 << 8);
+ }
+ return (u16)data;
+}
+
+static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
+{
+ unsigned int mod = port & 0x3ul;
+ u32 data;
+ if (mod == 0)
+ data = scc_pciex_read_port(phb, port, 4);
+ else {
+ u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
+ u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
+ data = d1 | (d2 << (mod * 8));
+ }
+ return data;
+}
+
+static void __scc_pciex_outb(struct pci_controller *phb,
+ u8 val, unsigned long port)
+{
+ scc_pciex_write_port(phb, port, 1, (u32)val);
+}
+
+static void __scc_pciex_outw(struct pci_controller *phb,
+ u16 val, unsigned long port)
+{
+ if ((port & 0x3ul) < 3)
+ scc_pciex_write_port(phb, port, 2, (u32)val);
+ else {
+ u32 d1 = val & 0x000000FF;
+ u32 d2 = (val & 0x0000FF00) >> 8;
+ scc_pciex_write_port(phb, port, 1, d1);
+ scc_pciex_write_port(phb, port + 1, 1, d2);
+ }
+}
+
+static void __scc_pciex_outl(struct pci_controller *phb,
+ u32 val, unsigned long port)
+{
+ unsigned int mod = port & 0x3ul;
+ if (mod == 0)
+ scc_pciex_write_port(phb, port, 4, val);
+ else {
+ u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
+ u32 d2 = val >> ((4 - mod) * 8);
+ scc_pciex_write_port(phb, port, 4 - mod, d1);
+ scc_pciex_write_port(phb, port + 1, mod, d2);
+ }
+}
+
+#define PCIEX_PIO_FUNC(size, name) \
+static u##size scc_pciex_in##name(unsigned long port) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(port); \
+ u##size data = __scc_pciex_in##name(bus->phb, port); \
+ scc_pciex_io_flush(bus); \
+ return data; \
+} \
+static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(p); \
+ __le##size *dst = b; \
+ for (; c != 0; c--, dst++) \
+ *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
+ scc_pciex_io_flush(bus); \
+} \
+static void scc_pciex_out##name(u##size val, unsigned long port) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(port); \
+ __scc_pciex_out##name(bus->phb, val, port); \
+} \
+static void scc_pciex_outs##name(unsigned long p, const void *b, \
+ unsigned long c) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(p); \
+ const __le##size *src = b; \
+ for (; c != 0; c--, src++) \
+ __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
+}
+#define __le8 u8
+#define cpu_to_le8(x) (x)
+#define le8_to_cpu(x) (x)
+PCIEX_PIO_FUNC(8, b)
+PCIEX_PIO_FUNC(16, w)
+PCIEX_PIO_FUNC(32, l)
+
+static struct ppc_pci_io scc_pciex_ops = {
+ .readb = scc_pciex_readb,
+ .readw = scc_pciex_readw,
+ .readl = scc_pciex_readl,
+ .readq = scc_pciex_readq,
+ .readw_be = scc_pciex_readw_be,
+ .readl_be = scc_pciex_readl_be,
+ .readq_be = scc_pciex_readq_be,
+ .readsb = scc_pciex_readsb,
+ .readsw = scc_pciex_readsw,
+ .readsl = scc_pciex_readsl,
+ .memcpy_fromio = scc_pciex_memcpy_fromio,
+ .inb = scc_pciex_inb,
+ .inw = scc_pciex_inw,
+ .inl = scc_pciex_inl,
+ .outb = scc_pciex_outb,
+ .outw = scc_pciex_outw,
+ .outl = scc_pciex_outl,
+ .insb = scc_pciex_insb,
+ .insw = scc_pciex_insw,
+ .insl = scc_pciex_insl,
+ .outsb = scc_pciex_outsb,
+ .outsw = scc_pciex_outsw,
+ .outsl = scc_pciex_outsl,
+};
+
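+/*
+ * The flush performed by scc_pciex_io_flush() triggers a DMA read;
+ * give it a dedicated scratch page as a harmless target.
+ */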
+static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
+{
+ dma_addr_t dummy_page_da;
+ void *dummy_page_va;
+
+ dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dummy_page_va) {
+ pr_err("PCIEX:Alloc dummy_page_va failed\n");
+ return -1;
+ }
+
+ dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
+ pr_err("PCIEX:Map dummy page failed.\n");
+ kfree(dummy_page_va);
+ return -1;
+ }
+
+ PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
+
+ return 0;
+}
+
+/*
+ * config space access
+ */
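+/* A config transaction is routed by PEXDADRS (bus/dev/func plus
+ * register offset) and qualified by a PEXDCMND command whose byte
+ * enables are derived from the access offset and size. */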
+#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
+ ((uint32_t)(((addr) & ~0x3UL) | \
+ ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
+ ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
+ ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
+
+#define MK_PEXDCMND_BYTE_EN(addr, size) \
+ ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
+#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
+
+static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
+ uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
+ uint64_t off, uint64_t size)
+{
+ uint32_t ret;
+ uint32_t addr, cmd;
+
+ addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
+ cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
+ PEX_OUT(base, PEXDADRS, addr);
+ PEX_OUT(base, PEXDCMND, cmd);
+ ret = (PEX_IN(base, PEXDRDATA)
+ >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
+ return ret;
+}
+
+static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
+ uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
+ uint32_t data)
+{
+ uint32_t addr, cmd;
+
+ addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
+ cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
+ PEX_OUT(base, PEXDADRS, addr);
+ PEX_OUT(base, PEXDCMND, cmd);
+ PEX_OUT(base, PEXDWDATA,
+ (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
+}
+
+#define MK_PEXCADRS_BYTE_EN(off, len) \
+ ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
+#define MK_PEXCADRS(cmd, addr, size) \
+ ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
+static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
+ uint32_t where, uint32_t size)
+{
+ PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
+ return (PEX_IN(base, PEXCRDATA)
+ >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
+}
+
+static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
+ uint32_t size, uint32_t val)
+{
+ uint32_t data;
+
+ data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
+ PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
+ PEX_OUT(base, PEXCWDATA, data);
+}
+
+/* Interfaces */
+/* Note: work-around
+ * On SCC PCIEXC, the same device is seen at all 32 dev_no slots.
+ * As SCC PCIEXC can have only one device on the bus, we probe only
+ * one dev_no (dev_no = 1).
+ */
+static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, unsigned int *val)
+{
+ struct pci_controller *phb = pci_bus_to_host(bus);
+
+ if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == 0 && PCI_SLOT(devfn) == 0)
+ *val = config_read_pciex_rc(phb->cfg_addr, where, size);
+ else
+ *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, unsigned int val)
+{
+ struct pci_controller *phb = pci_bus_to_host(bus);
+
+ if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == 0 && PCI_SLOT(devfn) == 0)
+ config_write_pciex_rc(phb->cfg_addr, where, size, val);
+ else
+ config_write_pciex_dev(phb->cfg_addr, bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops scc_pciex_pci_ops = {
+	.read = scc_pciex_read_config,
+	.write = scc_pciex_write_config,
+};
+
+static void pciex_clear_intr_all(unsigned int __iomem *base)
+{
+ PEX_OUT(base, PEXAERRSTS, 0xffffffff);
+ PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
+ PEX_OUT(base, PEXINTSTS, 0xffffffff);
+}
+
+#if 0
+static void pciex_disable_intr_all(unsigned int *base)
+{
+ PEX_OUT(base, PEXINTMASK, 0x0);
+ PEX_OUT(base, PEXAERRMASK, 0x0);
+ PEX_OUT(base, PEXPRERRMASK, 0x0);
+ PEX_OUT(base, PEXVDMASK, 0x0);
+}
+#endif
+
+static void pciex_enable_intr_all(unsigned int __iomem *base)
+{
+ PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
+ PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
+ PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
+ PEX_OUT(base, PEXVDMASK, 0x00000001);
+}
+
+static void pciex_check_status(unsigned int __iomem *base)
+{
+ uint32_t err = 0;
+ uint32_t intsts, aerr, prerr, rcvcp, lenerr;
+ uint32_t maea, maec;
+
+ intsts = PEX_IN(base, PEXINTSTS);
+ aerr = PEX_IN(base, PEXAERRSTS);
+ prerr = PEX_IN(base, PEXPRERRSTS);
+ rcvcp = PEX_IN(base, PEXRCVCPLIDA);
+ lenerr = PEX_IN(base, PEXLENERRIDA);
+
+ if (intsts || aerr || prerr || rcvcp || lenerr)
+ err = 1;
+
+ pr_info("PCEXC interrupt!!\n");
+ pr_info("PEXINTSTS :0x%08x\n", intsts);
+ pr_info("PEXAERRSTS :0x%08x\n", aerr);
+ pr_info("PEXPRERRSTS :0x%08x\n", prerr);
+ pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
+ pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
+
+ /* print detail of Protection Error */
+ if (intsts & 0x00004000) {
+ uint32_t i, n;
+ for (i = 0; i < 4; i++) {
+ n = 1 << i;
+ if (prerr & n) {
+ maea = PEX_IN(base, PEXMAEA(i));
+ maec = PEX_IN(base, PEXMAEC(i));
+ pr_info("PEXMAEC%d :0x%08x\n", i, maec);
+ pr_info("PEXMAEA%d :0x%08x\n", i, maea);
+ }
+ }
+ }
+
+ if (err)
+ pciex_clear_intr_all(base);
+}
+
+static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
+{
+ struct pci_controller *phb = dev_id;
+
+ pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
+
+ BUG_ON(phb->cfg_addr == NULL);
+
+ pciex_check_status(phb->cfg_addr);
+
+ return IRQ_HANDLED;
+}
+
+static __init int celleb_setup_pciex(struct device_node *node,
+ struct pci_controller *phb)
+{
+ struct resource r;
+ struct of_irq oirq;
+ int virq;
+
+ /* SMMIO registers; used inside this file */
+ if (of_address_to_resource(node, 0, &r)) {
+ pr_err("PCIEXC:Failed to get config resource.\n");
+ return 1;
+ }
+ phb->cfg_addr = ioremap(r.start, resource_size(&r));
+ if (!phb->cfg_addr) {
+ pr_err("PCIEXC:Failed to remap SMMIO region.\n");
+ return 1;
+ }
+
+	/* cfg_data is not used; the cmd and data regs sit next to the address reg */
+ phb->cfg_data = NULL;
+
+ /* set pci_ops */
+ phb->ops = &scc_pciex_pci_ops;
+
+ /* internal interrupt handler */
+ if (of_irq_map_one(node, 1, &oirq)) {
+ pr_err("PCIEXC:Failed to map irq\n");
+ goto error;
+ }
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+ if (request_irq(virq, pciex_handle_internal_irq,
+ 0, "pciex", (void *)phb)) {
+ pr_err("PCIEXC:Failed to request irq\n");
+ goto error;
+ }
+
+ /* enable all interrupts */
+ pciex_clear_intr_all(phb->cfg_addr);
+ pciex_enable_intr_all(phb->cfg_addr);
+ /* MSI: TBD */
+
+ return 0;
+
+error:
+ phb->cfg_data = NULL;
+ if (phb->cfg_addr)
+ iounmap(phb->cfg_addr);
+ phb->cfg_addr = NULL;
+ return 1;
+}
+
+struct celleb_phb_spec celleb_pciex_spec __initdata = {
+ .setup = celleb_setup_pciex,
+ .ops = &scc_pciex_ops,
+ .iowa_init = &scc_pciex_iowa_init,
+};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
new file mode 100644
index 00000000..3a16c5b3
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
@@ -0,0 +1,101 @@
+/*
+ * setup serial port in SCC
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/console.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+
+/* sio irq0=0xb00010022 irq1=0xb00010023 irq2=0xb00010024
+ mmio=0xfff000-0x1000,0xff2000-0x1000 */
+static int txx9_serial_bitmap __initdata;
+
+static struct {
+ uint32_t offset;
+ uint32_t index;
+} txx9_scc_tab[3] __initdata = {
+ { 0x300, 0 }, /* 0xFFF300 */
+ { 0x400, 0 }, /* 0xFFF400 */
+ { 0x800, 1 } /* 0xFF2800 */
+};
+
+static int __init txx9_serial_init(void)
+{
+ extern int early_serial_txx9_setup(struct uart_port *port);
+ struct device_node *node = NULL;
+ int i;
+ struct uart_port req;
+ struct of_irq irq;
+ struct resource res;
+
+ while ((node = of_find_compatible_node(node,
+ "serial", "toshiba,sio-scc")) != NULL) {
+ for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
+ if (!(txx9_serial_bitmap & (1 << i)))
+ continue;
diff --git a/arch/powerpc/platforms/cell/celleb_scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
new file mode 100644
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
@@ -0,0 +1,95 @@
+/*
+ * SCC (Super Companion Chip) UHC setup
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+
+#include "celleb_scc.h"
+
+#define UHC_RESET_WAIT_MAX 10000
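+/* at udelay(10) per poll, the reset-release loop below gives up after ~100 ms */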
+
+static inline int uhc_clkctrl_ready(u32 val)
+{
+ const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBEN;
+ return (val & mask) == mask;
+}
+
+/*
+ * UHC (USB host controller) enable function.
+ * Affects both the OHCI and EHCI core modules.
+ */
+static void enable_scc_uhc(struct pci_dev *dev)
+{
+ void __iomem *uhc_base;
+ u32 __iomem *uhc_clkctrl;
+ u32 __iomem *uhc_ecmode;
+ u32 val = 0;
+ int i;
+
+ if (!machine_is(celleb_beat) &&
+ !machine_is(celleb_native))
+ return;
+
+ uhc_base = ioremap(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+ if (!uhc_base) {
+ printk(KERN_ERR "failed to map UHC register base.\n");
+ return;
+ }
+ uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
+ uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
+
+ /* setup for normal mode */
+ val |= SCC_UHC_F48MCKLEN;
+ out_be32(uhc_clkctrl, val);
+ val |= SCC_UHC_PHY_SUSPEND_SEL;
+ out_be32(uhc_clkctrl, val);
+ udelay(10);
+ val |= SCC_UHC_PHYEN;
+ out_be32(uhc_clkctrl, val);
+ udelay(50);
+
+ /* disable reset */
+ val |= SCC_UHC_HCLKEN;
+ out_be32(uhc_clkctrl, val);
+ val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
+ out_be32(uhc_clkctrl, val);
+ i = 0;
+ while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
+ udelay(10);
+ if (i++ > UHC_RESET_WAIT_MAX) {
+ printk(KERN_ERR "Failed to disable UHC reset %x\n",
+ in_be32(uhc_clkctrl));
+ break;
+ }
+ }
+
+ /* Endian Conversion Mode for Master ALL area */
+ out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
+
+ iounmap(uhc_base);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
+ PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
new file mode 100644
index 00000000..1d5a4d8d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -0,0 +1,243 @@
+/*
+ * Celleb setup code
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This code is based on arch/powerpc/platforms/cell/setup.c:
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+
+#include <asm/types.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/cputable.h>
+#include <asm/irq.h>
+#include <asm/time.h>
+#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
+#include <asm/rtas.h>
+#include <asm/cell-regs.h>
+
+#include "beat_interrupt.h"
+#include "beat_wrapper.h"
+#include "beat.h"
+#include "celleb_pci.h"
+#include "interrupt.h"
+#include "pervasive.h"
+#include "ras.h"
+
+static char celleb_machine_type[128] = "Celleb";
+
+static void celleb_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ /* using "CHRP" is to trick anaconda into installing FCx into Celleb */
+ seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
+ of_node_put(root);
+}
+
+static int __init celleb_machine_type_hack(char *ptr)
+{
+ strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
+ return 0;
+}
+
+__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
+
+static void celleb_progress(char *s, unsigned short hex)
+{
+ printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static void __init celleb_setup_arch_common(void)
+{
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+}
+
+static struct of_device_id celleb_bus_ids[] __initdata = {
+ { .type = "scc", },
+ { .type = "ioif", }, /* old style */
+ {},
+};
+
+static int __init celleb_publish_devices(void)
+{
+ /* Publish OF platform devices for southbridge IOs */
+ of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
+
+ return 0;
+}
+machine_device_initcall(celleb_beat, celleb_publish_devices);
+machine_device_initcall(celleb_native, celleb_publish_devices);
+
+
+/*
+ * functions for Celleb-Beat
+ */
+static void __init celleb_setup_arch_beat(void)
+{
+#ifdef CONFIG_SPU_BASE
+ spu_priv1_ops = &spu_priv1_beat_ops;
+ spu_management_ops = &spu_management_of_ops;
+#endif
+
+ celleb_setup_arch_common();
+}
+
+static int __init celleb_probe_beat(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "Beat"))
+ return 0;
+
+ powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
+ | FW_FEATURE_BEAT | FW_FEATURE_LPAR;
+ hpte_init_beat_v3();
+
+ return 1;
+}
+
+
+/*
+ * functions for Celleb-native
+ */
+static void __init celleb_init_IRQ_native(void)
+{
+ iic_init_IRQ();
+ spider_init_IRQ();
+}
+
+static void __init celleb_setup_arch_native(void)
+{
+#ifdef CONFIG_SPU_BASE
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_management_ops = &spu_management_of_ops;
+#endif
+
+ cbe_regs_init();
+
+#ifdef CONFIG_CBE_RAS
+ cbe_ras_init();
+#endif
+
+#ifdef CONFIG_SMP
+ smp_init_cell();
+#endif
+
+ cbe_pervasive_init();
+
+ /* XXX: nvram initialization should be added */
+
+ celleb_setup_arch_common();
+}
+
+static int __init celleb_probe_native(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "Beat") ||
+ !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
+ return 0;
+
+ powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
+ hpte_init_native();
+
+ return 1;
+}
+
+
+/*
+ * machine definitions
+ */
+define_machine(celleb_beat) {
+ .name = "Cell Reference Set (Beat)",
+ .probe = celleb_probe_beat,
+ .setup_arch = celleb_setup_arch_beat,
+ .show_cpuinfo = celleb_show_cpuinfo,
+ .restart = beat_restart,
+ .power_off = beat_power_off,
+ .halt = beat_halt,
+ .get_rtc_time = beat_get_rtc_time,
+ .set_rtc_time = beat_set_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = celleb_progress,
+ .power_save = beat_power_save,
+ .nvram_size = beat_nvram_get_size,
+ .nvram_read = beat_nvram_read,
+ .nvram_write = beat_nvram_write,
+ .set_dabr = beat_set_xdabr,
+ .init_IRQ = beatic_init_IRQ,
+ .get_irq = beatic_get_irq,
+ .pci_probe_mode = celleb_pci_probe_mode,
+ .pci_setup_phb = celleb_setup_phb,
+#ifdef CONFIG_KEXEC
+ .kexec_cpu_down = beat_kexec_cpu_down,
+#endif
+};
+
+define_machine(celleb_native) {
+ .name = "Cell Reference Set (native)",
+ .probe = celleb_probe_native,
+ .setup_arch = celleb_setup_arch_native,
+ .show_cpuinfo = celleb_show_cpuinfo,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .get_boot_time = rtas_get_boot_time,
+ .get_rtc_time = rtas_get_rtc_time,
+ .set_rtc_time = rtas_set_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = celleb_progress,
+ .pci_probe_mode = celleb_pci_probe_mode,
+ .pci_setup_phb = celleb_setup_phb,
+ .init_IRQ = celleb_init_IRQ_native,
+};
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
new file mode 100644
index 00000000..23bc9db4
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -0,0 +1,171 @@
+/*
+ * spu aware cpufreq governor for the cell processor
+ *
+ * © Copyright IBM Corporation 2006-2008
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <asm/machdep.h>
+#include <asm/spu.h>
+
+#define POLL_TIME 100000 /* in µs */
+#define EXP 753 /* exp(-1) in fixed-point */
+
+struct spu_gov_info_struct {
+ unsigned long busy_spus; /* fixed-point */
+ struct cpufreq_policy *policy;
+ struct delayed_work work;
+ unsigned int poll_int; /* µs */
+};
+static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
+
+static int calc_freq(struct spu_gov_info_struct *info)
+{
+ int cpu;
+ int busy_spus;
+
+ cpu = info->policy->cpu;
+ busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
+
+ CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
+ pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
+ cpu, busy_spus, info->busy_spus);
+
+ return info->policy->max * info->busy_spus / FIXED_1;
+}
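+/*
+ * Worked example for the smoothing above: FIXED_1 is 1 << 11 = 2048,
+ * so EXP = 753 ~ exp(-1) * 2048, and CALC_LOAD computes
+ *   busy_spus = (busy_spus * 753 + sample * (2048 - 753)) >> 11
+ * with sample = busy_spus_now * FIXED_1. Each poll keeps about 36.8%
+ * of the old average and blends in about 63.2% of the new sample: an
+ * exponential moving average with a time constant of one poll interval.
+ */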
+
+static void spu_gov_work(struct work_struct *work)
+{
+ struct spu_gov_info_struct *info;
+ int delay;
+ unsigned long target_freq;
+
+ info = container_of(work, struct spu_gov_info_struct, work.work);
+
+ /* after cancel_delayed_work_sync we unset info->policy */
+ BUG_ON(info->policy == NULL);
+
+ target_freq = calc_freq(info);
+ __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
+
+ delay = usecs_to_jiffies(info->poll_int);
+ schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
+}
+
+static void spu_gov_init_work(struct spu_gov_info_struct *info)
+{
+ int delay = usecs_to_jiffies(info->poll_int);
+ INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+ schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
+}
+
+static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
+{
+ cancel_delayed_work_sync(&info->work);
+}
+
+static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct spu_gov_info_struct *info, *affected_info;
+ int i;
+ int ret = 0;
+
+ info = &per_cpu(spu_gov_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(cpu)) {
+ printk(KERN_ERR "cpu %d is not online\n", cpu);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!policy->cur) {
+ printk(KERN_ERR "no cpu specified in policy\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* initialize spu_gov_info for all affected cpus */
+ for_each_cpu(i, policy->cpus) {
+ affected_info = &per_cpu(spu_gov_info, i);
+ affected_info->policy = policy;
+ }
+
+ info->poll_int = POLL_TIME;
+
+ /* setup timer */
+ spu_gov_init_work(info);
+
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ /* cancel timer */
+ spu_gov_cancel_work(info);
+
+ /* clean spu_gov_info for all affected cpus */
+ for_each_cpu(i, policy->cpus) {
+ info = &per_cpu(spu_gov_info, i);
+ info->policy = NULL;
+ }
+
+ break;
+ }
+
+ return ret;
+}
+
+static struct cpufreq_governor spu_governor = {
+ .name = "spudemand",
+ .governor = spu_gov_govern,
+ .owner = THIS_MODULE,
+};
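+/*
+ * Once registered, the governor is selected per policy through the
+ * standard cpufreq sysfs interface, e.g.:
+ *   echo spudemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ */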
+
+/*
+ * module init and destroy
+ */
+
+static int __init spu_gov_init(void)
+{
+ int ret;
+
+ ret = cpufreq_register_governor(&spu_governor);
+ if (ret)
+ printk(KERN_ERR "registration of governor failed\n");
+ return ret;
+}
+
+static void __exit spu_gov_exit(void)
+{
+ cpufreq_unregister_governor(&spu_governor);
+}
+
+
+module_init(spu_gov_init);
+module_exit(spu_gov_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft ");
+
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
new file mode 100644
index 00000000..2d42f3bb
--- /dev/null
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -0,0 +1,411 @@
+/*
+ * Cell Internal Interrupt Controller
+ *
+ * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * IBM, Corp.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * TODO:
+ * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
+ * vs node numbers in the setup code
+ * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
+ * a non-active node to the active node)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/export.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "interrupt.h"
+
+struct iic {
+ struct cbe_iic_thread_regs __iomem *regs;
+ u8 target_id;
+ u8 eoi_stack[16];
+ int eoi_ptr;
+ struct device_node *node;
+};
+
+static DEFINE_PER_CPU(struct iic, cpu_iic);
+#define IIC_NODE_COUNT 2
+static struct irq_domain *iic_host;
+
+/* Convert between "pending" bits and hw irq number */
+static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
+{
+ unsigned char unit = bits.source & 0xf;
+ unsigned char node = bits.source >> 4;
+ unsigned char class = bits.class & 3;
+
+ /* Decode IPIs */
+ if (bits.flags & CBE_IIC_IRQ_IPI)
+ return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
+ else
+ return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
+}
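+/*
+ * Worked example (values illustrative): pending.source = 0x23 encodes
+ * node 2, unit 3; with class 1 and no IPI flag the result is
+ * (2 << 8) | (1 << 4) | 0x3 = 0x213. An IPI latched at priority 0xf0
+ * instead yields IIC_IRQ_TYPE_IPI | (0xf0 >> 4) = 0x8f.
+ */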
+
+static void iic_mask(struct irq_data *d)
+{
+}
+
+static void iic_unmask(struct irq_data *d)
+{
+}
+
+static void iic_eoi(struct irq_data *d)
+{
+ struct iic *iic = &__get_cpu_var(cpu_iic);
+ out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
+ BUG_ON(iic->eoi_ptr < 0);
+}
+
+static struct irq_chip iic_chip = {
+ .name = "CELL-IIC",
+ .irq_mask = iic_mask,
+ .irq_unmask = iic_unmask,
+ .irq_eoi = iic_eoi,
+};
+
+
+static void iic_ioexc_eoi(struct irq_data *d)
+{
+}
+
+static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct cbe_iic_regs __iomem *node_iic =
+ (void __iomem *)irq_desc_get_handler_data(desc);
+ unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
+ unsigned long bits, ack;
+ int cascade;
+
+ for (;;) {
+ bits = in_be64(&node_iic->iic_is);
+ if (bits == 0)
+ break;
+ /* pre-ack edge interrupts */
+ ack = bits & IIC_ISR_EDGE_MASK;
+ if (ack)
+ out_be64(&node_iic->iic_is, ack);
+ /* handle them */
+ for (cascade = 63; cascade >= 0; cascade--)
+ if (bits & (0x8000000000000000UL >> cascade)) {
+ unsigned int cirq =
+ irq_linear_revmap(iic_host,
+ base | cascade);
+ if (cirq != NO_IRQ)
+ generic_handle_irq(cirq);
+ }
+ /* post-ack level interrupts */
+ ack = bits & ~IIC_ISR_EDGE_MASK;
+ if (ack)
+ out_be64(&node_iic->iic_is, ack);
+ }
+ chip->irq_eoi(&desc->irq_data);
+}
+
+
+static struct irq_chip iic_ioexc_chip = {
+ .name = "CELL-IOEX",
+ .irq_mask = iic_mask,
+ .irq_unmask = iic_unmask,
+ .irq_eoi = iic_ioexc_eoi,
+};
+
+/* Get an IRQ number from the pending state register of the IIC */
+static unsigned int iic_get_irq(void)
+{
+ struct cbe_iic_pending_bits pending;
+ struct iic *iic;
+ unsigned int virq;
+
+ iic = &__get_cpu_var(cpu_iic);
+ *(unsigned long *) &pending =
+ in_be64((u64 __iomem *) &iic->regs->pending_destr);
+ if (!(pending.flags & CBE_IIC_IRQ_VALID))
+ return NO_IRQ;
+ virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
+ if (virq == NO_IRQ)
+ return NO_IRQ;
+ iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
+ BUG_ON(iic->eoi_ptr > 15);
+ return virq;
+}
+
+void iic_setup_cpu(void)
+{
+ out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+}
+
+u8 iic_get_target_id(int cpu)
+{
+ return per_cpu(cpu_iic, cpu).target_id;
+}
+
+EXPORT_SYMBOL_GPL(iic_get_target_id);
+
+#ifdef CONFIG_SMP
+
+/* Use the highest interrupt priorities for IPI */
+static inline int iic_msg_to_irq(int msg)
+{
+ return IIC_IRQ_TYPE_IPI + 0xf - msg;
+}
+
+void iic_message_pass(int cpu, int msg)
+{
+ out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
+}
+
+struct irq_domain *iic_get_irq_host(int node)
+{
+ return iic_host;
+}
+EXPORT_SYMBOL_GPL(iic_get_irq_host);
+
+static void iic_request_ipi(int msg)
+{
+ int virq;
+
+ virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR
+ "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
+ return;
+ }
+
+ /*
+ * smp_request_message_ipi reports any error itself; it returns
+ * non-zero when the message is not needed on this configuration.
+ */
+ if (smp_request_message_ipi(virq, msg))
+ irq_dispose_mapping(virq);
+}
+
+void iic_request_IPIs(void)
+{
+ iic_request_ipi(PPC_MSG_CALL_FUNCTION);
+ iic_request_ipi(PPC_MSG_RESCHEDULE);
+ iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE);
+ iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
+}
+
+#endif /* CONFIG_SMP */
+
+
+static int iic_host_match(struct irq_domain *h, struct device_node *node)
+{
+ return of_device_is_compatible(node,
+ "IBM,CBEA-Internal-Interrupt-Controller");
+}
+
+static int iic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ switch (hw & IIC_IRQ_TYPE_MASK) {
+ case IIC_IRQ_TYPE_IPI:
+ irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
+ break;
+ case IIC_IRQ_TYPE_IOEXC:
+ irq_set_chip_and_handler(virq, &iic_ioexc_chip,
+ handle_edge_eoi_irq);
+ break;
+ default:
+ irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
+ }
+ return 0;
+}
+
+static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ unsigned int node, ext, unit, class;
+ const u32 *val;
+
+ if (!of_device_is_compatible(ct,
+ "IBM,CBEA-Internal-Interrupt-Controller"))
+ return -ENODEV;
+ if (intsize != 1)
+ return -ENODEV;
+ val = of_get_property(ct, "#interrupt-cells", NULL);
+ if (val == NULL || *val != 1)
+ return -ENODEV;
+
+ node = intspec[0] >> 24;
+ ext = (intspec[0] >> 16) & 0xff;
+ class = (intspec[0] >> 8) & 0xff;
+ unit = intspec[0] & 0xff;
+
+ /* Check if node is in supported range */
+ if (node > 1)
+ return -EINVAL;
+
+ /* Build up interrupt number, special case for IO exceptions */
+ *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
+ if (unit == IIC_UNIT_IIC && class == 1)
+ *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
+ else
+ *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
+ (class << IIC_IRQ_CLASS_SHIFT) | unit;
+
+ /* Dummy flags, ignored by iic code */
+ *out_flags = IRQ_TYPE_EDGE_RISING;
+
+ return 0;
+}
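+/*
+ * Example translation (illustrative specifier): intspec 0x003d010e
+ * decodes as node 0, ext 0x3d, class 1, unit 0xe. Since the unit is
+ * IIC_UNIT_IIC with class 1, this is an IO exception and the hwirq
+ * becomes IIC_IRQ_TYPE_IOEXC | 0x3d = 0x7d, i.e. IIC_IRQ_IOEX_ATI.
+ */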
+
+static const struct irq_domain_ops iic_host_ops = {
+ .match = iic_host_match,
+ .map = iic_host_map,
+ .xlate = iic_host_xlate,
+};
+
+static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
+ struct device_node *node)
+{
+ /* XXX FIXME: should locate the linux CPU number from the HW cpu
+ * number properly. We are lucky for now
+ */
+ struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
+
+ iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
+ BUG_ON(iic->regs == NULL);
+
+ iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
+ iic->eoi_stack[0] = 0xff;
+ iic->node = of_node_get(node);
+ out_be64(&iic->regs->prio, 0);
+
+ printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
+ hw_cpu, iic->target_id, node->full_name);
+}
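+/*
+ * For reference, the target_id above works out to: hw_cpu 0 -> 0x0e,
+ * 1 -> 0x0f, 2 -> 0x1e, 3 -> 0x1f; bit 4 carries the node and the low
+ * nibble selects thread 0 (0xe) or thread 1 (0xf).
+ */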
+
+static int __init setup_iic(void)
+{
+ struct device_node *dn;
+ struct resource r0, r1;
+ unsigned int node, cascade, found = 0;
+ struct cbe_iic_regs __iomem *node_iic;
+ const u32 *np;
+
+ for (dn = NULL;
+ (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
+ if (!of_device_is_compatible(dn,
+ "IBM,CBEA-Internal-Interrupt-Controller"))
+ continue;
+ np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
+ if (np == NULL) {
+ printk(KERN_WARNING "IIC: CPU association not found\n");
+ of_node_put(dn);
+ return -ENODEV;
+ }
+ if (of_address_to_resource(dn, 0, &r0) ||
+ of_address_to_resource(dn, 1, &r1)) {
+ printk(KERN_WARNING "IIC: Can't resolve addresses\n");
+ of_node_put(dn);
+ return -ENODEV;
+ }
+ found++;
+ init_one_iic(np[0], r0.start, dn);
+ init_one_iic(np[1], r1.start, dn);
+
+ /* Setup cascade for IO exceptions. XXX cleanup tricks to get
+ * node vs CPU etc...
+ * Note that we configure the IIC_IRR here with a hard coded
+ * priority of 1. We might want to improve that later.
+ */
+ node = np[0] >> 1;
+ node_iic = cbe_get_cpu_iic_regs(np[0]);
+ cascade = node << IIC_IRQ_NODE_SHIFT;
+ cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
+ cascade |= IIC_UNIT_IIC;
+ cascade = irq_create_mapping(iic_host, cascade);
+ if (cascade == NO_IRQ)
+ continue;
+ /*
+ * irq_data is a generic pointer that gets passed back
+ * to us later, so the forced cast is fine.
+ */
+ irq_set_handler_data(cascade, (void __force *)node_iic);
+ irq_set_chained_handler(cascade, iic_ioexc_cascade);
+ out_be64(&node_iic->iic_ir,
+ (1 << 12) /* priority */ |
+ (node << 4) /* dest node */ |
+ IIC_UNIT_THREAD_0 /* route them to thread 0 */);
+ /* Flush pending (make sure it triggers if there is
+ * anything pending)
+ */
+ out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
+ }
+
+ if (found)
+ return 0;
+ else
+ return -ENODEV;
+}
+
+void __init iic_init_IRQ(void)
+{
+ /* Setup an irq host data structure */
+ iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
+ NULL);
+ BUG_ON(iic_host == NULL);
+ irq_set_default_host(iic_host);
+
+ /* Discover and initialize iics */
+ if (setup_iic() < 0)
+ panic("IIC: Failed to initialize !\n");
+
+ /* Set master interrupt handling function */
+ ppc_md.get_irq = iic_get_irq;
+
+ /* Enable on current CPU */
+ iic_setup_cpu();
+}
+
+void iic_set_interrupt_routing(int cpu, int thread, int priority)
+{
+ struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
+ u64 iic_ir = 0;
+ int node = cpu >> 1;
+
+ /* Set which node and thread will handle the next interrupt */
+ iic_ir |= CBE_IIC_IR_PRIO(priority) |
+ CBE_IIC_IR_DEST_NODE(node);
+ if (thread == 0)
+ iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
+ else
+ iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
+ out_be64(&iic_regs->iic_ir, iic_ir);
+}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
new file mode 100644
index 00000000..4f60ae6c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -0,0 +1,89 @@
+#ifndef ASM_CELL_PIC_H
+#define ASM_CELL_PIC_H
+#ifdef __KERNEL__
+/*
+ * Mapping of IIC pending bits into per-node interrupt numbers.
+ *
+ * Interrupt numbers are in the range 0...0x1ff where the top bit
+ * (0x100) represents the source node. Only 2 nodes are supported by
+ * the current code, though it's trivial to extend that if necessary
+ * using higher-level bits.
+ *
+ * The bottom 8 bits are split into 2 type bits and 6 data bits that
+ * depend on the type:
+ *
+ * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source
+ * 01 (0x40 | data) : IO exception. data is the exception number as
+ * defined by bit numbers in IIC_SR
+ * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority)
+ * and node is always 0 (IPIs are per-cpu, their source is
+ * not relevant)
+ * 11 (0xc0 | data) : reserved
+ *
+ * In addition, interrupt number 0x80000000 is defined as always invalid
+ * (that is, the node field is never expected to extend to more than 23 bits)
+ *
+ */
+
+enum {
+ IIC_IRQ_INVALID = 0x80000000u,
+ IIC_IRQ_NODE_MASK = 0x100,
+ IIC_IRQ_NODE_SHIFT = 8,
+ IIC_IRQ_MAX = 0x1ff,
+ IIC_IRQ_TYPE_MASK = 0xc0,
+ IIC_IRQ_TYPE_NORMAL = 0x00,
+ IIC_IRQ_TYPE_IOEXC = 0x40,
+ IIC_IRQ_TYPE_IPI = 0x80,
+ IIC_IRQ_CLASS_SHIFT = 4,
+ IIC_IRQ_CLASS_0 = 0x00,
+ IIC_IRQ_CLASS_1 = 0x10,
+ IIC_IRQ_CLASS_2 = 0x20,
+ IIC_SOURCE_COUNT = 0x200,
+
+ /* Here are defined the various source/dest units. Avoid using those
+ * definitions if you can, they are mostly here for reference
+ */
+ IIC_UNIT_SPU_0 = 0x4,
+ IIC_UNIT_SPU_1 = 0x7,
+ IIC_UNIT_SPU_2 = 0x3,
+ IIC_UNIT_SPU_3 = 0x8,
+ IIC_UNIT_SPU_4 = 0x2,
+ IIC_UNIT_SPU_5 = 0x9,
+ IIC_UNIT_SPU_6 = 0x1,
+ IIC_UNIT_SPU_7 = 0xa,
+ IIC_UNIT_IOC_0 = 0x0,
+ IIC_UNIT_IOC_1 = 0xb,
+ IIC_UNIT_THREAD_0 = 0xe, /* target only */
+ IIC_UNIT_THREAD_1 = 0xf, /* target only */
+ IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */
+
+ /* Base numbers for the external interrupts */
+ IIC_IRQ_EXT_IOIF0 =
+ IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0,
+ IIC_IRQ_EXT_IOIF1 =
+ IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1,
+
+ /* Base numbers for the IIC_ISR interrupts */
+ IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63,
+ IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62,
+ IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61,
+ IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60,
+ IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59,
+
+ /* Which bits in IIC_ISR are edge sensitive */
+ IIC_ISR_EDGE_MASK = 0x4ul,
+};
+
+extern void iic_init_IRQ(void);
+extern void iic_message_pass(int cpu, int msg);
+extern void iic_request_IPIs(void);
+extern void iic_setup_cpu(void);
+
+extern u8 iic_get_target_id(int cpu);
+
+extern void spider_init_IRQ(void);
+
+extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
+
+#endif
+#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
new file mode 100644
index 00000000..b9f509a3
--- /dev/null
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -0,0 +1,1237 @@
+/*
+ * IOMMU implementation for Cell Broadband Processor Architecture
+ *
+ * (C) Copyright IBM Corporation 2006-2008
+ *
+ * Author: Jeremy Kerr <jk@ozlabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/prom.h>
+#include <asm/iommu.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/udbg.h>
+#include <asm/firmware.h>
+#include <asm/cell-regs.h>
+
+#include "interrupt.h"
+
+/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
+ * instead of leaving them mapped to a dummy page. This can be
+ * enabled once the appropriate workarounds for the spider bugs
+ * are in place.
+ */
+#define CELL_IOMMU_REAL_UNMAP
+
+/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
+ * IO PTEs based on the transfer direction. That can be enabled
+ * once spider-net has been fixed to pass the correct direction
+ * to the DMA mapping functions
+ */
+#define CELL_IOMMU_STRICT_PROTECTION
+
+
+#define NR_IOMMUS 2
+
+/* IOC mmap registers */
+#define IOC_Reg_Size 0x2000
+
+#define IOC_IOPT_CacheInvd 0x908
+#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
+#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
+#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul
+
+#define IOC_IOST_Origin 0x918
+#define IOC_IOST_Origin_E 0x8000000000000000ul
+#define IOC_IOST_Origin_HW 0x0000000000000800ul
+#define IOC_IOST_Origin_HL 0x0000000000000400ul
+
+#define IOC_IO_ExcpStat 0x920
+#define IOC_IO_ExcpStat_V 0x8000000000000000ul
+#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul
+#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
+#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
+#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful
+
+#define IOC_IO_ExcpMask 0x928
+#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
+#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul
+
+#define IOC_IOCmd_Offset 0x1000
+
+#define IOC_IOCmd_Cfg 0xc00
+#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul
+
+
+/* Segment table entries */
+#define IOSTE_V 0x8000000000000000ul /* valid */
+#define IOSTE_H 0x4000000000000000ul /* cache hint */
+#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
+#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
+#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
+#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
+#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
+#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
+#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
+
+
+/* IOMMU sizing */
+#define IO_SEGMENT_SHIFT 28
+#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift))
+
+/* The high bit needs to be set on every DMA address */
+#define SPIDER_DMA_OFFSET 0x80000000ul
+
+struct iommu_window {
+ struct list_head list;
+ struct cbe_iommu *iommu;
+ unsigned long offset;
+ unsigned long size;
+ unsigned int ioid;
+ struct iommu_table table;
+};
+
+#define NAMESIZE 8
+struct cbe_iommu {
+ int nid;
+ char name[NAMESIZE];
+ void __iomem *xlate_regs;
+ void __iomem *cmd_regs;
+ unsigned long *stab;
+ unsigned long *ptab;
+ void *pad_page;
+ struct list_head windows;
+};
+
+/* Static array of iommus, one per node
+ * each contains a list of windows, keyed from dma_window property
+ * - on bus setup, look for a matching window, or create one
+ * - on dev setup, assign iommu_table ptr
+ */
+static struct cbe_iommu iommus[NR_IOMMUS];
+static int cbe_nr_iommus;
+
+static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
+ long n_ptes)
+{
+ u64 __iomem *reg;
+ u64 val;
+ long n;
+
+ reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
+
+ while (n_ptes > 0) {
+ /* we can invalidate up to 1 << 11 PTEs at once */
+ n = min(n_ptes, 1l << 11);
+ val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
+ | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
+ | IOC_IOPT_CacheInvd_Busy;
+
+ out_be64(reg, val);
+ while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
+ ;
+
+ n_ptes -= n;
+ pte += n;
+ }
+}
+
+static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ int i;
+ unsigned long *io_pte, base_pte;
+ struct iommu_window *window =
+ container_of(tbl, struct iommu_window, table);
+
+ /* implementing proper protection causes problems with the spidernet
+ * driver - check mapping directions later, but allow read & write by
+ * default for now.*/
+#ifdef CELL_IOMMU_STRICT_PROTECTION
+ /* to avoid referencing a global, we use a trick here to set up the
+ * protection bits. "prot" packs one 4-bit field per supported
+ * direction value, appended together. It is then shifted left so
+ * that the field matching the desired direction lands on the
+ * appropriate bits, and the other bits are masked out (see the
+ * worked example after this function).
+ */
+ const unsigned long prot = 0xc48;
+ base_pte =
+ ((prot << (52 + 4 * direction)) &
+ (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
+ CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
+#else
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
+#endif
+ if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
+ base_pte &= ~CBE_IOPTE_SO_RW;
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+ for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+
+ mb();
+
+ invalidate_tce_cache(window->iommu, io_pte, npages);
+
+ pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
+ index, npages, direction, base_pte);
+ return 0;
+}
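+/*
+ * Worked example for the "prot" trick in tce_build_cell() above, using
+ * the dma_data_direction values DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1,
+ * DMA_FROM_DEVICE=2 (PP_W and PP_R are the two top IOPTE bits):
+ *   0xc48 = 1100 0100 1000 binary, one 4-bit field per direction
+ *   direction 0: 0xc48 << 52 puts 0xc in bits 63:60 -> write | read
+ *   direction 1: 0xc48 << 56 puts 0x4 in bits 63:60 -> read only
+ *   direction 2: 0xc48 << 60 puts 0x8 in bits 63:60 -> write only
+ */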
+
+static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
+{
+
+ int i;
+ unsigned long *io_pte, pte;
+ struct iommu_window *window =
+ container_of(tbl, struct iommu_window, table);
+
+ pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
+
+#ifdef CELL_IOMMU_REAL_UNMAP
+ pte = 0;
+#else
+ /* spider bridge does PCI reads after freeing - insert a mapping
+ * to a scratch page instead of an invalid entry */
+ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ __pa(window->iommu->pad_page) |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
+#endif
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+ for (i = 0; i < npages; i++)
+ io_pte[i] = pte;
+
+ mb();
+
+ invalidate_tce_cache(window->iommu, io_pte, npages);
+}
+
+static irqreturn_t ioc_interrupt(int irq, void *data)
+{
+ unsigned long stat, spf;
+ struct cbe_iommu *iommu = data;
+
+ stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+ spf = stat & IOC_IO_ExcpStat_SPF_Mask;
+
+ /* Might want to rate limit it */
+ printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
+ printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
+ !!(stat & IOC_IO_ExcpStat_V),
+ (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
+ (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
+ (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
+ (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
+ printk(KERN_ERR " page=0x%016lx\n",
+ stat & IOC_IO_ExcpStat_ADDR_Mask);
+
+ /* clear interrupt */
+ stat &= ~IOC_IO_ExcpStat_V;
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
+
+ return IRQ_HANDLED;
+}
+
+static int cell_iommu_find_ioc(int nid, unsigned long *base)
+{
+ struct device_node *np;
+ struct resource r;
+
+ *base = 0;
+
+ /* First look for new style /be nodes */
+ for_each_node_by_name(np, "ioc") {
+ if (of_node_to_nid(np) != nid)
+ continue;
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "iommu: can't get address for %s\n",
+ np->full_name);
+ continue;
+ }
+ *base = r.start;
+ of_node_put(np);
+ return 0;
+ }
+
+ /* Ok, let's try the old way */
+ for_each_node_by_type(np, "cpu") {
+ const unsigned int *nidp;
+ const unsigned long *tmp;
+
+ nidp = of_get_property(np, "node-id", NULL);
+ if (nidp && *nidp == nid) {
+ tmp = of_get_property(np, "ioc-translation", NULL);
+ if (tmp) {
+ *base = *tmp;
+ of_node_put(np);
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
+ unsigned long dbase, unsigned long dsize,
+ unsigned long fbase, unsigned long fsize)
+{
+ struct page *page;
+ unsigned long segments, stab_size;
+
+ segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
+
+ pr_debug("%s: iommu[%d]: segments: %lu\n",
+ __func__, iommu->nid, segments);
+
+ /* set up the segment table */
+ stab_size = segments * sizeof(unsigned long);
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
+ BUG_ON(!page);
+ iommu->stab = page_address(page);
+ memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+ unsigned long base, unsigned long size, unsigned long gap_base,
+ unsigned long gap_size, unsigned long page_shift)
+{
+ struct page *page;
+ int i;
+ unsigned long reg, segments, pages_per_segment, ptab_size,
+ n_pte_pages, start_seg, *ptab;
+
+ start_seg = base >> IO_SEGMENT_SHIFT;
+ segments = size >> IO_SEGMENT_SHIFT;
+ pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+ /* PTEs for each segment must start on a 4K boundary */
+ pages_per_segment = max(pages_per_segment,
+ (1 << 12) / sizeof(unsigned long));
+
+ ptab_size = segments * pages_per_segment * sizeof(unsigned long);
+ pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
+ iommu->nid, ptab_size, get_order(ptab_size));
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
+ BUG_ON(!page);
+
+ ptab = page_address(page);
+ memset(ptab, 0, ptab_size);
+
+ /* number of 4K pages needed for a page table */
+ n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
+
+ pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
+ __func__, iommu->nid, iommu->stab, ptab,
+ n_pte_pages);
+
+ /* initialise the STEs */
+ reg = IOSTE_V | ((n_pte_pages - 1) << 5);
+
+ switch (page_shift) {
+ case 12: reg |= IOSTE_PS_4K; break;
+ case 16: reg |= IOSTE_PS_64K; break;
+ case 20: reg |= IOSTE_PS_1M; break;
+ case 24: reg |= IOSTE_PS_16M; break;
+ default: BUG();
+ }
+
+ gap_base = gap_base >> IO_SEGMENT_SHIFT;
+ gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
+ pr_debug("Setting up IOMMU stab:\n");
+ for (i = start_seg; i < (start_seg + segments); i++) {
+ if (i >= gap_base && i < (gap_base + gap_size)) {
+ pr_debug("\toverlap at %d, skipping\n", i);
+ continue;
+ }
+ iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+ (i - start_seg));
+ pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
+ }
+
+ return ptab;
+}
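+/*
+ * Sizing example for the above, assuming 4K IOMMU pages (page_shift =
+ * 12) and the 256MB segments implied by IO_SEGMENT_SHIFT = 28:
+ * pages_per_segment = 1 << 16, so each segment needs 64K PTEs * 8
+ * bytes = 512KB of page table. That is n_pte_pages = 128 4K pages,
+ * giving an NPPT field of (128 - 1) << 5 = 0xfe0, the full
+ * IOSTE_NPPT_Mask.
+ */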
+
+static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
+{
+ int ret;
+ unsigned long reg, xlate_base;
+ unsigned int virq;
+
+ if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
+ panic("%s: missing IOC register mappings for node %d\n",
+ __func__, iommu->nid);
+
+ iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
+ iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
+
+ /* ensure that the STEs have updated */
+ mb();
+
+ /* setup interrupts for the iommu. */
+ reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
+ reg & ~IOC_IO_ExcpStat_V);
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
+ IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
+
+ virq = irq_create_mapping(NULL,
+ IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
+ BUG_ON(virq == NO_IRQ);
+
+ ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
+ BUG_ON(ret);
+
+ /* set the IOC segment table origin register (and turn on the iommu) */
+ reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
+ out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
+ in_be64(iommu->xlate_regs + IOC_IOST_Origin);
+
+ /* turn on IO translation */
+ reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
+ out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
+}
+
+static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
+ unsigned long base, unsigned long size)
+{
+ cell_iommu_setup_stab(iommu, base, size, 0, 0);
+ iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+ IOMMU_PAGE_SHIFT);
+ cell_iommu_enable_hardware(iommu);
+}
+
+#if 0/* Unused for now */
+static struct iommu_window *find_window(struct cbe_iommu *iommu,
+ unsigned long offset, unsigned long size)
+{
+ struct iommu_window *window;
+
+ /* todo: check for overlapping (but not equal) windows */
+
+ list_for_each_entry(window, &(iommu->windows), list) {
+ if (window->offset == offset && window->size == size)
+ return window;
+ }
+
+ return NULL;
+}
+#endif
+
+static inline u32 cell_iommu_get_ioid(struct device_node *np)
+{
+ const u32 *ioid;
+
+ ioid = of_get_property(np, "ioid", NULL);
+ if (ioid == NULL) {
+ printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
+ np->full_name);
+ return 0;
+ }
+
+ return *ioid;
+}
+
+static struct iommu_window * __init
+cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
+ unsigned long offset, unsigned long size,
+ unsigned long pte_offset)
+{
+ struct iommu_window *window;
+ struct page *page;
+ u32 ioid;
+
+ ioid = cell_iommu_get_ioid(np);
+
+ window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+ BUG_ON(window == NULL);
+
+ window->offset = offset;
+ window->size = size;
+ window->ioid = ioid;
+ window->iommu = iommu;
+
+ window->table.it_blocksize = 16;
+ window->table.it_base = (unsigned long)iommu->ptab;
+ window->table.it_index = iommu->nid;
+ window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
+ window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+
+ iommu_init_table(&window->table, iommu->nid);
+
+ pr_debug("\tioid %d\n", window->ioid);
+ pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
+ pr_debug("\tbase 0x%016lx\n", window->table.it_base);
+ pr_debug("\toffset 0x%lx\n", window->table.it_offset);
+ pr_debug("\tsize %ld\n", window->table.it_size);
+
+ list_add(&window->list, &iommu->windows);
+
+ if (offset != 0)
+ return window;
+
+ /* We need to map and reserve the first IOMMU page since it's used
+ * by the spider workaround. In theory, we only need to do that when
+ * running on spider but it doesn't really matter.
+ *
+ * This code also assumes that we have a window that starts at 0,
+ * which is the case on all spider based blades.
+ */
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+ BUG_ON(!page);
+ iommu->pad_page = page_address(page);
+ clear_page(iommu->pad_page);
+
+ __set_bit(0, window->table.it_map);
+ tce_build_cell(&window->table, window->table.it_offset, 1,
+ (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
+ window->table.it_hint = window->table.it_blocksize;
+
+ return window;
+}
+
+static struct cbe_iommu *cell_iommu_for_node(int nid)
+{
+ int i;
+
+ for (i = 0; i < cbe_nr_iommus; i++)
+ if (iommus[i].nid == nid)
+ return &iommus[i];
+ return NULL;
+}
+
+static unsigned long cell_dma_direct_offset;
+
+static unsigned long dma_iommu_fixed_base;
+
+/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
+static int iommu_fixed_is_weak;
+
+static struct iommu_table *cell_get_iommu_table(struct device *dev)
+{
+ struct iommu_window *window;
+ struct cbe_iommu *iommu;
+
+ /* Current implementation uses the first window available in that
+ * node's iommu. We -might- do something smarter later though it may
+ * never be necessary
+ */
+ iommu = cell_iommu_for_node(dev_to_node(dev));
+ if (iommu == NULL || list_empty(&iommu->windows)) {
+ printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+ dev->of_node ? dev->of_node->full_name : "?",
+ dev_to_node(dev));
+ return NULL;
+ }
+ window = list_entry(iommu->windows.next, struct iommu_window, list);
+
+ return &window->table;
+}
+
+/* A coherent allocation implies strong ordering */
+
+static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak)
+ return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
+ size, dma_handle,
+ device_to_mask(dev), flag,
+ dev_to_node(dev));
+ else
+ return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+ attrs);
+}
+
+static void dma_fixed_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak)
+ iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
+ dma_handle);
+ else
+ dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
+}
+
+static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ return dma_direct_ops.map_page(dev, page, offset, size,
+ direction, attrs);
+ else
+ return iommu_map_page(dev, cell_get_iommu_table(dev), page,
+ offset, size, device_to_mask(dev),
+ direction, attrs);
+}
+
+static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+ attrs);
+ else
+ iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
+ direction, attrs);
+}
+
+static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
+ else
+ return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
+ device_to_mask(dev), direction, attrs);
+}
+
+static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
+ else
+ iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
+ attrs);
+}
+
+static int dma_fixed_dma_supported(struct device *dev, u64 mask)
+{
+ return mask == DMA_BIT_MASK(64);
+}
+
+static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops dma_iommu_fixed_ops = {
+ .alloc = dma_fixed_alloc_coherent,
+ .free = dma_fixed_free_coherent,
+ .map_sg = dma_fixed_map_sg,
+ .unmap_sg = dma_fixed_unmap_sg,
+ .dma_supported = dma_fixed_dma_supported,
+ .set_dma_mask = dma_set_mask_and_switch,
+ .map_page = dma_fixed_map_page,
+ .unmap_page = dma_fixed_unmap_page,
+};
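+/*
+ * Dispatch summary for the fixed ops above (direct ops use the fixed
+ * 1:1 window, the iommu path uses the dynamic window):
+ *   fixed mapping weak,   request weak    -> direct
+ *   fixed mapping weak,   request default -> dynamic window (strong)
+ *   fixed mapping strong, request weak    -> dynamic window (weak PTEs)
+ *   fixed mapping strong, request default -> direct
+ */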
+
+static void cell_dma_dev_setup_fixed(struct device *dev);
+
+static void cell_dma_dev_setup(struct device *dev)
+{
+ /* Order is important here, these are not mutually exclusive */
+ if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
+ cell_dma_dev_setup_fixed(dev);
+ else if (get_pci_dma_ops() == &dma_iommu_ops)
+ set_iommu_table_base(dev, cell_get_iommu_table(dev));
+ else if (get_pci_dma_ops() == &dma_direct_ops)
+ set_dma_offset(dev, cell_dma_direct_offset);
+ else
+ BUG();
+}
+
+static void cell_pci_dma_dev_setup(struct pci_dev *dev)
+{
+ cell_dma_dev_setup(&dev->dev);
+}
+
+static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ /* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ /* We use the PCI DMA ops */
+ dev->archdata.dma_ops = get_pci_dma_ops();
+
+ cell_dma_dev_setup(dev);
+
+ return 0;
+}
+
+static struct notifier_block cell_of_bus_notifier = {
+ .notifier_call = cell_of_bus_notify
+};
+
+static int __init cell_iommu_get_window(struct device_node *np,
+ unsigned long *base,
+ unsigned long *size)
+{
+ const void *dma_window;
+ unsigned long index;
+
+ /* Use ibm,dma-window if available, else hard code it */
+ dma_window = of_get_property(np, "ibm,dma-window", NULL);
+ if (dma_window == NULL) {
+ *base = 0;
+ *size = 0x80000000u;
+ return -ENODEV;
+ }
+
+ of_parse_dma_window(np, dma_window, &index, base, size);
+ return 0;
+}
+
+static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
+{
+ struct cbe_iommu *iommu;
+ int nid, i;
+
+ /* Get node ID */
+ nid = of_node_to_nid(np);
+ if (nid < 0) {
+ printk(KERN_ERR "iommu: failed to get node for %s\n",
+ np->full_name);
+ return NULL;
+ }
+ pr_debug("iommu: setting up iommu for node %d (%s)\n",
+ nid, np->full_name);
+
+ /* XXX todo: If we can have multiple windows on the same IOMMU, which
+ * isn't the case today, we would probably want to check here whether
+ * the iommu for that node has already been set up.
+ * However, there might be issues with getting the size right, so let's
+ * ignore that for now. We may want to drop multiple-window support
+ * entirely, since the cell iommu supports per-page ioids.
+ */
+
+ if (cbe_nr_iommus >= NR_IOMMUS) {
+ printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
+ np->full_name);
+ return NULL;
+ }
+
+ /* Init base fields */
+ i = cbe_nr_iommus++;
+ iommu = &iommus[i];
+ iommu->stab = NULL;
+ iommu->nid = nid;
+ snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
+ INIT_LIST_HEAD(&iommu->windows);
+
+ return iommu;
+}
+
+static void __init cell_iommu_init_one(struct device_node *np,
+ unsigned long offset)
+{
+ struct cbe_iommu *iommu;
+ unsigned long base, size;
+
+ iommu = cell_iommu_alloc(np);
+ if (!iommu)
+ return;
+
+ /* Obtain a window for it */
+ cell_iommu_get_window(np, &base, &size);
+
+ pr_debug("\ttranslating window 0x%lx...0x%lx\n",
+ base, base + size - 1);
+
+ /* Initialize the hardware */
+ cell_iommu_setup_hardware(iommu, base, size);
+
+ /* Setup the iommu_table */
+ cell_iommu_setup_window(iommu, np, base, size,
+ offset >> IOMMU_PAGE_SHIFT);
+}
+
+static void __init cell_disable_iommus(void)
+{
+ int node;
+ unsigned long base, val;
+ void __iomem *xregs, *cregs;
+
+ /* Make sure IOC translation is disabled on all nodes */
+ for_each_online_node(node) {
+ if (cell_iommu_find_ioc(node, &base))
+ continue;
+ xregs = ioremap(base, IOC_Reg_Size);
+ if (xregs == NULL)
+ continue;
+ cregs = xregs + IOC_IOCmd_Offset;
+
+ pr_debug("iommu: cleaning up iommu on node %d\n", node);
+
+ out_be64(xregs + IOC_IOST_Origin, 0);
+ (void)in_be64(xregs + IOC_IOST_Origin);
+ val = in_be64(cregs + IOC_IOCmd_Cfg);
+ val &= ~IOC_IOCmd_Cfg_TE;
+ out_be64(cregs + IOC_IOCmd_Cfg, val);
+ (void)in_be64(cregs + IOC_IOCmd_Cfg);
+
+ iounmap(xregs);
+ }
+}
+
+static int __init cell_iommu_init_disabled(void)
+{
+ struct device_node *np = NULL;
+ unsigned long base = 0, size;
+
+ /* When no iommu is present, we use direct DMA ops */
+ set_pci_dma_ops(&dma_direct_ops);
+
+ /* First make sure all IOC translation is turned off */
+ cell_disable_iommus();
+
+ /* If we have no Axon, we set up the spider DMA magic offset */
+ if (of_find_node_by_name(NULL, "axon") == NULL)
+ cell_dma_direct_offset = SPIDER_DMA_OFFSET;
+
+ /* Now we need to check to see where the memory is mapped
+ * in PCI space. We assume that all busses use the same dma
+ * window which is always the case so far on Cell, thus we
+ * pick up the first pci-internal node we can find and check
+ * the DMA window from there.
+ */
+ for_each_node_by_name(np, "axon") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ if (cell_iommu_get_window(np, &base, &size) == 0)
+ break;
+ }
+ if (np == NULL) {
+ for_each_node_by_name(np, "pci-internal") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ if (cell_iommu_get_window(np, &base, &size) == 0)
+ break;
+ }
+ }
+ of_node_put(np);
+
+ /* If we found a DMA window, we check if it's big enough to enclose
+ * all of physical memory. If not, we force enable IOMMU
+ */
+ if (np && size < memblock_end_of_DRAM()) {
+ printk(KERN_WARNING "iommu: force-enabled, dma window"
+ " (%ldMB) smaller than total memory (%lldMB)\n",
+ size >> 20, memblock_end_of_DRAM() >> 20);
+ return -ENODEV;
+ }
+
+ cell_dma_direct_offset += base;
+
+ if (cell_dma_direct_offset != 0)
+ ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+
+ printk("iommu: disabled, direct DMA offset is 0x%lx\n",
+ cell_dma_direct_offset);
+
+ return 0;
+}
+
+/*
+ * Fixed IOMMU mapping support
+ *
+ * This code adds support for setting up a fixed IOMMU mapping on certain
+ * cell machines. For 64-bit devices this avoids the performance overhead of
+ * mapping and unmapping pages at runtime. 32-bit devices are unable to use
+ * the fixed mapping.
+ *
+ * The fixed mapping is established at boot, and maps all of physical memory
+ * 1:1 into device space at some offset. On machines with < 30 GB of memory
+ * we set up the fixed mapping immediately above the normal IOMMU window.
+ *
+ * For example a machine with 4GB of memory would end up with the normal
+ * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
+ * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
+ * 3GB, plus any offset required by firmware. The firmware offset is encoded
+ * in the "dma-ranges" property.
+ *
+ * On machines with 30GB or more of memory, we are unable to place the fixed
+ * mapping above the normal IOMMU window as we would run out of address space.
+ * Instead we move the normal IOMMU window to coincide with the hash page
+ * table; this region does not need to be part of the fixed mapping, as no
+ * device should ever be DMA'ing to it. We then set up the fixed mapping
+ * from 0 to 32GB.
+ */
+
+static u64 cell_iommu_get_fixed_address(struct device *dev)
+{
+ u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
+ struct device_node *np;
+ const u32 *ranges = NULL;
+ int i, len, best, naddr, nsize, pna, range_size;
+
+ np = of_node_get(dev->of_node);
+ while (1) {
+ naddr = of_n_addr_cells(np);
+ nsize = of_n_size_cells(np);
+ np = of_get_next_parent(np);
+ if (!np)
+ break;
+
+ ranges = of_get_property(np, "dma-ranges", &len);
+
+ /* Ignore empty ranges, they imply no translation required */
+ if (ranges && len > 0)
+ break;
+ }
+
+ if (!ranges) {
+ dev_dbg(dev, "iommu: no dma-ranges found\n");
+ goto out;
+ }
+
+ len /= sizeof(u32);
+
+ pna = of_n_addr_cells(np);
+ range_size = naddr + nsize + pna;
+
+ /* dma-ranges format:
+ * child addr : naddr cells
+ * parent addr : pna cells
+ * size : nsize cells
+ */
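+ /*
+ * Illustrative entry (numbers hypothetical): with naddr = 2, pna = 2
+ * and nsize = 2, an entry of <0x8 0x0 0x0 0x0 0x2 0x0> says device
+ * address 0x800000000 maps to CPU address 0 for 0x200000000 bytes
+ * (8GB). The loop below picks the largest range whose CPU address is
+ * 0 and returns its device-side address.
+ */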
+ for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
+ cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
+ size = of_read_number(ranges + i + naddr + pna, nsize);
+
+ if (cpu_addr == 0 && size > best_size) {
+ best = i;
+ best_size = size;
+ }
+ }
+
+ if (best >= 0)
+ dev_addr = of_read_number(ranges + best, naddr);
+ else
+ dev_dbg(dev, "iommu: no suitable range found!\n");
+
+out:
+ of_node_put(np);
+
+ return dev_addr;
+}
+
+static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ if (dma_mask == DMA_BIT_MASK(64) &&
+ cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
+ dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+ set_dma_ops(dev, &dma_iommu_fixed_ops);
+ } else {
+ dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
+ set_dma_ops(dev, get_pci_dma_ops());
+ }
+
+ cell_dma_dev_setup(dev);
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
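+/*
+ * From a driver's perspective the switch above happens through normal
+ * mask negotiation; a minimal sketch (hypothetical pci_dev *pdev):
+ *
+ *   if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ *           dev_warn(&pdev->dev, "no 64-bit DMA, using default ops\n");
+ *
+ * dma_set_mask() reaches dma_set_mask_and_switch() because the fixed
+ * ops above install it as .set_dma_mask.
+ */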
+
+static void cell_dma_dev_setup_fixed(struct device *dev)
+{
+ u64 addr;
+
+ addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
+ set_dma_offset(dev, addr);
+
+ dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
+}
+
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+ unsigned long base_pte)
+{
+ unsigned long segment, offset;
+
+ segment = addr >> IO_SEGMENT_SHIFT;
+ offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+ ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+ pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+ addr, ptab, segment, offset);
+
+ ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
+}
+
+static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
+ struct device_node *np, unsigned long dbase, unsigned long dsize,
+ unsigned long fbase, unsigned long fsize)
+{
+ unsigned long base_pte, uaddr, ioaddr, *ptab;
+
+ ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
+
+ dma_iommu_fixed_base = fbase;
+
+ pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
+
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);
+
+ if (iommu_fixed_is_weak) {
+ pr_info("IOMMU: Using weak ordering for fixed mapping\n");
+ } else {
+ pr_info("IOMMU: Using strong ordering for fixed mapping\n");
+ base_pte |= CBE_IOPTE_SO_RW;
+ }
+
+ for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
+ /* Don't touch the dynamic region */
+ ioaddr = uaddr + fbase;
+ if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
+ pr_debug("iommu: fixed/dynamic overlap, skipping\n");
+ continue;
+ }
+
+ insert_16M_pte(uaddr, ptab, base_pte);
+ }
+
+ mb();
+}
+
+static int __init cell_iommu_fixed_mapping_init(void)
+{
+ unsigned long dbase, dsize, fbase, fsize, hbase, hend;
+ struct cbe_iommu *iommu;
+ struct device_node *np;
+
+ /* The fixed mapping is only supported on axon machines */
+ np = of_find_node_by_name(NULL, "axon");
+ of_node_put(np);
+
+ if (!np) {
+ pr_debug("iommu: fixed mapping disabled, no axons found\n");
+ return -1;
+ }
+
+ /* We must have dma-ranges properties for fixed mapping to work */
+ np = of_find_node_with_property(NULL, "dma-ranges");
+ of_node_put(np);
+
+ if (!np) {
+ pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
+ return -1;
+ }
+
+ /* The default setup is to have the fixed mapping sit after the
+ * dynamic region, so find the top of the largest IOMMU window
+ * on any axon, then add the size of RAM and that's our max value.
+ * If that is > 32GB we have to do other shenanigans.
+ */
+ fbase = 0;
+ for_each_node_by_name(np, "axon") {
+ cell_iommu_get_window(np, &dbase, &dsize);
+ fbase = max(fbase, dbase + dsize);
+ }
+
+ fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+ fsize = memblock_phys_mem_size();
+
+ if ((fbase + fsize) <= 0x800000000ul) {
+ hbase = 0; /* use the device tree window */
+ } else {
+ /* If we're over 32 GB we need to cheat. We can't map all of
+ * RAM with the fixed mapping and also fit the dynamic
+ * region, so try to place the dynamic region where the hash
+ * table sits; drivers never need to DMA to it, so we don't
+ * need a fixed mapping for that area.
+ */
+ if (!htab_address) {
+ pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
+ return -1;
+ }
+ hbase = __pa(htab_address);
+ hend = hbase + htab_size_bytes;
+
+ /* The window must start and end on a segment boundary */
+ if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+ (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+ pr_debug("iommu: hash window not segment aligned\n");
+ return -1;
+ }
+
+ /* Check the hash window fits inside the real DMA window */
+ for_each_node_by_name(np, "axon") {
+ cell_iommu_get_window(np, &dbase, &dsize);
+
+ if (hbase < dbase || (hend > (dbase + dsize))) {
+ pr_debug("iommu: hash window doesn't fit in"
+ "real DMA window\n");
+ return -1;
+ }
+ }
+
+ fbase = 0;
+ }
+
+ /* Setup the dynamic regions */
+ for_each_node_by_name(np, "axon") {
+ iommu = cell_iommu_alloc(np);
+ BUG_ON(!iommu);
+
+ if (hbase == 0) {
+ cell_iommu_get_window(np, &dbase, &dsize);
+ } else {
+ dbase = hbase;
+ dsize = htab_size_bytes;
+ }
+
+ printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
+ "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
+ dbase + dsize, fbase, fbase + fsize);
+
+ cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+ iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+ IOMMU_PAGE_SHIFT);
+ cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
+ fbase, fsize);
+ cell_iommu_enable_hardware(iommu);
+ cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
+ }
+
+ dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+ set_pci_dma_ops(&dma_iommu_ops);
+
+ return 0;
+}
+
+static int iommu_fixed_disabled;
+
+static int __init setup_iommu_fixed(char *str)
+{
+ struct device_node *pciep;
+
+ if (strcmp(str, "off") == 0)
+ iommu_fixed_disabled = 1;
+
+ /* If we can find a pcie-endpoint in the device tree, assume we're
+ * on a triblade or a CAB, where the fixed mapping should default
+ * to weakly ordered, unless the boot option explicitly asked for
+ * strong ordering.
+ */
+ pciep = of_find_node_by_type(NULL, "pcie-endpoint");
+
+ if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
+ iommu_fixed_is_weak = 1;
+
+ of_node_put(pciep);
+
+ return 1;
+}
+__setup("iommu_fixed=", setup_iommu_fixed);
+
+static u64 cell_dma_get_required_mask(struct device *dev)
+{
+ struct dma_map_ops *dma_ops;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ if (!iommu_fixed_disabled &&
+ cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+ return DMA_BIT_MASK(64);
+
+ dma_ops = get_dma_ops(dev);
+ if (dma_ops->get_required_mask)
+ return dma_ops->get_required_mask(dev);
+
+ WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
+
+ return DMA_BIT_MASK(64);
+}
+
+static int __init cell_iommu_init(void)
+{
+ struct device_node *np;
+
+ /* If the IOMMU is disabled, or we have little enough RAM that we
+ * don't need to enable it, we set up a direct mapping.
+ *
+ * Note: should we make sure the IOMMU is actually disabled?
+ */
+ if (iommu_is_off ||
+ (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
+ if (cell_iommu_init_disabled() == 0)
+ goto bail;
+
+ /* Setup various ppc_md. callbacks */
+ ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
+ ppc_md.tce_build = tce_build_cell;
+ ppc_md.tce_free = tce_free_cell;
+
+ if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
+ goto bail;
+
+ /* Create an iommu for each /axon node. */
+ for_each_node_by_name(np, "axon") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ cell_iommu_init_one(np, 0);
+ }
+
+ /* Create an iommu for each toplevel /pci-internal node for
+ * old hardware/firmware
+ */
+ for_each_node_by_name(np, "pci-internal") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
+ }
+
+ /* Setup default PCI iommu ops */
+ set_pci_dma_ops(&dma_iommu_ops);
+
+ bail:
+ /* Register callbacks on OF platform device addition/removal
+ * to handle linking them to the right DMA operations
+ */
+ bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);
+
+ return 0;
+}
+machine_arch_initcall(cell, cell_iommu_init);
+machine_arch_initcall(celleb_native, cell_iommu_init);
+
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
new file mode 100644
index 00000000..d17e98bc
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -0,0 +1,133 @@
+/*
+ * CBE Pervasive Monitor and Debug
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * Michael N. Day (mnday@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/kallsyms.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/pgtable.h>
+#include <asm/reg.h>
+#include <asm/cell-regs.h>
+
+#include "pervasive.h"
+
+static void cbe_power_save(void)
+{
+ unsigned long ctrl, thread_switch_control;
+
+ /* Ensure our interrupt state is properly tracked */
+ if (!prep_irq_for_idle())
+ return;
+
+ ctrl = mfspr(SPRN_CTRLF);
+
+ /* Enable DEC and EE interrupt request */
+ thread_switch_control = mfspr(SPRN_TSC_CELL);
+ thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
+
+ switch (ctrl & CTRL_CT) {
+ case CTRL_CT0:
+ thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
+ break;
+ case CTRL_CT1:
+ thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown configuration\n",
+ __func__);
+ break;
+ }
+ mtspr(SPRN_TSC_CELL, thread_switch_control);
+
+ /*
+ * Go into low thread priority; medium priority will be
+ * restored for us after wake-up.
+ */
+ HMT_low();
+
+ /*
+ * Atomically disable thread execution and runlatch.
+ * External and Decrementer exceptions are still handled when the
+ * thread is disabled but now enter cbe_system_reset_exception().
+ */
+ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+ mtspr(SPRN_CTRLT, ctrl);
+
+ /* Re-enable interrupts in MSR */
+ __hard_irq_enable();
+}
+
+static int cbe_system_reset_exception(struct pt_regs *regs)
+{
+ switch (regs->msr & SRR1_WAKEMASK) {
+ case SRR1_WAKEEE:
+ do_IRQ(regs);
+ break;
+ case SRR1_WAKEDEC:
+ timer_interrupt(regs);
+ break;
+ case SRR1_WAKEMT:
+ return cbe_sysreset_hack();
+#ifdef CONFIG_CBE_RAS
+ case SRR1_WAKESYSERR:
+ cbe_system_error_exception(regs);
+ break;
+ case SRR1_WAKETHERM:
+ cbe_thermal_exception(regs);
+ break;
+#endif /* CONFIG_CBE_RAS */
+ default:
+ /* do system reset */
+ return 0;
+ }
+ /* everything handled */
+ return 1;
+}
+
+void __init cbe_pervasive_init(void)
+{
+ int cpu;
+
+ if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
+ if (!regs)
+ continue;
+
+ /* Enable Pause(0) control bit */
+ out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
+ CBE_PMD_PAUSE_ZERO_CONTROL);
+ }
+
+ ppc_md.power_save = cbe_power_save;
+ ppc_md.system_reset_exception = cbe_system_reset_exception;
+}
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
new file mode 100644
index 00000000..fd4d7b70
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -0,0 +1,42 @@
+/*
+ * Cell Pervasive Monitor and Debug interface and HW structures
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * David J. Erb (djerb@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef PERVASIVE_H
+#define PERVASIVE_H
+
+extern void cbe_pervasive_init(void);
+extern void cbe_system_error_exception(struct pt_regs *regs);
+extern void cbe_maintenance_exception(struct pt_regs *regs);
+extern void cbe_thermal_exception(struct pt_regs *regs);
+
+#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON
+extern int cbe_sysreset_hack(void);
+#else
+static inline int cbe_sysreset_hack(void)
+{
+ return 1;
+}
+#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */
+
+#endif
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644
index 00000000..59c1a169
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -0,0 +1,424 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Author:
+ * David Erb (djerb@us.ibm.com)
+ * Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+#include <asm/machdep.h>
+#include <asm/pmc.h>
+#include <asm/reg.h>
+#include <asm/spu.h>
+#include <asm/cell-regs.h>
+
+#include "interrupt.h"
+
+/*
+ * When writing to write-only mmio addresses, save a shadow copy. All of the
+ * registers are 32-bit, but stored in the upper half of a 64-bit field in
+ * pmd_regs.
+ */
+
+#define WRITE_WO_MMIO(reg, x) \
+ do { \
+ u32 _x = (x); \
+ struct cbe_pmd_regs __iomem *pmd_regs; \
+ struct cbe_pmd_shadow_regs *shadow_regs; \
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+ out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \
+ shadow_regs->reg = _x; \
+ } while (0)
+
+#define READ_SHADOW_REG(val, reg) \
+ do { \
+ struct cbe_pmd_shadow_regs *shadow_regs; \
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+ (val) = shadow_regs->reg; \
+ } while (0)
+
+#define READ_MMIO_UPPER32(val, reg) \
+ do { \
+ struct cbe_pmd_regs __iomem *pmd_regs; \
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
+ (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \
+ } while (0)
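+
+/*
+ * Usage sketch: WRITE_WO_MMIO(group_control, n) stores n in the upper
+ * half of the 64-bit MMIO word and mirrors it in the shadow, so a later
+ * READ_SHADOW_REG(val, group_control) recovers the value without
+ * touching the write-only register. All three macros expect a "cpu"
+ * variable to be in scope.
+ */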
+
+/*
+ * Physical counter registers.
+ * Each physical counter can act as one 32-bit counter or two 16-bit counters.
+ */
+
+u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
+{
+ u32 val_in_latch, val = 0;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
+
+ /* Read the latch or the actual counter, whichever is newer. */
+ if (val_in_latch & (1 << phys_ctr)) {
+ READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
+ } else {
+ READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
+ }
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
+
+void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
+{
+ struct cbe_pmd_shadow_regs *shadow_regs;
+ u32 pm_ctrl;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ /* Writing to a counter only writes to a hardware latch.
+ * The new value is not propagated to the actual counter
+ * until the performance monitor is enabled.
+ */
+ WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
+
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
+ /* The counters are already active, so we need to
+ * rewrite the pm_control register to "re-enable"
+ * the PMU.
+ */
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+ } else {
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+ shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
+
+/*
+ * "Logical" counter registers.
+ * These read/write 16 bits or 32 bits depending on the
+ * current size of the counter. Counters 4 - 7 are always 16-bit.
+ */
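+/*
+ * For example, assuming NR_PHYS_CTRS == 4: logical counters 2 and 6
+ * both map to physical counter 2; in 16-bit mode, counter 2 reads the
+ * upper 16 bits and counter 6 the lower 16 bits of that register.
+ */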
+
+u32 cbe_read_ctr(u32 cpu, u32 ctr)
+{
+ u32 val;
+ u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+ val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+ if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
+ val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_ctr);
+
+void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
+{
+ u32 phys_ctr;
+ u32 phys_val;
+
+ phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+ if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
+ phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+ if (ctr < NR_PHYS_CTRS)
+ val = (val << 16) | (phys_val & 0xffff);
+ else
+ val = (val & 0xffff) | (phys_val & 0xffff0000);
+ }
+
+ cbe_write_phys_ctr(cpu, phys_ctr, val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_ctr);
+
+/*
+ * Counter-control registers.
+ * Each "logical" counter has a corresponding control register.
+ */
+
+u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
+{
+ u32 pm07_control = 0;
+
+ if (ctr < NR_CTRS)
+ READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
+
+ return pm07_control;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
+
+void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
+{
+ if (ctr < NR_CTRS)
+ WRITE_WO_MMIO(pm07_control[ctr], val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
+
+/*
+ * Other PMU control registers. Most of these are write-only.
+ */
+
+u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
+{
+ u32 val = 0;
+
+ switch (reg) {
+ case group_control:
+ READ_SHADOW_REG(val, group_control);
+ break;
+
+ case debug_bus_control:
+ READ_SHADOW_REG(val, debug_bus_control);
+ break;
+
+ case trace_address:
+ READ_MMIO_UPPER32(val, trace_address);
+ break;
+
+ case ext_tr_timer:
+ READ_SHADOW_REG(val, ext_tr_timer);
+ break;
+
+ case pm_status:
+ READ_MMIO_UPPER32(val, pm_status);
+ break;
+
+ case pm_control:
+ READ_SHADOW_REG(val, pm_control);
+ break;
+
+ case pm_interval:
+ READ_MMIO_UPPER32(val, pm_interval);
+ break;
+
+ case pm_start_stop:
+ READ_SHADOW_REG(val, pm_start_stop);
+ break;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm);
+
+void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
+{
+ switch (reg) {
+ case group_control:
+ WRITE_WO_MMIO(group_control, val);
+ break;
+
+ case debug_bus_control:
+ WRITE_WO_MMIO(debug_bus_control, val);
+ break;
+
+ case trace_address:
+ WRITE_WO_MMIO(trace_address, val);
+ break;
+
+ case ext_tr_timer:
+ WRITE_WO_MMIO(ext_tr_timer, val);
+ break;
+
+ case pm_status:
+ WRITE_WO_MMIO(pm_status, val);
+ break;
+
+ case pm_control:
+ WRITE_WO_MMIO(pm_control, val);
+ break;
+
+ case pm_interval:
+ WRITE_WO_MMIO(pm_interval, val);
+ break;
+
+ case pm_start_stop:
+ WRITE_WO_MMIO(pm_start_stop, val);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm);
+
+/*
+ * Get/set the size of a physical counter to either 16 or 32 bits.
+ */
+
+u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
+{
+ u32 pm_ctrl, size = 0;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
+
+void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
+{
+ u32 pm_ctrl;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ switch (ctr_size) {
+ case 16:
+ pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
+ break;
+
+ case 32:
+ pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
+ break;
+ }
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
+
+/*
+ * Enable/disable the entire performance monitoring unit.
+ * When we enable the PMU, all pending writes to counters get committed.
+ */
+
+void cbe_enable_pm(u32 cpu)
+{
+ struct cbe_pmd_shadow_regs *shadow_regs;
+ u32 pm_ctrl;
+
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+ shadow_regs->counter_value_in_latch = 0;
+
+ pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm);
+
+void cbe_disable_pm(u32 cpu)
+{
+ u32 pm_ctrl;
+ pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm);
+
+/*
+ * Reading from the trace_buffer.
+ * The trace buffer is two 64-bit registers. Reading from
+ * the second half automatically increments the trace_address.
+ */
+
+void cbe_read_trace_buffer(u32 cpu, u64 *buf)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+ *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
+ *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
+}
+EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
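+
+/*
+ * A caller-side sketch: each call pulls one 128-bit trace entry, and
+ * since reading trace_buffer_64_127 advances trace_address, calling
+ * cbe_read_trace_buffer() repeatedly with buf advanced by two u64s
+ * drains successive entries.
+ */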
+
+/*
+ * Enabling/disabling interrupts for the entire performance monitoring unit.
+ */
+
+u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
+{
+ /* Reading pm_status clears the interrupt bits. */
+ return cbe_read_pm(cpu, pm_status);
+}
+EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
+
+void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
+{
+ /* Set which node and thread will handle the next interrupt. */
+ iic_set_interrupt_routing(cpu, thread, 0);
+
+ /* Enable the interrupt bits in the pm_status register. */
+ if (mask)
+ cbe_write_pm(cpu, pm_status, mask);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
+
+void cbe_disable_pm_interrupts(u32 cpu)
+{
+ cbe_get_and_clear_pm_interrupts(cpu);
+ cbe_write_pm(cpu, pm_status, 0);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
+
+static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
+{
+ perf_irq(get_irq_regs());
+ return IRQ_HANDLED;
+}
+
+static int __init cbe_init_pm_irq(void)
+{
+ unsigned int irq;
+ int rc, node;
+
+ for_each_node(node) {
+ irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
+ (node << IIC_IRQ_NODE_SHIFT));
+ if (irq == NO_IRQ) {
+ printk("ERROR: Unable to allocate irq for node %d\n",
+ node);
+ return -EINVAL;
+ }
+
+ rc = request_irq(irq, cbe_pm_irq,
+ 0, "cbe-pmu-0", NULL);
+ if (rc) {
+ printk("ERROR: Request for irq on node %d failed\n",
+ node);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+machine_arch_initcall(cell, cbe_init_pm_irq);
+
+void cbe_sync_irq(int node)
+{
+ unsigned int irq;
+
+ irq = irq_find_mapping(NULL,
+ IIC_IRQ_IOEX_PMI
+ | (node << IIC_IRQ_NODE_SHIFT));
+
+ if (irq == NO_IRQ) {
+ printk(KERN_WARNING "ERROR, unable to get existing irq %d "
+ "for node %d\n", irq, node);
+ return;
+ }
+
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(cbe_sync_irq);
+
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
new file mode 100644
index 00000000..6e3409d5
--- /dev/null
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -0,0 +1,148 @@
+/*
+ * linux/arch/powerpc/platforms/cell/qpace_setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
+ * Modified by Benjamin Krill