-rw-r--r--  arch/powerpc/boot/Makefile | 2
-rwxr-xr-x  arch/powerpc/boot/wrapper | 6
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 8
-rw-r--r--  arch/powerpc/include/asm/dbell.h | 3
-rw-r--r--  arch/powerpc/include/asm/io.h | 17
-rw-r--r--  arch/powerpc/include/asm/irq.h | 5
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 2
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 21
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 19
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 13
-rw-r--r--  arch/powerpc/include/asm/reg.h | 5
-rw-r--r--  arch/powerpc/include/asm/smp.h | 35
-rw-r--r--  arch/powerpc/include/asm/system.h | 2
-rw-r--r--  arch/powerpc/include/asm/xics.h | 2
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/dbell.c | 65
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 8
-rw-r--r--  arch/powerpc/kernel/head_32.S | 13
-rw-r--r--  arch/powerpc/kernel/head_64.S | 13
-rw-r--r--  arch/powerpc/kernel/irq.c | 112
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 13
-rw-r--r--  arch/powerpc/kernel/paca.c | 17
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 3
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 5
-rw-r--r--  arch/powerpc/kernel/prom.c | 15
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 18
-rw-r--r--  arch/powerpc/kernel/smp.c | 120
-rw-r--r--  arch/powerpc/kernel/vector.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/lib/alloc.c | 8
-rw-r--r--  arch/powerpc/lib/copypage_64.S | 7
-rw-r--r--  arch/powerpc/lib/devres.c | 6
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 12
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 15
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 6
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 4
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 6
-rw-r--r--  arch/powerpc/platforms/Kconfig | 13
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 2
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_smp.c | 123
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 18
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 13
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 7
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 7
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 59
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 3
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 39
-rw-r--r--  arch/powerpc/platforms/iseries/smp.h | 6
-rw-r--r--  arch/powerpc/platforms/powermac/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 13
-rw-r--r--  arch/powerpc/platforms/powermac/pic.h | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 1
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 87
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 22
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 20
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 117
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 31
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 3
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 3
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_85xx_cache_sram.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 10
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 13
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 52
-rw-r--r--  arch/powerpc/sysdev/xics/Kconfig | 1
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c | 26
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c | 26
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 42
-rw-r--r--  arch/powerpc/xmon/xmon.c | 2
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/carma/Kconfig | 17
-rw-r--r--  drivers/misc/carma/Makefile | 2
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 1141
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 1433
91 files changed, 3163 insertions, 922 deletions
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0e2a152c3aa..c26200b40a4 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -128,7 +128,7 @@ quiet_cmd_bootas = BOOTAS $@
cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
quiet_cmd_bootar = BOOTAR $@
- cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
+ cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
$(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE
$(call if_changed_dep,bootcc)
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index dfa29cb0f47..c74531af72c 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -258,7 +258,7 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
if [ -n "$gzip" ]; then
- gzip -f -9 "$vmz.$$"
+ gzip -n -f -9 "$vmz.$$"
fi
if [ -n "$cacheit" ]; then
@@ -343,7 +343,7 @@ coff)
$objbin/hack-coff "$ofile"
;;
cuboot*)
- gzip -f -9 "$ofile"
+ gzip -n -f -9 "$ofile"
${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \
$uboot_version -d "$ofile".gz "$ofile"
;;
@@ -390,6 +390,6 @@ ps3)
odir="$(dirname "$ofile.bin")"
rm -f "$odir/otheros.bld"
- gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
+ gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
;;
esac
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 249ddd0a27c..7de13865508 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -146,12 +146,18 @@ CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_ATA=y
# CONFIG_ATA_SFF is not set
@@ -197,6 +203,8 @@ CONFIG_S2IO=m
CONFIG_MYRI10GE=m
CONFIG_NETXEN_NIC=m
CONFIG_MLX4_EN=m
+CONFIG_QLGE=m
+CONFIG_BE2NET=m
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 0893ab9343a..9c70d0ca96d 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -27,9 +27,8 @@ enum ppc_dbell {
PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
};
-extern void doorbell_message_pass(int target, int msg);
+extern void doorbell_cause_ipi(int cpu, unsigned long data);
extern void doorbell_exception(struct pt_regs *regs);
-extern void doorbell_check_self(void);
extern void doorbell_setup_this_cpu(void);
static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 2f365f5007a..45698d55cd6 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -2,6 +2,8 @@
#define _ASM_POWERPC_IO_H
#ifdef __KERNEL__
+#define ARCH_HAS_IOREMAP_WC
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -622,12 +624,13 @@ static inline void iosync(void)
* * ioremap is the standard one and provides non-cacheable guarded mappings
* and can be hooked by the platform via ppc_md
*
- * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md. ioremap_prot is the exact
- * same thing as ioremap_flags.
+ * * ioremap_prot allows to specify the page flags as an argument and can
+ * also be hooked by the platform via ppc_md.
*
* * ioremap_nocache is identical to ioremap
*
+ * * ioremap_wc enables write combining
+ *
* * iounmap undoes such a mapping and can be hooked
*
* * __ioremap_at (and the pending __iounmap_at) are low level functions to
@@ -635,7 +638,7 @@ static inline void iosync(void)
* currently be hooked. Must be page aligned.
*
* * __ioremap is the low level implementation used by ioremap and
- * ioremap_flags and cannot be hooked (but can be used by a hook on one
+ * ioremap_prot and cannot be hooked (but can be used by a hook on one
* of the previous ones)
*
* * __ioremap_caller is the same as above but takes an explicit caller
@@ -646,10 +649,10 @@ static inline void iosync(void)
*
*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
- unsigned long flags);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+ unsigned long flags);
+extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
extern void iounmap(volatile void __iomem *addr);
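
For context, a minimal sketch (not part of this patch) of how a driver might use the ioremap_wc() declared above; the physical address, size and names here are made up for illustration:

	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_FB_PHYS	0xc0000000UL	/* assumed device address */
	#define EXAMPLE_FB_SIZE	0x00100000UL	/* assumed aperture size */

	static void __iomem *example_fb_base;

	static int example_map_fb(void)
	{
		/* non-cacheable, write-combined mapping of the aperture */
		example_fb_base = ioremap_wc(EXAMPLE_FB_PHYS, EXAMPLE_FB_SIZE);
		if (!example_fb_base)
			return -ENOMEM;
		iowrite32(0, example_fb_base);	/* touch the first word */
		return 0;
	}

	static void example_unmap_fb(void)
	{
		iounmap(example_fb_base);	/* undo the mapping, as before */
	}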
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e1983d57768..1bff591f7f7 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -88,9 +88,6 @@ struct irq_host_ops {
/* Dispose of such a mapping */
void (*unmap)(struct irq_host *h, unsigned int virq);
- /* Update of such a mapping */
- void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
/* Translate device-tree interrupt specifier from raw format coming
* from the firmware to a irq_hw_number_t (interrupt line number) and
* type (sense) that can be passed to set_irq_type(). In the absence
@@ -131,7 +128,7 @@ struct irq_host {
struct irq_data;
extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
-extern struct irq_host *virq_to_host(unsigned int virq);
+extern bool virq_is_host(unsigned int virq, struct irq_host *host);
/**
* irq_alloc_host - Allocate a new irq_host data structure
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index a077adc0b35..e0298d26ce5 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -210,6 +210,8 @@ struct dtl_entry {
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+extern struct kmem_cache *dtl_cache;
+
/*
* When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c6345acf166..47cacddb14c 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,21 +29,6 @@ struct file;
struct pci_controller;
struct kimage;
-#ifdef CONFIG_SMP
-struct smp_ops_t {
- void (*message_pass)(int target, int msg);
- int (*probe)(void);
- int (*kick_cpu)(int nr);
- void (*setup_cpu)(int nr);
- void (*bringup_done)(void);
- void (*take_timebase)(void);
- void (*give_timebase)(void);
- int (*cpu_disable)(void);
- void (*cpu_die)(unsigned int nr);
- int (*cpu_bootable)(unsigned int nr);
-};
-#endif
-
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
@@ -312,12 +297,6 @@ extern sys_ctrler_t sys_ctrler;
#endif /* CONFIG_PPC_PMAC */
-#ifdef CONFIG_SMP
-/* Poor default implementations */
-extern void __devinit smp_generic_give_timebase(void);
-extern void __devinit smp_generic_take_timebase(void);
-#endif /* CONFIG_SMP */
-
/* Functions to produce codes on the leds.
* The SRC code should be unique for the message category and should
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 488c52eb64c..9356262fd3c 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -59,24 +59,7 @@ static __inline__ void clear_page(void *addr)
: "ctr", "memory");
}
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
+extern void copy_page(void *to, void *from);
/* Log 2 of page table size */
extern u64 ppc64_pft_size;
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 2b09cd522d3..81576ee0cfb 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -257,21 +257,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long old;
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old;
-
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 1);
}
/*
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fdec5933305..c5cae0dd176 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -824,6 +824,11 @@
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HSCRATCH0,rX; \
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
+#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX
+
#endif
#ifdef CONFIG_PPC_BOOK3E_64
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 50873493a97..880b8c1e6e5 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
+#include <linux/irqreturn.h>
#ifndef __ASSEMBLY__
@@ -35,9 +36,26 @@ extern void cpu_die(void);
#ifdef CONFIG_SMP
-extern void smp_send_debugger_break(int cpu);
-extern void smp_message_recv(int);
+struct smp_ops_t {
+ void (*message_pass)(int cpu, int msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ void (*cause_ipi)(int cpu, unsigned long data);
+#endif
+ int (*probe)(void);
+ int (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*bringup_done)(void);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+};
+
+extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
+extern void __devinit smp_generic_give_timebase(void);
+extern void __devinit smp_generic_take_timebase(void);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
@@ -94,13 +112,16 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
-/*
- * irq controllers that have dedicated ipis per message and don't
- * need additional code in the action handler may use this
- */
+/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_resend(void);
+extern irqreturn_t smp_ipi_demux(void);
+
void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
@@ -170,6 +191,8 @@ extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern irqreturn_t debug_ipi_action(int irq, void *data);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
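
A rough sketch, assuming a hypothetical platform with a single IPI per cpu, of how the hooks declared above fit together; example_cause_ipi, example_ipi_handler and example_smp_ops are invented names, and the real in-tree users are the doorbell/85xx and XICS changes later in this diff:

	#include <linux/interrupt.h>
	#include <asm/smp.h>

	/* send side: trigger the one interrupt on 'cpu'; 'data' is the
	 * per-cpu cookie previously stored with smp_muxed_ipi_set_data() */
	static void example_cause_ipi(int cpu, unsigned long data)
	{
		/* platform specific, e.g. write 'data' to an IPI trigger register */
	}

	/* receive side: the single IPI handler just demultiplexes the
	 * four PPC_MSG_* messages */
	static irqreturn_t example_ipi_handler(int irq, void *dev_id)
	{
		return smp_ipi_demux();
	}

	static struct smp_ops_t example_smp_ops = {
		.message_pass	= smp_muxed_ipi_message_pass,	/* generic mux */
		.cause_ipi	= example_cause_ipi,		/* platform hook */
		/* .probe, .kick_cpu, .setup_cpu, ... as for any other platform */
	};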
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 5e474ddd227..2dc595dda03 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -219,8 +219,6 @@ extern int mem_init_done; /* set on boot once kmalloc can be called */
extern int init_bootmem_done; /* set once bootmem is available */
extern phys_addr_t memory_limit;
extern unsigned long klimit;
-
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
extern int powersave_nap; /* set if nap mode can be used in idle loop */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 6c06306c410..b183a406201 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -40,7 +40,7 @@ struct icp_ops {
void (*teardown_cpu)(void);
void (*flush_ipi)(void);
#ifdef CONFIG_SMP
- void (*message_pass)(int target, int msg);
+ void (*cause_ipi)(int cpu, unsigned long data);
irq_handler_t ipi_action;
#endif
};
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 82e0bed0650..9aab3631257 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -78,7 +78,6 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
extra-y := head_$(CONFIG_WORD_SIZE).o
-extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o
extra-$(CONFIG_40x) := head_40x.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 21f2c781ded..4e6ee944495 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -236,7 +236,7 @@ static void crash_kexec_wait_realmode(int cpu)
int i;
msecs = 10000;
- for (i=0; i < NR_CPUS && msecs > 0; i++) {
+ for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
if (i == cpu)
continue;
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 3307a52d797..2cc451aaaca 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,84 +13,35 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_SMP
-struct doorbell_cpu_info {
- unsigned long messages; /* current messages bits */
- unsigned int tag; /* tag value */
-};
-
-static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
-
void doorbell_setup_this_cpu(void)
{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+ unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
- info->messages = 0;
- info->tag = mfspr(SPRN_PIR) & 0x3fff;
+ smp_muxed_ipi_set_data(smp_processor_id(), tag);
}
-void doorbell_message_pass(int target, int msg)
+void doorbell_cause_ipi(int cpu, unsigned long data)
{
- struct doorbell_cpu_info *info;
- int i;
-
- if (target < NR_CPUS) {
- info = &per_cpu(doorbell_cpu_info, target);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- else if (target == MSG_ALL_BUT_SELF) {
- for_each_online_cpu(i) {
- if (i == smp_processor_id())
- continue;
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- }
- else { /* target == MSG_ALL */
- for_each_online_cpu(i) {
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- }
- ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
- }
+ ppc_msgsnd(PPC_DBELL, 0, data);
}
void doorbell_exception(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
- int msg;
- /* Warning: regs can be NULL when called from irq enable */
+ irq_enter();
- if (!info->messages || (num_online_cpus() < 2))
- goto out;
+ smp_ipi_demux();
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &info->messages))
- smp_message_recv(msg);
-
-out:
+ irq_exit();
set_irq_regs(old_regs);
}
-
-void doorbell_check_self(void)
-{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
-
- if (!info->messages)
- return;
-
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
-}
-
#else /* CONFIG_SMP */
void doorbell_exception(struct pt_regs *regs)
{
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0ec3b42717d..a85f4874cba 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -211,11 +211,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mr r9,r13
GET_PACA(r13)
mfspr r11,SPRN_SRR0
- ld r12,PACAKBASE(r13)
- ld r10,PACAKMSR(r13)
- LOAD_HANDLER(r12, system_call_entry)
- mtspr SPRN_SRR0,r12
mfspr r12,SPRN_SRR1
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, system_call_entry)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b . /* prevent speculative execution */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f..ba250d505e0 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush)
blr
#ifdef CONFIG_SMP
-#ifdef CONFIG_GEMINI
- .globl __secondary_start_gemini
-__secondary_start_gemini:
- mfspr r4,SPRN_HID0
- ori r4,r4,HID0_ICFI
- li r3,0
- ori r3,r3,HID0_ICE
- andc r4,r4,r3
- mtspr SPRN_HID0,r4
- sync
- b __secondary_start
-#endif /* CONFIG_GEMINI */
-
.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
mfspr r3, SPRN_PIR
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 73d6e9afcdf..ba504099844 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -218,13 +218,19 @@ generic_secondary_common_init:
*/
LOAD_REG_ADDR(r13, paca) /* Load paca pointer */
ld r13,0(r13) /* Get base vaddr of paca array */
+#ifndef CONFIG_SMP
+ addi r13,r13,PACA_SIZE /* know r13 if used accidentally */
+ b .kexec_wait /* wait for next kernel if !SMP */
+#else
+ LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
+ lwz r7,0(r7) /* also the max paca allocated */
li r5,0 /* logical cpu id */
1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
cmpw r6,r24 /* Compare to our id */
beq 2f
addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
addi r5,r5,1
- cmpwi r5,NR_CPUS
+ cmpw r5,r7 /* Check if more pacas exist */
blt 1b
mr r3,r24 /* not found, copy phys to r3 */
@@ -259,9 +265,6 @@ generic_secondary_common_init:
4: HMT_LOW
lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
/* start. */
-#ifndef CONFIG_SMP
- b 4b /* Never go on non-SMP */
-#else
cmpwi 0,r23,0
beq 4b /* Loop until told to go */
@@ -273,7 +276,7 @@ generic_secondary_common_init:
subi r1,r1,STACK_FRAME_OVERHEAD
b __secondary_start
-#endif
+#endif /* SMP */
/*
* Turn the MMU off.
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a81dd74414b..a24d37d4cf5 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
-#include <asm/dbell.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
@@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en)
#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
/* Check for pending doorbell interrupts and resend to ourself */
- doorbell_check_self();
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ smp_muxed_ipi_resend();
#endif
/*
@@ -493,7 +493,6 @@ struct irq_map_entry {
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
@@ -511,11 +510,11 @@ irq_hw_number_t virq_to_hw(unsigned int virq)
}
EXPORT_SYMBOL_GPL(virq_to_hw);
-struct irq_host *virq_to_host(unsigned int virq)
+bool virq_is_host(unsigned int virq, struct irq_host *host)
{
- return irq_map[virq].host;
+ return irq_map[virq].host == host;
}
-EXPORT_SYMBOL_GPL(virq_to_host);
+EXPORT_SYMBOL_GPL(virq_is_host);
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
@@ -537,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
/* Allocate structure and revmap table if using linear mapping */
if (revmap_type == IRQ_HOST_MAP_LINEAR)
size += revmap_arg * sizeof(unsigned int);
- host = zalloc_maybe_bootmem(size, GFP_KERNEL);
+ host = kzalloc(size, GFP_KERNEL);
if (host == NULL)
return NULL;
@@ -587,14 +586,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
irq_map[i].host = host;
smp_wmb();
- /* Clear norequest flags */
- irq_clear_status_flags(i, IRQ_NOREQUEST);
-
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
* explicitly change them
*/
ops->map(host, i, i);
+
+ /* Clear norequest flags */
+ irq_clear_status_flags(i, IRQ_NOREQUEST);
}
break;
case IRQ_HOST_MAP_LINEAR:
@@ -605,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
smp_wmb();
host->revmap_data.linear.revmap = rmap;
break;
+ case IRQ_HOST_MAP_TREE:
+ INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+ break;
default:
break;
}
@@ -662,8 +664,6 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto error;
}
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
/* map it */
smp_wmb();
irq_map[virq].hwirq = hwirq;
@@ -674,6 +674,8 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto errdesc;
}
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
return 0;
errdesc:
@@ -730,8 +732,6 @@ unsigned int irq_create_mapping(struct irq_host *host,
*/
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
- if (host->ops->remap)
- host->ops->remap(host, virq, hwirq);
pr_debug("irq: -> existing mapping on virq %d\n", virq);
return virq;
}
@@ -812,14 +812,15 @@ void irq_dispose_mapping(unsigned int virq)
return;
host = irq_map[virq].host;
- WARN_ON (host == NULL);
- if (host == NULL)
+ if (WARN_ON(host == NULL))
return;
/* Never unmap legacy interrupts */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
return;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+
/* remove chip and handler */
irq_set_chip_and_handler(virq, NULL, NULL);
@@ -839,13 +840,6 @@ void irq_dispose_mapping(unsigned int virq)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
- /*
- * Check if radix tree allocated yet, if not then nothing to
- * remove.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
- break;
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
@@ -856,8 +850,6 @@ void irq_dispose_mapping(unsigned int virq)
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
- irq_set_status_flags(virq, IRQ_NOREQUEST);
-
irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
@@ -903,16 +895,9 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
struct irq_map_entry *ptr;
unsigned int virq;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists and has bee initialized.
- * If not, we fallback to slow mode
- */
- if (revmap_trees_allocated < 2)
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
return irq_find_mapping(host, hwirq);
- /* Now try to resolve */
/*
* No rcu_read_lock(ing) needed, the ptr returned can't go under us
* as it's referencing an entry in the static irq_map table.
@@ -935,16 +920,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
-
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists yet.
- * If not, then the irq will be inserted into the tree when it gets
- * initialized.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
+ if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
return;
if (virq != NO_IRQ) {
@@ -960,7 +936,8 @@ unsigned int irq_linear_revmap(struct irq_host *host,
{
unsigned int *revmap;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+ return irq_find_mapping(host, hwirq);
/* Check revmap bounds */
if (unlikely(hwirq >= host->revmap_data.linear.size))
@@ -1054,53 +1031,6 @@ int arch_early_irq_init(void)
return 0;
}
-/* We need to create the radix trees late */
-static int irq_late_init(void)
-{
- struct irq_host *h;
- unsigned int i;
-
- /*
- * No mutual exclusion with respect to accessors of the tree is needed
- * here as the synchronization is done via the state variable
- * revmap_trees_allocated.
- */
- list_for_each_entry(h, &irq_hosts, link) {
- if (h->revmap_type == IRQ_HOST_MAP_TREE)
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
- }
-
- /*
- * Make sure the radix trees inits are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 1;
-
- /*
- * Insert the reverse mapping for those interrupts already present
- * in irq_map[].
- */
- mutex_lock(&revmap_trees_mutex);
- for (i = 0; i < irq_virq_count; i++) {
- if (irq_map[i].host &&
- (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
- radix_tree_insert(&irq_map[i].host->revmap_data.tree,
- irq_map[i].hwirq, &irq_map[i]);
- }
- mutex_unlock(&revmap_trees_mutex);
-
- /*
- * Make sure the radix trees insertions are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 2;
-
- return 0;
-}
-arch_initcall(irq_late_init);
-
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 42850ee00ad..bd9d35f59cf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs)
#ifdef CONFIG_SMP
void kgdb_roundup_cpus(unsigned long flags)
{
- smp_send_debugger_break(MSG_ALL_BUT_SELF);
+ smp_send_debugger_break();
}
#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 206a321a71d..e89df59cdc5 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 100.
*
- * This is used by all slaves.
+ * This is used by all slaves, even those that did not find a matching
+ * paca in the secondary startup code.
*
* Physical (hardware) cpu id should be in r3.
*/
@@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
1: mflr r5
addi r5,r5,kexec_flag-1b
- li r4,KEXEC_STATE_REAL_MODE
- stb r4,PACAKEXECSTATE(r13)
- SYNC
-
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
@@ -499,11 +496,17 @@ kexec_flag:
*
* get phys id from paca
* switch to real mode
+ * mark the paca as no longer used
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
bl real_mode
+
+ li r4,KEXEC_STATE_REAL_MODE
+ stb r4,PACAKEXECSTATE(r13)
+ SYNC
+
b .kexec_wait
/*
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 102244edecf..efeb8818418 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/threads.h>
+#include <linux/smp.h>
#include <linux/module.h>
#include <linux/memblock.h>
@@ -178,7 +178,7 @@ static int __initdata paca_size;
void __init allocate_pacas(void)
{
- int nr_cpus, cpu, limit;
+ int cpu, limit;
/*
* We can't take SLB misses on the paca, and we want to access them
@@ -190,23 +190,18 @@ void __init allocate_pacas(void)
if (firmware_has_feature(FW_FEATURE_ISERIES))
limit = min(limit, HvPagesToMap * HVPAGESIZE);
- nr_cpus = NR_CPUS;
- /* On iSeries we know we can never have more than 64 cpus */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- nr_cpus = min(64, nr_cpus);
-
- paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
+ paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
memset(paca, 0, paca_size);
printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
- paca_size, nr_cpus, paca);
+ paca_size, nr_cpu_ids, paca);
- allocate_lppacas(nr_cpus, limit);
+ allocate_lppacas(nr_cpu_ids, limit);
/* Can't use for_each_*_cpu, as they aren't functional yet */
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
initialise_paca(&paca[cpu], cpu);
}
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d225d99fe39..6baabc13306 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
const u32 *regs;
struct pci_dn *pdn;
- pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
+ pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
- memset(pdn, 0, sizeof(*pdn));
dn->data = pdn;
pdn->node = dn;
pdn->phb = phb;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ef3ef566235..7d28f540200 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs);
extern int sys_sigreturn(struct pt_regs *regs);
EXPORT_SYMBOL(clear_pages);
-EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(copy_4K_page);
-#endif
+EXPORT_SYMBOL(copy_page);
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(isa_io_base);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 5f5e6aed2b7..5311a26dcf4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -68,6 +68,7 @@ int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
+static phys_addr_t first_memblock_size;
static int __init early_parse_mem(char *p)
{
@@ -505,11 +506,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
size = 0x80000000ul - base;
}
#endif
-
- /* First MEMBLOCK added, do some special initializations */
- if (memstart_addr == ~(phys_addr_t)0)
- setup_initial_memory_limit(base, size);
- memstart_addr = min((u64)memstart_addr, base);
+ /* Keep track of the beginning of memory -and- the size of
+ * the very first block in the device-tree as it represents
+ * the RMA on ppc64 server
+ */
+ if (base < memstart_addr) {
+ memstart_addr = base;
+ first_memblock_size = size;
+ }
/* Add the chunk to the MEMBLOCK list */
memblock_add(base, size);
@@ -694,6 +698,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1475df6e403..79fca2651b6 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
* cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_mask as they come up.
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+ while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
const int *intserv;
int j, len;
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void)
intserv = &cpu; /* assume logical == phys */
}
- for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+ for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, intserv[j]);
set_cpu_present(cpu, true);
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void)
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= nthreads;
- if (maxcpus > NR_CPUS) {
+ if (maxcpus > nr_cpu_ids) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %d.\n",
- maxcpus, NR_CPUS);
- maxcpus = NR_CPUS;
+ maxcpus, nr_cpu_ids);
+ maxcpus = nr_cpu_ids;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
@@ -510,7 +510,7 @@ void __init smp_setup_cpu_maps(void)
cpu_init_thread_core_maps(nthreads);
/* Now that possible cpus are set, set nr_cpu_ids for later use */
- nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+ setup_nr_cpu_ids();
free_unused_pacas();
}
@@ -602,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port)
* name instead */
if (!np)
np = of_find_node_by_name(NULL, "8042");
+ if (np) {
+ of_i8042_kbd_irq = 1;
+ of_i8042_aux_irq = 12;
+ }
break;
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0eefccd3316..dd1973fed30 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -111,35 +111,6 @@ int __devinit smp_generic_kick_cpu(int nr)
}
#endif
-void smp_message_recv(int msg)
-{
- switch(msg) {
- case PPC_MSG_CALL_FUNCTION:
- generic_smp_call_function_interrupt();
- break;
- case PPC_MSG_RESCHEDULE:
- /* we notice need_resched on exit */
- break;
- case PPC_MSG_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
- case PPC_MSG_DEBUGGER_BREAK:
- if (crash_ipi_function_ptr) {
- crash_ipi_function_ptr(get_irq_regs());
- break;
- }
-#ifdef CONFIG_DEBUGGER
- debugger_ipi(get_irq_regs());
- break;
-#endif /* CONFIG_DEBUGGER */
- /* FALLTHROUGH */
- default:
- printk("SMP %d: smp_message_recv(): unknown msg %d\n",
- smp_processor_id(), msg);
- break;
- }
-}
-
static irqreturn_t call_function_action(int irq, void *data)
{
generic_smp_call_function_interrupt();
@@ -158,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t debug_ipi_action(int irq, void *data)
+irqreturn_t debug_ipi_action(int irq, void *data)
{
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+ if (crash_ipi_function_ptr) {
+ crash_ipi_function_ptr(get_irq_regs());
+ return IRQ_HANDLED;
+ }
+
+#ifdef CONFIG_DEBUGGER
+ debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
return IRQ_HANDLED;
}
@@ -199,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg)
return err;
}
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+struct cpu_messages {
+ int messages; /* current messages */
+ unsigned long data; /* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+ info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+ char *message = (char *)&info->messages;
+
+ message[msg] = 1;
+ mb();
+ smp_ops->cause_ipi(cpu, info->data);
+}
+
+void smp_muxed_ipi_resend(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+
+ if (info->messages)
+ smp_ops->cause_ipi(smp_processor_id(), info->data);
+}
+
+irqreturn_t smp_ipi_demux(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+ unsigned int all;
+
+ mb(); /* order any irq clear */
+
+ do {
+ all = xchg_local(&info->messages, 0);
+
+#ifdef __BIG_ENDIAN
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+ generic_smp_call_function_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
+ reschedule_action(0, NULL); /* upcoming sched hook */
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
+ generic_smp_call_function_single_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
+ debug_ipi_action(0, NULL);
+#else
+#error Unsupported ENDIAN
+#endif
+ } while (info->messages);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
@@ -218,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
-#ifdef CONFIG_DEBUGGER
-void smp_send_debugger_break(int cpu)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+void smp_send_debugger_break(void)
{
- if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ int cpu;
+ int me = raw_smp_processor_id();
+
+ if (unlikely(!smp_ops))
+ return;
+
+ for_each_online_cpu(cpu)
+ if (cpu != me)
+ smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
@@ -230,9 +276,9 @@ void smp_send_debugger_break(int cpu)
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
crash_ipi_function_ptr = crash_ipi_callback;
- if (crash_ipi_callback && smp_ops) {
+ if (crash_ipi_callback) {
mb();
- smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+ smp_send_debugger_break();
}
}
#endif
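
The muxed-IPI accounting above relies on the big-endian byte-to-bit mapping: storing 1 into byte msg of the 32-bit messages word is the same as setting the bit 1 << (24 - 8 * msg), which is the mask smp_ipi_demux() tests. A stand-alone illustration (ordinary host C, not kernel code, with msg assumed to be PPC_MSG_RESCHEDULE == 1):

	#include <stdio.h>

	int main(void)
	{
		unsigned int messages = 0;
		char *message = (char *)&messages;
		int msg = 1;		/* PPC_MSG_RESCHEDULE */

		message[msg] = 1;	/* what smp_muxed_ipi_message_pass() stores */

		/* on big-endian this prints 0x10000, i.e. 1 << (24 - 8 * 1);
		 * on little-endian it would be 0x100, which is why the demux
		 * loop #errors out when __BIG_ENDIAN is not defined */
		printf("0x%x\n", messages);
		return 0;
	}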
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9de6f396cf8..4d5a3edff49 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -102,7 +102,7 @@ _GLOBAL(giveup_altivec)
MTMSRD(r5) /* enable use of VMX now */
isync
PPC_LCMPI 0,r3,0
- beqlr- /* if no previous owner, done */
+ beqlr /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c961de40c67..0f95b5cce03 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -236,7 +236,7 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
- return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
+ return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index ae99af66ca3..1a1b34487e7 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -112,7 +112,9 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
+#ifdef CONFIG_PPC_BOOK3S_64
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
+#endif
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index f53e09c7dac..13b676c20d1 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -6,14 +6,6 @@
#include <asm/system.h>
-void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- if (mem_init_done)
- return kmalloc(size, mask);
- else
- return alloc_bootmem(size);
-}
-
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 4d4eeb90048..53dcb6b1b70 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
@@ -15,9 +16,9 @@ PPC64_CACHES:
.tc ppc64_caches[TC],ppc64_caches
.section ".text"
-
-_GLOBAL(copy_4K_page)
- li r5,4096 /* 4K page size */
+_GLOBAL(copy_page)
+ lis r5,PAGE_SIZE@h
+ ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION
ld r10,PPC64_CACHES@toc(r2)
lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c
index deac4d30daf..e91615abae6 100644
--- a/arch/powerpc/lib/devres.c
+++ b/arch/powerpc/lib/devres.c
@@ -9,11 +9,11 @@
#include <linux/device.h> /* devres_*(), devm_ioremap_release() */
#include <linux/gfp.h>
-#include <linux/io.h> /* ioremap_flags() */
+#include <linux/io.h> /* ioremap_prot() */
#include <linux/module.h> /* EXPORT_SYMBOL() */
/**
- * devm_ioremap_prot - Managed ioremap_flags()
+ * devm_ioremap_prot - Managed ioremap_prot()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
@@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
if (!ptr)
return NULL;
- addr = ioremap_flags(offset, size, flags);
+ addr = ioremap_prot(offset, size, flags);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8dc41c0157f..51f87956f8f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -133,7 +133,15 @@ ioremap(phys_addr_t addr, unsigned long size)
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+ return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iomem *
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
if (flags & _PAGE_RW)
@@ -152,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 88927a05cdc..6e595f6496d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -255,7 +255,17 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
return __ioremap_caller(addr, size, flags, caller);
}
-void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+ unsigned long flags = _PAGE_NO_CACHE;
+ void *caller = __builtin_return_address(0);
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags, caller);
+ return __ioremap_caller(addr, size, flags, caller);
+}
+
+void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
void *caller = __builtin_return_address(0);
@@ -311,7 +321,8 @@ void iounmap(volatile void __iomem *token)
}
EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_wc);
+EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index a8bc0d44393..9f09319352c 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
status |= (ignore | mask);
if (status == 0xff)
- return NO_IRQ_IGNORE;
+ return NO_IRQ;
cpld_irq = ffz(status) + offset;
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+ if (irq != NO_IRQ) {
generic_handle_irq(irq);
return;
}
irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
&cpld_regs->misc_mask);
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+ if (irq != NO_IRQ) {
generic_handle_irq(irq);
return;
}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index bb611819b83..1a9a4957057 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -486,7 +486,7 @@ void __init mpc52xx_init_irq(void)
unsigned int mpc52xx_get_irq(void)
{
u32 status;
- int irq = NO_IRQ_IGNORE;
+ int irq;
status = in_be32(&intr->enc_status);
if (status & 0x00000400) { /* critical */
@@ -509,6 +509,8 @@ unsigned int mpc52xx_get_irq(void)
} else {
irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
+ } else {
+ return NO_IRQ;
}
return irq_linear_revmap(mpc52xx_irqhost, irq);
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 5d6c34ce0cb..8ccf9ed62fe 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -112,16 +112,8 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static void pci_host_unmap(struct irq_host *h, unsigned int virq)
-{
- /* remove chip and handler */
- irq_set_chip_data(virq, NULL);
- irq_set_chip(virq, NULL);
-}
-
static struct irq_host_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
- .unmap = pci_host_unmap,
};
int __init pq2ads_pci_init_irq(void)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fe3f6a3a530..d6a93a10c0f 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -235,8 +235,10 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.message_pass = smp_mpic_message_pass;
}
- if (cpu_has_feature(CPU_FTR_DBELL))
- smp_85xx_ops.message_pass = doorbell_message_pass;
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
+ smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+ }
BUG_ON(!smp_85xx_ops.message_pass);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 6059053e715..f970ca2b180 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -57,16 +57,19 @@ config UDBG_RTAS_CONSOLE
depends on PPC_RTAS
default n
+config PPC_SMP_MUXED_IPI
+ bool
+ help
+ Select this option if your platform supports SMP and your
+ interrupt controller provides less than 4 interrupts to each
+ cpu. This will enable the generic code to multiplex the 4
+ messages on to one ipi.
+
config PPC_UDBG_BEAT
bool "BEAT based debug console"
depends on PPC_CELLEB
default n
-config XICS
- depends on PPC_PSERIES
- bool
- default y
-
config IPIC
bool
default n
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a1e623822a3..2165b65876f 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64
config PPC_BOOK3E_64
bool "Embedded processors"
select PPC_FPU # Make it a choice ?
+ select PPC_SMP_MUXED_IPI
endchoice
@@ -178,6 +179,7 @@ config FSL_BOOKE
config PPC_FSL_BOOK3E
bool
select FSL_EMB_PERFMON
+ select PPC_SMP_MUXED_IPI
default y if FSL_BOOKE
config PTE_64BIT
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 8839ef6c718..a4a89350bcf 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -43,7 +43,6 @@ obj-y += celleb_setup.o \
beat_hvCall.o beat_interrupt.o \
beat_iommu.o
-obj-$(CONFIG_SMP) += beat_smp.o
obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 1e3329e8578..ac06903e136 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -113,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
- if (msi < NR_IRQS && virq_to_host(msi) == msic->irq_host) {
+ if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {
@@ -320,6 +320,7 @@ static struct irq_chip msic_irq_chip = {
static int msic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
+ irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
return 0;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 4cb9e147c30..55015e1f693 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -148,16 +148,6 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
}
/*
- * Update binding hardware IRQ number (hw) and Virtuql
- * IRQ number (virq). This is called only once for a given mapping.
- */
-static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- beat_construct_and_connect_irq_plug(virq, hw);
-}
-
-/*
* Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
* to pass away to irq_create_mapping().
*
@@ -184,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops beatic_pic_host_ops = {
.map = beatic_pic_host_map,
- .remap = beatic_pic_host_remap,
.unmap = beatic_pic_host_unmap,
.xlate = beatic_pic_host_xlate,
.match = beatic_pic_host_match,
@@ -257,22 +246,6 @@ void __init beatic_init_IRQ(void)
irq_set_default_host(beatic_host);
}
-#ifdef CONFIG_SMP
-
-/* Nullified to compile with SMP mode */
-void beatic_setup_cpu(int cpu)
-{
-}
-
-void beatic_cause_IPI(int cpu, int mesg)
-{
-}
-
-void beatic_request_IPIs(void)
-{
-}
-#endif /* CONFIG_SMP */
-
void beatic_deinit_IRQ(void)
{
int i;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
index b470fd0051f..a7e52f91a07 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
@@ -24,9 +24,6 @@
extern void beatic_init_IRQ(void);
extern unsigned int beatic_get_irq(void);
-extern void beatic_cause_IPI(int cpu, int mesg);
-extern void beatic_request_IPIs(void);
-extern void beatic_setup_cpu(int);
extern void beatic_deinit_IRQ(void);
#endif
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
deleted file mode 100644
index 3e86acbb0fb..00000000000
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * SMP support for Celleb platform. (Incomplete)
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/smp.c:
- * Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
- * Plus various changes from other IBM teams...
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/cpu.h>
-
-#include <asm/irq.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_interrupt.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
- */
-/* static cpumask_t of_spin_map; */
-
-/**
- * smp_startup_cpu() - start the given cpu
- *
- * At boot time, there is nothing to do for primary threads which were
- * started from Open Firmware. For anything else, call RTAS with the
- * appropriate start location.
- *
- * Returns:
- * 0 - failure
- * 1 - success
- */
-static inline int __devinit smp_startup_cpu(unsigned int lcpu)
-{
- return 0;
-}
-
-static void smp_beatic_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- beatic_cause_IPI(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- beatic_cause_IPI(i, msg);
- }
- }
-}
-
-static int __init smp_beatic_probe(void)
-{
- return cpumask_weight(cpu_possible_mask);
-}
-
-static void __devinit smp_beatic_setup_cpu(int cpu)
-{
- beatic_setup_cpu(cpu);
-}
-
-static int __devinit smp_celleb_kick_cpu(int nr)
-{
- BUG_ON(nr < 0 || nr >= NR_CPUS);
-
- return smp_startup_cpu(nr);
-}
-
-static int smp_celleb_cpu_bootable(unsigned int nr)
-{
- return 1;
-}
-static struct smp_ops_t bpa_beatic_smp_ops = {
- .message_pass = smp_beatic_message_pass,
- .probe = smp_beatic_probe,
- .kick_cpu = smp_celleb_kick_cpu,
- .setup_cpu = smp_beatic_setup_cpu,
- .cpu_bootable = smp_celleb_cpu_bootable,
-};
-
-/* This is called very early */
-void __init smp_init_celleb(void)
-{
- DBG(" -> smp_init_celleb()\n");
-
- smp_ops = &bpa_beatic_smp_ops;
-
- DBG(" <- smp_init_celleb()\n");
-}
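
The file above is one of several places this series deletes the same fan-out idiom: under the old smp_ops contract, message_pass() received either a CPU number or a wildcard (MSG_ALL / MSG_ALL_BUT_SELF), so every platform open-coded the same loop over online CPUs. A condensed sketch of the retired pattern follows; platform_cause_ipi() is only a stand-in for beatic_cause_IPI(), iic_cause_IPI() and the other per-platform hooks.

/* Old contract, removed by this series: "target" may be a cpu number
 * or a wildcard, so each platform repeated the same fan-out loop. */
static void old_style_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {			/* a single destination */
		platform_cause_ipi(target, msg);
		return;
	}

	for_each_online_cpu(i) {		/* MSG_ALL / MSG_ALL_BUT_SELF */
		if (target == MSG_ALL_BUT_SELF && i == smp_processor_id())
			continue;
		platform_cause_ipi(i, msg);
	}
}
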
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 2904b0a6b2c..5822141aa63 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -319,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
size = 256;
config = &private->fake_config[devno][fn];
- *config = alloc_maybe_bootmem(size, GFP_KERNEL);
+ *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
if (*config == NULL) {
printk(KERN_ERR "PCI: "
"not enough memory for fake configuration space\n");
@@ -330,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
size = sizeof(struct celleb_pci_resource);
res = &private->res[devno][fn];
- *res = alloc_maybe_bootmem(size, GFP_KERNEL);
+ *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
if (*res == NULL) {
printk(KERN_ERR
"PCI: not enough memory for resource data space\n");
@@ -431,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev,
static void __init celleb_alloc_private_mem(struct pci_controller *hose)
{
hose->private_data =
- alloc_maybe_bootmem(sizeof(struct celleb_pci_private),
+ zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
GFP_KERNEL);
}
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index e5384557977..d58d9bae4b9 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void)
spu_management_ops = &spu_management_of_ops;
#endif
-#ifdef CONFIG_SMP
- smp_init_celleb();
-#endif
-
celleb_setup_arch_common();
}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 44cfd1bef89..6a58744d66c 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
int ipi = (int)(long)dev_id;
- smp_message_recv(ipi);
-
+ switch(ipi) {
+ case PPC_MSG_CALL_FUNCTION:
+ generic_smp_call_function_interrupt();
+ break;
+ case PPC_MSG_RESCHEDULE:
+ /* Upcoming sched hook */
+ break;
+ case PPC_MSG_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+ case PPC_MSG_DEBUGGER_BREAK:
+ debug_ipi_action(0, NULL);
+ break;
+ }
return IRQ_HANDLED;
}
static void iic_request_ipi(int ipi, const char *name)
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index a2161b91b0b..d176e6148e3 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -103,22 +103,6 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
return 1;
}
-static void smp_iic_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- iic_cause_IPI(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- iic_cause_IPI(i, msg);
- }
- }
-}
-
static int __init smp_iic_probe(void)
{
iic_request_IPIs();
@@ -168,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
return 1;
}
static struct smp_ops_t bpa_iic_smp_ops = {
- .message_pass = smp_iic_message_pass,
+ .message_pass = iic_cause_IPI,
.probe = smp_iic_probe,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
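
After this change message_pass() only ever receives a single CPU, which is why iic_cause_IPI() (already of the form (int cpu, int msg)) can be wired in directly and the per-message handling moves into iic_ipi_action() above. The fan-out now lives in common code; a simplification of what the generic SMP layer does after this series, with an illustrative function name:

/* Simplified: generic code iterates the destinations, the platform
 * hook only ever deals with one cpu at a time. */
static void send_message_to_mask(const struct cpumask *mask, int msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		smp_ops->message_pass(cpu, msg);   /* e.g. iic_cause_IPI(cpu, msg) */
}
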
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 34d2b99d10c..442c28c00f8 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -68,9 +68,9 @@ struct spider_pic {
};
static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
-static struct spider_pic *spider_virq_to_pic(unsigned int virq)
+static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
{
- return virq_to_host(virq)->host_data;
+ return irq_data_get_irq_chip_data(d);
}
static void __iomem *spider_get_irq_config(struct spider_pic *pic,
@@ -81,7 +81,7 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic,
static void spider_unmask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) | 0x30000000u);
@@ -89,7 +89,7 @@ static void spider_unmask_irq(struct irq_data *d)
static void spider_mask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
@@ -97,7 +97,7 @@ static void spider_mask_irq(struct irq_data *d)
static void spider_ack_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
unsigned int src = irqd_to_hwirq(d);
/* Reset edge detection logic if necessary
@@ -116,7 +116,7 @@ static void spider_ack_irq(struct irq_data *d)
static int spider_set_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
unsigned int hw = irqd_to_hwirq(d);
void __iomem *cfg = spider_get_irq_config(pic, hw);
u32 old_mask;
@@ -171,6 +171,7 @@ static struct irq_chip spider_pic = {
static int spider_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
+ irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
/* Set default irq type */
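
The spider-pic conversion shows the chip_data pattern used by several hunks in this patch: map() records the owning controller in the virq's chip_data, and the irq_chip callbacks read it back from the irq_data they are handed instead of resolving the host from the virq. In outline (names here are illustrative; the real code uses struct spider_pic and its config registers):

static int example_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	/* h->host_data is the controller instance that owns this host */
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
	return 0;
}

static void example_mask_irq(struct irq_data *d)
{
	/* no virq-to-host lookup: the controller rides along with the irq */
	struct example_pic *pic = irq_data_get_irq_chip_data(d);
	void __iomem *cfg = example_get_config(pic, irqd_to_hwirq(d));

	out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
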
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 77cbe4c8f95..f61a2dd96b9 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -107,12 +107,6 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
-{
- irq_set_chip_data(irq, NULL);
- irq_set_chip(irq, NULL);
-}
-
static int flipper_pic_match(struct irq_host *h, struct device_node *np)
{
return 1;
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops flipper_irq_host_ops = {
.map = flipper_pic_map,
- .unmap = flipper_pic_unmap,
.match = flipper_pic_match,
};
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 44b398b0a2f..e4919170c6b 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -100,15 +100,8 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
-{
- irq_set_chip_data(irq, NULL);
- irq_set_chip(irq, NULL);
-}
-
static struct irq_host_ops hlwd_irq_host_ops = {
.map = hlwd_pic_map,
- .unmap = hlwd_pic_unmap,
};
static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index ea1d3622b41..b57cda3a081 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -1,6 +1,7 @@
config PPC_ISERIES
bool "IBM Legacy iSeries"
depends on PPC64 && PPC_BOOK3S
+ select PPC_SMP_MUXED_IPI
select PPC_INDIRECT_PIO
select PPC_INDIRECT_MMIO
select PPC_PCI_CHOICE if EXPERT
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index a67984c0495..29c02f36b32 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -61,29 +61,31 @@ system_reset_iSeries:
/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
/* In the UP case we'll yield() later, and we will not access the paca anyway */
#ifdef CONFIG_SMP
-1:
+iSeries_secondary_wait_paca:
HMT_LOW
LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
ld r23,0(r23)
- sync
- LOAD_REG_ADDR(r3,current_set)
- sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r3,r3,r28
- addi r1,r3,THREAD_SIZE
- subi r1,r1,STACK_FRAME_OVERHEAD
- cmpwi 0,r23,0 /* Keep poking the Hypervisor until */
- bne 2f /* we're released */
- /* Let the Hypervisor know we are alive */
+ cmpdi 0,r23,0
+ bne 2f /* go on when the master is ready */
+
+ /* Keep poking the Hypervisor until we're released */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
lis r3,0x8002
rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
- b 1b
-#endif
+ b iSeries_secondary_wait_paca
2:
+ HMT_MEDIUM
+ sync
+
+ LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
+ lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
+ cmpld 0,r24,r3 /* is our cpu number allocated? */
+ bge iSeries_secondary_yield /* no, yield forever */
+
/* Load our paca now that it's been allocated */
LOAD_REG_ADDR(r13, paca)
ld r13,0(r13)
@@ -94,10 +96,24 @@ system_reset_iSeries:
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
- HMT_LOW
-#ifdef CONFIG_SMP
+iSeries_secondary_smp_loop:
lbz r23,PACAPROCSTART(r13) /* Test if this processor
* should start */
+ cmpwi 0,r23,0
+ bne 3f /* go on when we are told */
+
+ HMT_LOW
+ /* Let the Hypervisor know we are alive */
+ /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+ lis r3,0x8002
+ rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
+ li r0,-1 /* r0=-1 indicates a Hypervisor call */
+ sc /* Invoke the hypervisor via a system call */
+ mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
+ b iSeries_secondary_smp_loop /* wait for signal to start */
+
+3:
+ HMT_MEDIUM
sync
LOAD_REG_ADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
@@ -105,27 +121,22 @@ system_reset_iSeries:
addi r1,r3,THREAD_SIZE
subi r1,r1,STACK_FRAME_OVERHEAD
- cmpwi 0,r23,0
- beq iSeries_secondary_smp_loop /* Loop until told to go */
b __secondary_start /* Loop until told to go */
-iSeries_secondary_smp_loop:
- /* Let the Hypervisor know we are alive */
- /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
- lis r3,0x8002
- rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
-#else /* CONFIG_SMP */
+#endif /* CONFIG_SMP */
+
+iSeries_secondary_yield:
/* Yield the processor. This is required for non-SMP kernels
which are running on multi-threaded machines. */
+ HMT_LOW
lis r3,0x8000
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
li r4,0 /* "yield timed" */
li r5,-1 /* "yield forever" */
-#endif /* CONFIG_SMP */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
- b 2b /* If SMP not configured, secondaries
+ b iSeries_secondary_yield /* If SMP not configured, secondaries
* loop forever */
/*** ISeries-LPAR interrupt handlers ***/
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 375c21ca660..b2103453eb0 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,7 +42,6 @@
#include "irq.h"
#include "pci.h"
#include "call_pci.h"
-#include "smp.h"
#ifdef CONFIG_PCI
@@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void)
#ifdef CONFIG_SMP
if (get_lppaca()->int_dword.fields.ipi_cnt) {
get_lppaca()->int_dword.fields.ipi_cnt = 0;
- iSeries_smp_message_recv();
+ smp_ipi_demux();
}
#endif /* CONFIG_SMP */
if (hvlpevent_is_pending())
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 81cb8d2c413..c25a0815c26 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -685,6 +685,11 @@ void * __init iSeries_early_setup(void)
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
+#ifdef CONFIG_SMP
+ /* On iSeries we know we can never have more than 64 cpus */
+ nr_cpu_ids = max(nr_cpu_ids, 64);
+#endif
+
iSeries_fixup_klimit();
/*
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 02a677a1f91..e3265adde5d 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -42,45 +42,11 @@
#include <asm/cputable.h>
#include <asm/system.h>
-#include "smp.h"
-
-static unsigned long iSeries_smp_message[NR_CPUS];
-
-void iSeries_smp_message_recv(void)
+static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
{
- int cpu = smp_processor_id();
- int msg;
-
- if (num_online_cpus() < 2)
- return;
-
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
- smp_message_recv(msg);
-}
-
-static inline void smp_iSeries_do_message(int cpu, int msg)
-{
- set_bit(msg, &iSeries_smp_message[cpu]);
HvCall_sendIPI(&(paca[cpu]));
}
-static void smp_iSeries_message_pass(int target, int msg)
-{
- int i;
-
- if (target < NR_CPUS)
- smp_iSeries_do_message(target, msg);
- else {
- for_each_online_cpu(i) {
- if ((target == MSG_ALL_BUT_SELF) &&
- (i == smp_processor_id()))
- continue;
- smp_iSeries_do_message(i, msg);
- }
- }
-}
-
static int smp_iSeries_probe(void)
{
return cpumask_weight(cpu_possible_mask);
@@ -109,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
}
static struct smp_ops_t iSeries_smp_ops = {
- .message_pass = smp_iSeries_message_pass,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = smp_iSeries_cause_ipi,
.probe = smp_iSeries_probe,
.kick_cpu = smp_iSeries_kick_cpu,
.setup_cpu = smp_iSeries_setup_cpu,
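
smp_iSeries_cause_ipi() only rings the doorbell because the bookkeeping that iSeries_smp_message[] used to do now happens in the generic muxed-IPI layer selected by PPC_SMP_MUXED_IPI. Roughly, as a simplified sketch (the real helpers keep a per-cpu structure and pass platform-specific data to cause_ipi rather than a bare bitmap and zero):

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_message);

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	set_bit(msg, &per_cpu(ipi_message, cpu));
	mb();				/* publish the bit before the doorbell */
	smp_ops->cause_ipi(cpu, 0);	/* one hardware IPI, whatever the message */
}

irqreturn_t smp_ipi_demux(void)
{
	unsigned long *pending = &__get_cpu_var(ipi_message);

	mb();				/* order against the interrupt ack */
	while (*pending) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, pending))
			generic_smp_call_function_interrupt();
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, pending))
			;	/* reschedule hook */
		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, pending))
			generic_smp_call_function_single_interrupt();
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, pending))
			debug_ipi_action(0, NULL);
	}
	return IRQ_HANDLED;
}
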
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
deleted file mode 100644
index d501f7de01e..00000000000
--- a/arch/powerpc/platforms/iseries/smp.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_SMP_H
-#define _PLATFORMS_ISERIES_SMP_H
-
-extern void iSeries_smp_message_recv(void);
-
-#endif /* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 1e1a0873e1d..1afd10f6785 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -18,4 +18,13 @@ config PPC_PMAC64
select PPC_970_NAP
default y
-
+config PPC_PMAC32_PSURGE
+ bool "Support for powersurge upgrade cards" if EXPERT
+ depends on SMP && PPC32 && PPC_PMAC
+ select PPC_SMP_MUXED_IPI
+ default y
+ help
+ The powersurge cpu boards can be used in the generation
+ of powermacs that have a socket for an upgradeable cpu card,
+ including the 7500, 8500, 9500, 9600. Support exists for
+ both dual and quad socket upgrade cards.
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 2f34ad04029..360260d1352 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void)
unsigned long bits = 0;
unsigned long flags;
-#ifdef CONFIG_SMP
- void psurge_smp_message_recv(void);
-
- /* IPI's are a hack on the powersurge -- Cort */
- if ( smp_processor_id() != 0 ) {
- psurge_smp_message_recv();
- return NO_IRQ_IGNORE; /* ignore, already handled */
+#ifdef CONFIG_PPC_PMAC32_PSURGE
+ /* IPI's are a hack on the powersurge -- Cort */
+ if (smp_processor_id() != 0) {
+ return psurge_secondary_virq;
}
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
deleted file mode 100644
index d622a8345aa..00000000000
--- a/arch/powerpc/platforms/powermac/pic.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __PPC_PLATFORMS_PMAC_PIC_H
-#define __PPC_PLATFORMS_PMAC_PIC_H
-
-#include <linux/irq.h>
-
-extern struct irq_chip pmac_pic;
-
-extern void pmac_pic_init(void);
-extern int pmac_get_irq(void);
-
-#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 20468f49aec..8327cce2bdb 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,7 @@ extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
+extern int psurge_secondary_virq;
extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 621d4b7755f..db092d7c4c5 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze);
static u64 timebase;
static int tb_req;
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
/*
* Powersurge (old powermac SMP) support.
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start;
/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;
+/* irq for secondary cpus to report */
+static struct irq_host *psurge_host;
+int psurge_secondary_virq;
+
/*
* Set and clear IPIs for powersurge.
*/
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu)
/*
* On powersurge (old SMP powermac architecture) we don't have
* separate IPIs for separate messages like openpic does. Instead
- * we have a bitmap for each processor, where a 1 bit means that
- * the corresponding message is pending for that processor.
- * Ideally each cpu's entry would be in a different cache line.
+ * we use the generic demux helpers
* -- paulus.
*/
-static unsigned long psurge_smp_message[NR_CPUS];
-
-void psurge_smp_message_recv(void)
+static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
- int cpu = smp_processor_id();
- int msg;
-
- /* clear interrupt */
- psurge_clr_ipi(cpu);
+ psurge_clr_ipi(smp_processor_id());
+ smp_ipi_demux();
- if (num_online_cpus() < 2)
- return;
+ return IRQ_HANDLED;
+}
- /* make sure there is a message there */
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
- smp_message_recv(msg);
+static void smp_psurge_cause_ipi(int cpu, unsigned long data)
+{
+ psurge_set_ipi(cpu);
}
-irqreturn_t psurge_primary_intr(int irq, void *d)
+static int psurge_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
{
- psurge_smp_message_recv();
- return IRQ_HANDLED;
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
+
+ return 0;
}
-static void smp_psurge_message_pass(int target, int msg)
+struct irq_host_ops psurge_host_ops = {
+ .map = psurge_host_map,
+};
+
+static int psurge_secondary_ipi_init(void)
{
- int i;
+ int rc = -ENOMEM;
- if (num_online_cpus() < 2)
- return;
+ psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
+ &psurge_host_ops, 0);
- for_each_online_cpu(i) {
- if (target == MSG_ALL
- || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
- || target == i) {
- set_bit(msg, &psurge_smp_message[i]);
- psurge_set_ipi(i);
- }
- }
+ if (psurge_host)
+ psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
+
+ if (psurge_secondary_virq)
+ rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+ IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
+
+ if (rc)
+ pr_err("Failed to setup secondary cpu IPI\n");
+
+ return rc;
}
/*
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void)
ncpus = 2;
}
+ if (psurge_secondary_ipi_init())
+ return 1;
+
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -399,8 +407,8 @@ static int __init smp_psurge_kick_cpu(int nr)
}
static struct irqaction psurge_irqaction = {
- .handler = psurge_primary_intr,
- .flags = IRQF_DISABLED,
+ .handler = psurge_ipi_intr,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "primary IPI",
};
@@ -439,14 +447,15 @@ void __init smp_psurge_give_timebase(void)
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
- .message_pass = smp_psurge_message_pass,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = smp_psurge_cause_ipi,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_psurge_give_timebase,
.take_timebase = smp_psurge_take_timebase,
};
-#endif /* CONFIG_PPC32 - actually powersurge support */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
/*
* Core 99 and later support
@@ -1006,7 +1015,7 @@ void __init pmac_setup_smp(void)
of_node_put(np);
smp_ops = &core99_smp_ops;
}
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
else {
/* We have to set bits in cpu_possible_mask here since the
* secondary CPU(s) aren't in the device tree. Various
@@ -1019,7 +1028,7 @@ void __init pmac_setup_smp(void)
set_cpu_possible(cpu, true);
smp_ops = &psurge_smp_ops;
}
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
#ifdef CONFIG_HOTPLUG_CPU
ppc_md.cpu_die = pmac_cpu_die;
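
PowerSurge secondaries have no interrupt source of their own in the PIC, so the IPI is given a purely virtual irq: a NOMAP host plus irq_create_direct_mapping() yields a virq with no hardware number behind it, pmac_pic_get_irq() simply returns that virq on secondary CPUs, and the handler clears the mailbox and demuxes. Condensed from psurge_secondary_ipi_init() above, with an illustrative function name:

/* Delivery path, roughly:
 *   sender:    smp_muxed_ipi_message_pass(cpu, msg)
 *                -> smp_psurge_cause_ipi(cpu, 0) -> psurge_set_ipi(cpu)
 *   secondary: pmac_pic_get_irq() returns psurge_secondary_virq
 *                -> psurge_ipi_intr(): psurge_clr_ipi(); smp_ipi_demux();
 */
static int __init example_secondary_ipi_setup(void)
{
	struct irq_host *host;
	unsigned int virq;

	/* IRQ_HOST_MAP_NOMAP: virqs exist without a device-tree hwirq */
	host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &psurge_host_ops, 0);
	if (!host)
		return -ENOMEM;

	virq = irq_create_direct_mapping(host);
	if (!virq)
		return -ENOMEM;

	return request_irq(virq, psurge_ipi_intr,
			   IRQF_DISABLED | IRQF_PERCPU, "IPI", NULL);
}
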
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 523bd0d34d9..600ed2c0ed5 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -659,11 +659,6 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
static void dump_bmp(struct ps3_private* pd) {};
#endif /* defined(DEBUG) */
-static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
-{
- irq_set_chip_data(virq, NULL);
-}
-
static int ps3_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops ps3_host_ops = {
.map = ps3_host_map,
- .unmap = ps3_host_unmap,
.match = ps3_host_match,
};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 51ffde40af2..4c44794faac 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -39,7 +39,7 @@
#define MSG_COUNT 4
static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
-static void do_message_pass(int target, int msg)
+static void ps3_smp_message_pass(int cpu, int msg)
{
int result;
unsigned int virq;
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg)
return;
}
- virq = per_cpu(ps3_ipi_virqs, target)[msg];
+ virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
result = ps3_send_event_locally(virq);
if (result)
DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
- " (%d)\n", __func__, __LINE__, target, msg, result);
-}
-
-static void ps3_smp_message_pass(int target, int msg)
-{
- int cpu;
-
- if (target < NR_CPUS)
- do_message_pass(target, msg);
- else if (target == MSG_ALL_BUT_SELF) {
- for_each_online_cpu(cpu)
- if (cpu != smp_processor_id())
- do_message_pass(cpu, msg);
- } else {
- for_each_online_cpu(cpu)
- do_message_pass(cpu, msg);
- }
+ " (%d)\n", __func__, __LINE__, cpu, msg, result);
}
static int ps3_smp_probe(void)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 39a472e9e80..375a9f92158 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu)
* The current HV requires the spu shadow regs to be mapped with the
* PTE page protection bits set as read-only (PP=3). This implementation
* uses the low level __ioremap() to bypass the page protection settings
- * inforced by ioremap_flags() to get the needed PTE bits set for the
+ * enforced by ioremap_prot() to get the needed PTE bits set for the
* shadow regs.
*/
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu)
goto fail_ioremap;
}
- spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
+ spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
LS_SIZE, _PAGE_NO_CACHE);
if (!spu->local_store) {
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index c371bc06434..e9190073bb9 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7;
/*
- * Size of per-cpu log buffers. Default is just under 16 pages worth.
+ * Size of per-cpu log buffers. Firmware requires that the buffer does
+ * not cross a 4k boundary.
*/
-static int dtl_buf_entries = (16 * 85);
-
+static int dtl_buf_entries = N_DISPATCH_LOG;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
struct dtl_ring {
@@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl)
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
- ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry);
+ ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
@@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl)
long int rc;
struct dtl_entry *buf = NULL;
+ if (!dtl_cache)
+ return -ENOMEM;
+
/* only allow one reader */
if (dtl->buf)
return -EBUSY;
n_entries = dtl_buf_entries;
- buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
- GFP_KERNEL, cpu_to_node(dtl->cpu));
+ buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
if (!buf) {
printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
__func__, dtl->cpu);
@@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl)
spin_unlock(&dtl->lock);
if (rc)
- kfree(buf);
+ kmem_cache_free(dtl_cache, buf);
return rc;
}
@@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl)
{
spin_lock(&dtl->lock);
dtl_stop(dtl);
- kfree(dtl->buf);
+ kmem_cache_free(dtl_cache, dtl->buf);
dtl->buf = NULL;
dtl->buf_entries = 0;
spin_unlock(&dtl->lock);
@@ -365,7 +367,7 @@ static int dtl_init(void)
event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
dtl_dir, &dtl_event_mask);
- buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600,
+ buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
dtl_dir, &dtl_buf_entries);
if (!event_mask_file || !buf_entries_file) {
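
The kmalloc-to-kmem_cache switch above is what satisfies the 4k constraint mentioned in the new comment: a cache created with object size and alignment both equal to DISPATCH_LOG_BYTES hands out naturally aligned buffers, so an object can never straddle a 4k boundary (in this tree DISPATCH_LOG_BYTES is one 4k page, which is also where the old "16 * 85" default came from, 85 being one page's worth of dtl entries). A per-cpu allocation then reduces to:

/* Sketch: one hypervisor-registerable DTL buffer for "cpu". */
static struct dtl_entry *example_alloc_dtl_buffer(int cpu)
{
	/* dtl_cache was created with size == align == DISPATCH_LOG_BYTES,
	 * so the returned object is DISPATCH_LOG_BYTES-aligned and cannot
	 * cross a 4k boundary. */
	return kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(cpu));
}
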
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6d5412a18b2..01faab9456c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
struct property *win64;
- const u32 *ddr_avail;
+ const u32 *ddw_avail;
u64 liobn;
int len, ret;
- ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
- if (!win64 || !ddr_avail || len < 3 * sizeof(u32))
+ if (!win64)
return;
+ if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
+ goto delprop;
+
dwp = win64->value;
liobn = (u64)be32_to_cpu(dwp->liobn);
@@ -681,28 +684,29 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
if (ret)
pr_warning("%s: failed to remove direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddr_avail[2], liobn);
+ np->full_name, ret, ddw_avail[2], liobn);
else
pr_debug("%s: successfully removed direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddr_avail[2], liobn);
-}
+ np->full_name, ret, ddw_avail[2], liobn);
+delprop:
+ ret = prom_remove_property(np, win64);
+ if (ret)
+ pr_warning("%s: failed to remove direct window property: %d\n",
+ np->full_name, ret);
+}
-static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn)
+static u64 find_existing_ddw(struct device_node *pdn)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
u64 dma_addr = 0;
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
spin_lock(&direct_window_list_lock);
/* check if we already created a window and dupe that config if so */
list_for_each_entry(window, &direct_window_list, list) {
@@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *
return dma_addr;
}
-static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn)
+static int find_existing_ddw_windows(void)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
int len;
+ struct device_node *pdn;
struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
- u64 dma_addr = 0;
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
- if (direct64) {
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return 0;
+
+ for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
+ if (!direct64)
+ continue;
+
window = kzalloc(sizeof(*window), GFP_KERNEL);
- if (!window) {
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
remove_ddw(pdn);
- } else {
- window->device = pdn;
- window->prop = direct64;
- spin_lock(&direct_window_list_lock);
- list_add(&window->list, &direct_window_list);
- spin_unlock(&direct_window_list_lock);
- dma_addr = direct64->dma_base;
+ continue;
}
+
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
}
- return dma_addr;
+ return 0;
}
+machine_arch_initcall(pseries, find_existing_ddw_windows);
-static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
struct device_node *dn;
@@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
if (pcidn->eeh_pe_config_addr)
cfg_addr = pcidn->eeh_pe_config_addr;
buid = pcidn->phb->buid;
- ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query,
+ ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
- " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid),
+ " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
BUID_LO(buid), ret);
return ret;
}
-static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
@@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
- ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr,
+ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
} while (rtas_busy_delay(ret));
dev_info(&dev->dev,
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
- "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1],
+ "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
@@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
int page_shift;
u64 dma_addr, max_addr;
struct device_node *dn;
- const u32 *uninitialized_var(ddr_avail);
+ const u32 *uninitialized_var(ddw_avail);
struct direct_window *window;
- struct property *uninitialized_var(win64);
+ struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
mutex_lock(&direct_window_init_mutex);
- dma_addr = dupe_ddw_if_already_created(dev, pdn);
- if (dma_addr != 0)
- goto out_unlock;
-
- dma_addr = dupe_ddw_if_kexec(dev, pdn);
+ dma_addr = find_existing_ddw(pdn);
if (dma_addr != 0)
goto out_unlock;
@@ -854,8 +858,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
- ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
- if (!ddr_avail || len < 3 * sizeof(u32))
+ ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+ if (!ddw_avail || len < 3 * sizeof(u32))
goto out_unlock;
/*
@@ -865,7 +869,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* of page sizes: supported and supported for migrate-dma.
*/
dn = pci_device_to_OF_node(dev);
- ret = query_ddw(dev, ddr_avail, &query);
+ ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
goto out_unlock;
@@ -907,13 +911,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
+ win64->length = sizeof(*ddwprop);
if (!win64->name || !win64->value) {
dev_info(&dev->dev,
"couldn't allocate property name and value\n");
goto out_free_prop;
}
- ret = create_ddw(dev, ddr_avail, &create, page_shift, len);
+ ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
if (ret != 0)
goto out_free_prop;
@@ -1021,13 +1026,16 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
const void *dma_window = NULL;
u64 dma_offset;
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ if (!dev->dma_mask)
return -EIO;
+ if (!dev_is_pci(dev))
+ goto check_mask;
+
+ pdev = to_pci_dev(dev);
+
/* only attempt to use a new window if 64-bit DMA is requested */
if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
- pdev = to_pci_dev(dev);
-
dn = pci_device_to_OF_node(pdev);
dev_dbg(dev, "node is %s\n", dn->full_name);
@@ -1054,12 +1062,17 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
}
}
- /* fall-through to iommu ops */
- if (!ddw_enabled) {
- dev_info(dev, "Using 32-bit DMA via iommu\n");
+ /* fall back on iommu ops, restore table pointer with ops */
+ if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
+ dev_info(dev, "Restoring 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
+ pci_dma_dev_setup_pSeriesLP(pdev);
}
+check_mask:
+ if (!dma_supported(dev, dma_mask))
+ return -EIO;
+
*dev->dma_mask = dma_mask;
return 0;
}
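
Two things happen in the dma_set_mask_pSeriesLP() hunk: the kexec-time window lookup moves out of the per-device path (find_existing_ddw_windows() now walks the tree once from an arch initcall and re-registers any DIRECT64 windows a previous kernel left behind), and the dma_supported() check moves to the end so it runs after the final dma_ops choice. The resulting control flow, condensed; try_enable_ddw() is only a stand-in for the find_existing_ddw()/enable_ddw() sequence and error handling is trimmed:

static int example_dma_set_mask(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;

	if (!dev->dma_mask)
		return -EIO;

	if (dev_is_pci(dev) && !disable_ddw && dma_mask == DMA_BIT_MASK(64))
		ddw_enabled = try_enable_ddw(to_pci_dev(dev));	/* 64-bit direct window */

	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		/* fall back: restore 32-bit iommu ops and the device's table */
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(to_pci_dev(dev));
	}

	if (!dma_supported(dev, dma_mask))	/* checked against the final ops */
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
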
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 164a8eb4592..086d2ae4e06 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
struct rtas_error_log *h, *errhdr = NULL;
if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
- printk(KERN_ERR "FWNMI: corrupt r3\n");
+ printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
return NULL;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 1689adccc6d..593acceeff9 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -278,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = {
.notifier_call = pci_dn_reconfig_notifier,
};
+struct kmem_cache *dtl_cache;
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Allocate space for the dispatch trace log for all possible cpus
@@ -289,18 +291,12 @@ static int alloc_dispatch_logs(void)
int cpu, ret;
struct paca_struct *pp;
struct dtl_entry *dtl;
- struct kmem_cache *dtl_cache;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
- dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
- DISPATCH_LOG_BYTES, 0, NULL);
- if (!dtl_cache) {
- pr_warn("Failed to create dispatch trace log buffer cache\n");
- pr_warn("Stolen time statistics will be unreliable\n");
+ if (!dtl_cache)
return 0;
- }
for_each_possible_cpu(cpu) {
pp = &paca[cpu];
@@ -334,10 +330,27 @@ static int alloc_dispatch_logs(void)
return 0;
}
-
-early_initcall(alloc_dispatch_logs);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline int alloc_dispatch_logs(void)
+{
+ return 0;
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+static int alloc_dispatch_log_kmem_cache(void)
+{
+ dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, NULL);
+ if (!dtl_cache) {
+ pr_warn("Failed to create dispatch trace log buffer cache\n");
+ pr_warn("Stolen time statistics will be unreliable\n");
+ return 0;
+ }
+
+ return alloc_dispatch_logs();
+}
+early_initcall(alloc_dispatch_log_kmem_cache);
+
static void __init pSeries_setup_arch(void)
{
/* Discover PIC type and setup ppc_md accordingly */
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 95f578158ff..fbffd7e47ab 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -207,7 +207,8 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
};
static struct smp_ops_t pSeries_xics_smp_ops = {
- .message_pass = NULL, /* Filled at runtime by xics_smp_probe() */
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
.probe = xics_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_xics_setup_cpu,
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index c7b8db9ed9b..9d20fa9d371 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -75,7 +75,8 @@ static int __init smp_a2_probe(void)
}
static struct smp_ops_t a2_smp_ops = {
- .message_pass = doorbell_message_pass,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = doorbell_cause_ipi,
.probe = smp_a2_probe,
.kick_cpu = smp_a2_kick_cpu,
.setup_cpu = smp_a2_setup_cpu,
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1636dd89670..bd0d54060b9 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device)
AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
bank->ph_addr = resource.start;
- bank->io_addr = (unsigned long) ioremap_flags(
+ bank->io_addr = (unsigned long) ioremap_prot(
bank->ph_addr, bank->size, _PAGE_NO_CACHE);
if (bank->io_addr == 0) {
dev_err(&device->dev, "ioremap() failed\n");
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 54fb1922fe3..11641589917 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev,
goto out_free;
}
- cache_sram->base_virt = ioremap_flags(cache_sram->base_phys,
+ cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%s: ioremap_flags failed\n",
+ dev_err(&dev->dev, "%s: ioremap_prot failed\n",
dev->dev.of_node->full_name);
ret = -ENOMEM;
goto out_release;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 01cd2f08951..92e78333c47 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -110,7 +110,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
- msi_data = irq_get_handler_data(entry->irq);
+ msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_data->bitmap,
virq_to_hw(entry->irq), 1);
@@ -168,7 +168,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
rc = -ENOSPC;
goto out_free;
}
- irq_set_handler_data(virq, msi_data);
+ /* chip_data is msi_data via host->hostdata in host->map() */
irq_set_msi_desc(virq, entry);
fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
@@ -193,7 +193,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data;
- cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq);
+ cascade_data = irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
raw_spin_lock(&desc->lock);
@@ -253,7 +253,7 @@ unlock:
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
- struct fsl_msi *msi = ofdev->dev.platform_data;
+ struct fsl_msi *msi = platform_get_drvdata(ofdev);
int virq, i;
struct fsl_msi_cascade_data *cascade_data;
@@ -330,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
dev_err(&dev->dev, "No memory for MSI structure\n");
return -ENOMEM;
}
- dev->dev.platform_data = msi;
+ platform_set_drvdata(dev, msi);
msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
NR_MSI_IRQS, &fsl_msi_host_ops, 0);
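
As the new comment in the setup path notes, the MSI private data is reachable as chip_data because the irq_host's map() stores host->host_data there; teardown therefore switches to irq_get_chip_data(), while the cascade handler keeps using handler_data for its own cascade structure. A sketch of the two slots, with an illustrative function name:

/* Sketch: two independent per-irq slots, owned by different users. */
static void example_msi_lookup(unsigned int virq, unsigned int cascade_irq)
{
	/* set by the irq_host's map() from host->host_data */
	struct fsl_msi *msi_data = irq_get_chip_data(virq);

	/* set explicitly when the cascade handler was installed */
	struct fsl_msi_cascade_data *cd = irq_get_handler_data(cascade_irq);

	(void)msi_data;
	(void)cd;
}
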
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 142770cb84b..d18bb27e4df 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -185,18 +185,6 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
-{
- /* Make sure irq is masked in hardware */
- i8259_mask_irq(irq_get_irq_data(virq));
-
- /* remove chip and handler */
- irq_set_chip_and_handler(virq, NULL, NULL);
-
- /* Make sure it's completed */
- synchronize_irq(virq);
-}
-
static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
static struct irq_host_ops i8259_host_ops = {
.match = i8259_host_match,
.map = i8259_host_map,
- .unmap = i8259_host_unmap,
.xlate = i8259_host_xlate,
};
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e47bce5f69..57e954142c7 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -663,7 +663,7 @@ static inline u32 mpic_physmask(u32 cpumask)
int i;
u32 mask = 0;
- for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+ for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
return mask;
}
@@ -872,16 +872,12 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
- cpumask_var_t tmp;
+ u32 mask = cpumask_bits(cpumask)[0];
- alloc_cpumask_var(&tmp, GFP_KERNEL);
-
- cpumask_and(tmp, cpumask, cpu_online_mask);
+ mask &= cpumask_bits(cpu_online_mask)[0];
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
- mpic_physmask(cpumask_bits(tmp)[0]));
-
- free_cpumask_var(tmp);
+ mpic_physmask(mask));
}
return 0;
@@ -1732,46 +1728,28 @@ void mpic_request_ipis(void)
}
}
-static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+void smp_mpic_message_pass(int cpu, int msg)
{
struct mpic *mpic = mpic_primary;
+ u32 physmask;
BUG_ON(mpic == NULL);
-#ifdef DEBUG_IPI
- DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
- mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
- ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
- mpic_physmask(cpumask_bits(cpu_mask)[0]));
-}
-
-void smp_mpic_message_pass(int target, int msg)
-{
- cpumask_var_t tmp;
-
/* make sure we're sending something that translates to an IPI */
if ((unsigned int)msg > 3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
smp_processor_id(), msg);
return;
}
- switch (target) {
- case MSG_ALL:
- mpic_send_ipi(msg, cpu_online_mask);
- break;
- case MSG_ALL_BUT_SELF:
- alloc_cpumask_var(&tmp, GFP_NOWAIT);
- cpumask_andnot(tmp, cpu_online_mask,
- cpumask_of(smp_processor_id()));
- mpic_send_ipi(msg, tmp);
- free_cpumask_var(tmp);
- break;
- default:
- mpic_send_ipi(msg, cpumask_of(target));
- break;
- }
+
+#ifdef DEBUG_IPI
+ DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
+#endif
+
+ physmask = 1 << get_hard_smp_processor_id(cpu);
+
+ mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+ msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
int __init smp_mpic_probe(void)
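
With a single destination per call, smp_mpic_message_pass() no longer needs a scratch cpumask; the only subtlety is that the IPI dispatch register is addressed by physical (hardware) CPU number and is 32 bits wide, which is also why mpic_physmask() above is clamped to min(32, NR_CPUS). The write amounts to the following, shown as an illustrative helper:

/* Illustrative: raise MPIC IPI "msg" at one logical cpu. */
static void example_mpic_cause_ipi(struct mpic *mpic, int cpu, int msg)
{
	/* dispatch registers hold one bit per *physical* cpu, 32 bits wide */
	u32 physmask = 1 << get_hard_smp_processor_id(cpu);

	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
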
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 123b8ddf281..0031eda320c 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -1,5 +1,6 @@
config PPC_XICS
def_bool n
+ select PPC_SMP_MUXED_IPI
config PPC_ICP_NATIVE
def_bool n
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 76e87245bbf..9518d367a64 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -118,38 +118,18 @@ static void icp_hv_set_cpu_priority(unsigned char cppr)
#ifdef CONFIG_SMP
-static inline void icp_hv_do_message(int cpu, int msg)
+static void icp_hv_cause_ipi(int cpu, unsigned long data)
{
- unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
- set_bit(msg, tgt);
- mb();
icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
-static void icp_hv_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- icp_hv_do_message(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- icp_hv_do_message(i, msg);
- }
- }
-}
-
static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
icp_hv_set_qirr(cpu, 0xff);
- return xics_ipi_dispatch(cpu);
+ return smp_ipi_demux();
}
#endif /* CONFIG_SMP */
@@ -162,7 +142,7 @@ static const struct icp_ops icp_hv_ops = {
.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
.ipi_action = icp_hv_ipi_action,
- .message_pass = icp_hv_message_pass,
+ .cause_ipi = icp_hv_cause_ipi,
#endif
};
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 3508321c450..1f15ad43614 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -134,38 +134,18 @@ static unsigned int icp_native_get_irq(void)
#ifdef CONFIG_SMP
-static inline void icp_native_do_message(int cpu, int msg)
+static void icp_native_cause_ipi(int cpu, unsigned long data)
{
- unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
- set_bit(msg, tgt);
- mb();
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
-static void icp_native_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- icp_native_do_message(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- icp_native_do_message(i, msg);
- }
- }
-}
-
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
icp_native_set_qirr(cpu, 0xff);
- return xics_ipi_dispatch(cpu);
+ return smp_ipi_demux();
}
#endif /* CONFIG_SMP */
@@ -283,7 +263,7 @@ static const struct icp_ops icp_native_ops = {
.flush_ipi = icp_native_flush_ipi,
#ifdef CONFIG_SMP
.ipi_action = icp_native_ipi_action,
- .message_pass = icp_native_message_pass,
+ .cause_ipi = icp_native_cause_ipi,
#endif
};
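
The set_bit()/mb() pair deleted from both ICP backends has not disappeared; it moved into smp_muxed_ipi_message_pass() (see the sketch after the iSeries smp_ops above), so cause_ipi() is reduced to the doorbell write and the IPI handler to an ack plus demux. A sketch of the native backend after the change (the hypervisor backend is the same shape with icp_hv_set_qirr()):

static void example_cause_ipi(int cpu, unsigned long data)
{
	icp_native_set_qirr(cpu, IPI_PRIORITY);	/* just ring the doorbell */
}

static irqreturn_t example_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	icp_native_set_qirr(cpu, 0xff);		/* ack/clear the IPI */
	return smp_ipi_demux();			/* generic layer reads the bits */
}
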
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index a0576b705dd..445c5a01b76 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -126,32 +126,6 @@ void xics_mask_unknown_vec(unsigned int vec)
#ifdef CONFIG_SMP
-DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
-
-irqreturn_t xics_ipi_dispatch(int cpu)
-{
- unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
- mb(); /* order mmio clearing qirr */
- while (*tgt) {
- if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
- smp_message_recv(PPC_MSG_CALL_FUNCTION);
- }
- if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
- smp_message_recv(PPC_MSG_RESCHEDULE);
- }
- if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
- smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
- }
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
- if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
- }
-#endif
- }
- return IRQ_HANDLED;
-}
-
static void xics_request_ipi(void)
{
unsigned int ipi;
@@ -161,17 +135,16 @@ static void xics_request_ipi(void)
/*
* IPIs are marked IRQF_DISABLED as they must run with irqs
- * disabled
+ * disabled, and PERCPU. The handler was set in map.
*/
- irq_set_handler(ipi, handle_percpu_irq);
BUG_ON(request_irq(ipi, icp_ops->ipi_action,
IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
}
int __init xics_smp_probe(void)
{
- /* Setup message_pass callback based on which ICP is used */
- smp_ops->message_pass = icp_ops->message_pass;
+ /* Setup cause_ipi callback based on which ICP is used */
+ smp_ops->cause_ipi = icp_ops->cause_ipi;
/* Register all the IPIs */
xics_request_ipi();
@@ -240,7 +213,7 @@ void xics_migrate_irqs_away(void)
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
- if (virq_to_host(virq) != xics_host)
+ if (!virq_is_host(virq, xics_host))
continue;
irq = (unsigned int)virq_to_hw(virq);
/* We need to get IPIs still. */
@@ -367,15 +340,16 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
/* Don't call into ICS for IPIs */
if (hw == XICS_IPI) {
irq_set_chip_and_handler(virq, &xics_ipi_chip,
- handle_fasteoi_irq);
+ handle_percpu_irq);
return 0;
}
/* Let the ICS setup the chip data */
list_for_each_entry(ics, &ics_list, link)
if (ics->map(ics, virq) == 0)
- break;
- return 0;
+ return 0;
+
+ return -EINVAL;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
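
Besides routing IPIs to handle_percpu_irq at map time (so xics_request_ipi() no longer has to patch the handler afterwards), xics_host_map() now fails instead of silently succeeding when no ICS claims a source; the tail of the function effectively becomes the fragment below, and a failed map() propagates up so irq_create_mapping() hands back NO_IRQ rather than a virq no ICS will ever unmask.

	/* Let one of the registered ICS backends set up the chip data;
	 * if none of them recognises the hwirq, refuse the mapping. */
	list_for_each_entry(ics, &ics_list, link)
		if (ics->map(ics, virq) == 0)
			return 0;

	return -EINVAL;
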
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 91309c5c00d..42541bbcc7f 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -437,7 +437,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
xmon_owner = cpu;
mb();
if (ncpus > 1) {
- smp_send_debugger_break(MSG_ALL_BUT_SELF);
+ smp_send_debugger_break();
/* wait for other cpus to come in */
for (timeout = 100000000; timeout != 0; --timeout) {
if (cpumask_weight(&cpus_in_xmon) >= ncpus)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4e007c6a4b4..d80dcdee88f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,5 +481,6 @@ source "drivers/misc/cb710/Kconfig"
source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
+source "drivers/misc/carma/Kconfig"
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f5468602961..848e8464faa 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
+obj-y += carma/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
new file mode 100644
index 00000000000..c90370ed712
--- /dev/null
+++ b/drivers/misc/carma/Kconfig
@@ -0,0 +1,17 @@
+config CARMA_FPGA
+ tristate "CARMA DATA-FPGA Access Driver"
+ depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
+ select VIDEOBUF_DMA_SG
+ default n
+ help
+ Say Y here to include support for communicating with the data
+ processing FPGAs on the OVRO CARMA board.
+
+config CARMA_FPGA_PROGRAM
+ tristate "CARMA DATA-FPGA Programmer"
+ depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
+ select VIDEOBUF_DMA_SG
+ default n
+ help
+ Say Y here to include support for programming the data processing
+ FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
new file mode 100644
index 00000000000..ff36ac2ce53
--- /dev/null
+++ b/drivers/misc/carma/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
+obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
new file mode 100644
index 00000000000..7ce6065dc20
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -0,0 +1,1141 @@
+/*
+ * CARMA Board DATA-FPGA Programmer
+ *
+ * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/completion.h>
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include <media/videobuf-dma-sg.h>
+
+/* MPC8349EMDS specific get_immrbase() */
+#include <sysdev/fsl_soc.h>
+
+static const char drv_name[] = "carma-fpga-program";
+
+/*
+ * Firmware images are always this exact size
+ *
+ * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
+ * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
+ */
+#define FW_SIZE_EP2S90 12849552
+#define FW_SIZE_EP2S130 18662880
+
+struct fpga_dev {
+ struct miscdevice miscdev;
+
+ /* Reference count */
+ struct kref ref;
+
+ /* Device Registers */
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *immr;
+
+ /* Freescale DMA Device */
+ struct dma_chan *chan;
+
+ /* Interrupts */
+ int irq, status;
+ struct completion completion;
+
+ /* FPGA Bitfile */
+ struct mutex lock;
+
+ struct videobuf_dmabuf vb;
+ bool vb_allocated;
+
+ /* max size and written bytes */
+ size_t fw_size;
+ size_t bytes;
+};
+
+/*
+ * FPGA Bitfile Helpers
+ */
+
+/**
+ * fpga_drop_firmware_data() - drop the bitfile image from memory
+ * @priv: the driver's private data structure
+ *
+ * LOCKING: must hold priv->lock
+ */
+static void fpga_drop_firmware_data(struct fpga_dev *priv)
+{
+ videobuf_dma_free(&priv->vb);
+ priv->vb_allocated = false;
+ priv->bytes = 0;
+}
+
+/*
+ * Private Data Reference Count
+ */
+
+static void fpga_dev_remove(struct kref *ref)
+{
+ struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
+
+ /* free any firmware image that was not programmed */
+ fpga_drop_firmware_data(priv);
+
+ mutex_destroy(&priv->lock);
+ kfree(priv);
+}
+
+/*
+ * LED Trigger (could be a separate module)
+ */
+
+/*
+ * NOTE: this whole thing does have the problem that whenever the LEDs are
+ * NOTE: first set to use the fpga trigger, they could be in the wrong state
+ */
+
+DEFINE_LED_TRIGGER(ledtrig_fpga);
+
+static void ledtrig_fpga_programmed(bool enabled)
+{
+ if (enabled)
+ led_trigger_event(ledtrig_fpga, LED_FULL);
+ else
+ led_trigger_event(ledtrig_fpga, LED_OFF);
+}
+
+/*
+ * FPGA Register Helpers
+ */
+
+/* Register Definitions */
+#define FPGA_CONFIG_CONTROL 0x40
+#define FPGA_CONFIG_STATUS 0x44
+#define FPGA_CONFIG_FIFO_SIZE 0x48
+#define FPGA_CONFIG_FIFO_USED 0x4C
+#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
+#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
+
+#define FPGA_FIFO_ADDRESS 0x3000
+
+static int fpga_fifo_size(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
+}
+
+#define CFG_STATUS_ERR_MASK 0xfffe
+
+static int fpga_config_error(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
+}
+
+static int fpga_fifo_empty(void __iomem *regs)
+{
+ return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
+}
+
+static void fpga_fifo_write(void __iomem *regs, u32 val)
+{
+ iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
+}
+
+static void fpga_set_byte_count(void __iomem *regs, u32 count)
+{
+ iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
+}
+
+#define CFG_CTL_ENABLE (1 << 0)
+#define CFG_CTL_RESET (1 << 1)
+#define CFG_CTL_DMA (1 << 2)
+
+static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
+{
+ u32 val;
+
+ val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
+ iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
+}
+
+static void fpga_programmer_disable(struct fpga_dev *priv)
+{
+ iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
+}
+
+static void fpga_dump_registers(struct fpga_dev *priv)
+{
+ u32 control, status, size, used, total, curr;
+
+ /* good status: do nothing */
+ if (priv->status == 0)
+ return;
+
+ /* Dump all status registers */
+ control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
+ status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
+ size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
+ used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
+ total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
+ curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
+
+ dev_err(priv->dev, "Configuration failed, dumping status registers\n");
+ dev_err(priv->dev, "Control: 0x%.8x\n", control);
+ dev_err(priv->dev, "Status: 0x%.8x\n", status);
+ dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
+ dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
+ dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
+ dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
+}
+
+/*
+ * FPGA Power Supply Code
+ */
+
+#define CTL_PWR_CONTROL 0x2006
+#define CTL_PWR_STATUS 0x200A
+#define CTL_PWR_FAIL 0x200B
+
+#define PWR_CONTROL_ENABLE 0x01
+
+#define PWR_STATUS_ERROR_MASK 0x10
+#define PWR_STATUS_GOOD 0x0f
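+/* 0x0f is assumed to be one power-good bit per DATA-FPGA supply rail */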
+
+/*
+ * Determine if the FPGA power is good for all supplies
+ */
+static bool fpga_power_good(struct fpga_dev *priv)
+{
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (val & PWR_STATUS_ERROR_MASK)
+ return false;
+
+ return val == PWR_STATUS_GOOD;
+}
+
+/*
+ * Disable the FPGA power supplies
+ */
+static void fpga_disable_power_supplies(struct fpga_dev *priv)
+{
+ unsigned long start;
+ u8 val;
+
+ iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
+
+ /*
+ * Wait 500ms for the power rails to discharge
+ *
+ * Without this delay, the CTL-CPLD state machine can get into a
+ * state where it is waiting for the power-goods to assert, but they
+ * never do. This only happens when enabling and disabling the
+ * power sequencer very rapidly.
+ *
+ * The loop below will also wait for the power goods to de-assert,
+ * but testing has shown that they are always disabled by the time
+ * the sleep completes. However, omitting the sleep and only waiting
+ * for the power-goods to de-assert was not sufficient to ensure
+ * that the power sequencer would not wedge itself.
+ */
+ msleep(500);
+
+ start = jiffies;
+ while (time_before(jiffies, start + HZ)) {
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (!(val & PWR_STATUS_GOOD))
+ break;
+
+ usleep_range(5000, 10000);
+ }
+
+ val = ioread8(priv->regs + CTL_PWR_STATUS);
+ if (val & PWR_STATUS_GOOD) {
+ dev_err(priv->dev, "power disable failed: "
+ "power goods: status 0x%.2x\n", val);
+ }
+
+ if (val & PWR_STATUS_ERROR_MASK) {
+ dev_err(priv->dev, "power disable failed: "
+ "alarm bit set: status 0x%.2x\n", val);
+ }
+}
+
+/**
+ * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
+ * @priv: the driver's private data structure
+ *
+ * Enable the DATA-FPGA power supplies, waiting up to 1 second for
+ * them to enable successfully.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int fpga_enable_power_supplies(struct fpga_dev *priv)
+{
+ unsigned long start = jiffies;
+
+ if (fpga_power_good(priv)) {
+ dev_dbg(priv->dev, "power was already good\n");
+ return 0;
+ }
+
+ iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
+ while (time_before(jiffies, start + HZ)) {
+ if (fpga_power_good(priv))
+ return 0;
+
+ usleep_range(5000, 10000);
+ }
+
+ return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * Determine if the FPGA power supplies are all enabled
+ */
+static bool fpga_power_enabled(struct fpga_dev *priv)
+{
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_CONTROL);
+ if (val & PWR_CONTROL_ENABLE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the FPGAs are programmed and running correctly
+ */
+static bool fpga_running(struct fpga_dev *priv)
+{
+ if (!fpga_power_good(priv))
+ return false;
+
+ /* Check the config done bit */
+ return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
+}
+
+/*
+ * FPGA Programming Code
+ */
+
+/**
+ * fpga_program_block() - put a block of data into the programmer's FIFO
+ * @priv: the driver's private data structure
+ * @buf: the data to program
+ * @count: the length of data to program (must be a multiple of 4 bytes)
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
+{
+ u32 *data = buf;
+ int size = fpga_fifo_size(priv->regs);
+ int i, len;
+ unsigned long timeout;
+
+ /* enforce correct data length for the FIFO */
+ BUG_ON(count % 4 != 0);
+
+ while (count > 0) {
+
+ /* Get the size of the block to write (maximum is FIFO_SIZE) */
+ len = min_t(size_t, count, size);
+ timeout = jiffies + HZ / 4;
+
+ /* Write the block */
+ for (i = 0; i < len / 4; i++)
+ fpga_fifo_write(priv->regs, data[i]);
+
+ /* Update the amounts left */
+ count -= len;
+ data += len / 4;
+
+ /* Wait for the fifo to empty */
+ while (true) {
+
+ if (fpga_fifo_empty(priv->regs)) {
+ break;
+ } else {
+ dev_dbg(priv->dev, "Fifo not empty\n");
+ cpu_relax();
+ }
+
+ if (fpga_config_error(priv->regs)) {
+ dev_err(priv->dev, "Error detected\n");
+ return -EIO;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(priv->dev, "Fifo drain timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(5000, 10000);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * fpga_program_cpu() - program the DATA-FPGAs using the CPU
+ * @priv: the driver's private data structure
+ *
+ * This is useful when the DMA programming method fails. It is possible to
+ * wedge the Freescale DMA controller such that the DMA programming method
+ * always fails. This method has always succeeded.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static noinline int fpga_program_cpu(struct fpga_dev *priv)
+{
+ int ret;
+
+ /* Disable the programmer */
+ fpga_programmer_disable(priv);
+
+ /* Set the total byte count */
+ fpga_set_byte_count(priv->regs, priv->bytes);
+ dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
+
+ /* Enable the controller for programming */
+ fpga_programmer_enable(priv, false);
+ dev_dbg(priv->dev, "enabled the controller\n");
+
+ /* Write each chunk of the FPGA bitfile to FPGA programmer */
+ ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes);
+ if (ret)
+ goto out_disable_controller;
+
+ /* Wait for the interrupt handler to signal that programming finished */
+ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!ret) {
+ dev_err(priv->dev, "Timed out waiting for completion\n");
+ ret = -ETIMEDOUT;
+ goto out_disable_controller;
+ }
+
+ /* Retrieve the status from the interrupt handler */
+ ret = priv->status;
+
+out_disable_controller:
+ fpga_programmer_disable(priv);
+ return ret;
+}
+
+#define FIFO_DMA_ADDRESS 0xf0003000
+#define FIFO_MAX_LEN 4096
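+
+/*
+ * FIFO_DMA_ADDRESS is assumed to be the bus address of the programmer FIFO
+ * as seen by the DMA controller (i.e. the physical base of the register
+ * block plus FPGA_FIFO_ADDRESS); FIFO_MAX_LEN bounds the length of each
+ * scatterlist segment built in fpga_program_dma() below.
+ */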
+
+/**
+ * fpga_program_dma() - program the DATA-FPGAs using the DMA engine
+ * @priv: the driver's private data structure
+ *
+ * Program the DATA-FPGAs using the Freescale DMA engine. This requires that
+ * the engine is programmed such that the hardware DMA request lines can
+ * control the entire DMA transaction. The system controller FPGA then
+ * completely offloads the programming from the CPU.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static noinline int fpga_program_dma(struct fpga_dev *priv)
+{
+ struct videobuf_dmabuf *vb = &priv->vb;
+ struct dma_chan *chan = priv->chan;
+ struct dma_async_tx_descriptor *tx;
+ size_t num_pages, len, avail = 0;
+ struct dma_slave_config config;
+ struct scatterlist *sg;
+ struct sg_table table;
+ dma_cookie_t cookie;
+ int ret, i;
+
+ /* Disable the programmer */
+ fpga_programmer_disable(priv);
+
+ /* Allocate a scatterlist for the DMA destination */
+ num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
+ ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
+ if (ret) {
+ dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ /*
+ * This is an ugly hack
+ *
+ * We fill in a scatterlist as if it were mapped for DMA. This is
+ * necessary because there exists no better structure for this
+ * inside the kernel code.
+ *
+ * As an added bonus, we can use the DMAEngine API for all of this,
+ * rather than inventing another extremely similar API.
+ */
+ avail = priv->bytes;
+ for_each_sg(table.sgl, sg, num_pages, i) {
+ len = min_t(size_t, avail, FIFO_MAX_LEN);
+ sg_dma_address(sg) = FIFO_DMA_ADDRESS;
+ sg_dma_len(sg) = len;
+
+ avail -= len;
+ }
+
+ /* Map the buffer for DMA */
+ ret = videobuf_dma_map(priv->dev, &priv->vb);
+ if (ret) {
+ dev_err(priv->dev, "Unable to map buffer for DMA\n");
+ goto out_free_table;
+ }
+
+ /*
+ * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
+ * transaction, and then put it under external control
+ */
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_TO_DEVICE;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
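+ /*
+ * dst_maxburst is specified in units of the destination bus width
+ * (4-byte words here), hence the extra divide by 4.
+ */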
+ config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
+ ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long)&config);
+ if (ret) {
+ dev_err(priv->dev, "DMA slave configuration failed\n");
+ goto out_dma_unmap;
+ }
+
+ ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
+ if (ret) {
+ dev_err(priv->dev, "DMA external control setup failed\n");
+ goto out_dma_unmap;
+ }
+
+ /* setup and submit the DMA transaction */
+ tx = chan->device->device_prep_dma_sg(chan,
+ table.sgl, num_pages,
+ vb->sglist, vb->sglen, 0);
+ if (!tx) {
+ dev_err(priv->dev, "Unable to prep DMA transaction\n");
+ ret = -ENOMEM;
+ goto out_dma_unmap;
+ }
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "Unable to submit DMA transaction\n");
+ ret = -ENOMEM;
+ goto out_dma_unmap;
+ }
+
+ dma_async_memcpy_issue_pending(chan);
+
+ /* Set the total byte count */
+ fpga_set_byte_count(priv->regs, priv->bytes);
+ dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
+
+ /* Enable the controller for DMA programming */
+ fpga_programmer_enable(priv, true);
+ dev_dbg(priv->dev, "enabled the controller\n");
+
+ /* Wait for the interrupt handler to signal that programming finished */
+ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
+ if (!ret) {
+ dev_err(priv->dev, "Timed out waiting for completion\n");
+ ret = -ETIMEDOUT;
+ goto out_disable_controller;
+ }
+
+ /* Retrieve the status from the interrupt handler */
+ ret = priv->status;
+
+out_disable_controller:
+ fpga_programmer_disable(priv);
+out_dma_unmap:
+ videobuf_dma_unmap(priv->dev, vb);
+out_free_table:
+ sg_free_table(&table);
+out_return:
+ return ret;
+}
+
+/*
+ * Interrupt Handling
+ */
+
+static irqreturn_t fpga_irq(int irq, void *dev_id)
+{
+ struct fpga_dev *priv = dev_id;
+
+ /* Save the status */
+ priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
+ dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
+ fpga_dump_registers(priv);
+
+ /* Disabling the programmer clears the interrupt */
+ fpga_programmer_disable(priv);
+
+ /* Notify any waiters */
+ complete(&priv->completion);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * SYSFS Helpers
+ */
+
+/**
+ * fpga_do_stop() - deconfigure (reset) the DATA-FPGAs
+ * @priv: the driver's private data structure
+ *
+ * LOCKING: must hold priv->lock
+ */
+static int fpga_do_stop(struct fpga_dev *priv)
+{
+ u32 val;
+
+ /* Set the led to unprogrammed */
+ ledtrig_fpga_programmed(false);
+
+ /* Pulse the config line to reset the FPGAs */
+ val = CFG_CTL_ENABLE | CFG_CTL_RESET;
+ iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
+ iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
+
+ return 0;
+}
+
+static noinline int fpga_do_program(struct fpga_dev *priv)
+{
+ int ret;
+
+ if (priv->bytes != priv->fw_size) {
+ dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
+ "should be %zu bytes\n",
+ priv->bytes, priv->fw_size);
+ return -EINVAL;
+ }
+
+ if (!fpga_power_enabled(priv)) {
+ dev_err(priv->dev, "Power not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!fpga_power_good(priv)) {
+ dev_err(priv->dev, "Power not good\n");
+ return -EINVAL;
+ }
+
+ /* Set the LED to unprogrammed */
+ ledtrig_fpga_programmed(false);
+
+ /* Try to program the FPGAs using DMA */
+ ret = fpga_program_dma(priv);
+
+ /* If DMA failed or doesn't exist, try with CPU */
+ if (ret) {
+ dev_warn(priv->dev, "Falling back to CPU programming\n");
+ ret = fpga_program_cpu(priv);
+ }
+
+ if (ret) {
+ dev_err(priv->dev, "Unable to program FPGA's\n");
+ return ret;
+ }
+
+ /* Drop the firmware bitfile from memory */
+ fpga_drop_firmware_data(priv);
+
+ dev_dbg(priv->dev, "FPGA programming successful\n");
+ ledtrig_fpga_programmed(true);
+
+ return 0;
+}
+
+/*
+ * File Operations
+ */
+
+static int fpga_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The miscdevice layer puts our struct miscdevice into the
+ * filp->private_data field. We use this to find our private
+ * data and then overwrite it with our own private structure.
+ */
+ struct fpga_dev *priv = container_of(filp->private_data,
+ struct fpga_dev, miscdev);
+ unsigned int nr_pages;
+ int ret;
+
+ /* We only allow one process at a time */
+ ret = mutex_lock_interruptible(&priv->lock);
+ if (ret)
+ return ret;
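+
+ /*
+ * Note that the mutex is held from open() until release():
+ * fpga_release() drops it, which is what enforces the
+ * one-process-at-a-time policy above.
+ */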
+
+ filp->private_data = priv;
+ kref_get(&priv->ref);
+
+ /* Truncation: drop any existing data */
+ if (filp->f_flags & O_TRUNC)
+ priv->bytes = 0;
+
+ /* Check if we have already allocated a buffer */
+ if (priv->vb_allocated)
+ return 0;
+
+ /* Allocate a buffer to hold enough data for the bitfile */
+ nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
+ ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate data buffer\n");
+ mutex_unlock(&priv->lock);
+ kref_put(&priv->ref, fpga_dev_remove);
+ return ret;
+ }
+
+ priv->vb_allocated = true;
+ return 0;
+}
+
+static int fpga_release(struct inode *inode, struct file *filp)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ mutex_unlock(&priv->lock);
+ kref_put(&priv->ref, fpga_dev_remove);
+ return 0;
+}
+
+static ssize_t fpga_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ /* FPGA bitfiles have an exact size: disallow anything else */
+ if (priv->bytes >= priv->fw_size)
+ return -ENOSPC;
+
+ count = min_t(size_t, priv->fw_size - priv->bytes, count);
+ if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count))
+ return -EFAULT;
+
+ priv->bytes += count;
+ return count;
+}
+
+static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct fpga_dev *priv = filp->private_data;
+
+ count = min_t(size_t, priv->bytes - *f_pos, count);
+ if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
+{
+ struct fpga_dev *priv = filp->private_data;
+ loff_t newpos;
+
+ /* only read-only opens are allowed to seek */
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EINVAL;
+
+ switch (origin) {
+ case SEEK_SET: /* seek relative to the beginning of the file */
+ newpos = offset;
+ break;
+ case SEEK_CUR: /* seek relative to current position in the file */
+ newpos = filp->f_pos + offset;
+ break;
+ case SEEK_END: /* seek relative to the end of the file */
+ newpos = priv->fw_size - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for sanity */
+ if (newpos > priv->fw_size)
+ return -EINVAL;
+
+ filp->f_pos = newpos;
+ return newpos;
+}
+
+static const struct file_operations fpga_fops = {
+ .open = fpga_open,
+ .release = fpga_release,
+ .write = fpga_write,
+ .read = fpga_read,
+ .llseek = fpga_llseek,
+};
+
+/*
+ * Device Attributes
+ */
+
+static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ u8 val;
+
+ val = ioread8(priv->regs + CTL_PWR_FAIL);
+ return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
+}
+
+static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
+}
+
+static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
+}
+
+static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val) {
+ ret = fpga_enable_power_supplies(priv);
+ if (ret)
+ return ret;
+ } else {
+ fpga_do_stop(priv);
+ fpga_disable_power_supplies(priv);
+ }
+
+ return count;
+}
+
+static ssize_t program_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
+}
+
+static ssize_t program_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_dev *priv = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /* We can't have an image writer and be programming simultaneously */
+ if (mutex_lock_interruptible(&priv->lock))
+ return -ERESTARTSYS;
+
+ /* Program or reset the FPGAs */
+ ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
+ if (ret)
+ goto out_unlock;
+
+ /* Success */
+ ret = count;
+
+out_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
+static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
+static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
+ penable_show, penable_store);
+
+static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
+ program_show, program_store);
+
+static struct attribute *fpga_attributes[] = {
+ &dev_attr_power_fail.attr,
+ &dev_attr_power_good.attr,
+ &dev_attr_power_enable.attr,
+ &dev_attr_program.attr,
+ NULL,
+};
+
+static const struct attribute_group fpga_attr_group = {
+ .attrs = fpga_attributes,
+};
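+
+/*
+ * Illustrative usage from userspace (sysfs paths are examples only):
+ *
+ *   echo 1 > /sys/devices/.../power_enable
+ *   cat bitfile.rbf > /dev/carma-fpga-program
+ *   echo 1 > /sys/devices/.../program
+ */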
+
+/*
+ * OpenFirmware Device Subsystem
+ */
+
+#define SYS_REG_VERSION 0x00
+#define SYS_REG_GEOGRAPHIC 0x10
+
+static bool dma_filter(struct dma_chan *chan, void *data)
+{
+ /*
+ * DMA Channel #0 is the only acceptable device
+ *
+ * This probably won't survive an unload/load cycle of the Freescale
+ * DMAEngine driver, but that won't be a problem
+ */
+ return chan->chan_id == 0 && chan->device->dev_id == 0;
+}
+
+static int fpga_of_remove(struct platform_device *op)
+{
+ struct fpga_dev *priv = dev_get_drvdata(&op->dev);
+ struct device *this_device = priv->miscdev.this_device;
+
+ sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
+ misc_deregister(&priv->miscdev);
+
+ free_irq(priv->irq, priv);
+ irq_dispose_mapping(priv->irq);
+
+ /* make sure the power supplies are off */
+ fpga_disable_power_supplies(priv);
+
+ /* unmap registers */
+ iounmap(priv->immr);
+ iounmap(priv->regs);
+
+ dma_release_channel(priv->chan);
+
+ /* drop our reference to the private data structure */
+ kref_put(&priv->ref, fpga_dev_remove);
+ return 0;
+}
+
+/* CTL-CPLD Version Register */
+#define CTL_CPLD_VERSION 0x2000
+
+static int fpga_of_probe(struct platform_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *of_node = op->dev.of_node;
+ struct device *this_device;
+ struct fpga_dev *priv;
+ dma_cap_mask_t mask;
+ u32 ver;
+ int ret;
+
+ /* Allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&op->dev, "Unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ /* Setup the miscdevice */
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = drv_name;
+ priv->miscdev.fops = &fpga_fops;
+
+ kref_init(&priv->ref);
+
+ dev_set_drvdata(&op->dev, priv);
+ priv->dev = &op->dev;
+ mutex_init(&priv->lock);
+ init_completion(&priv->completion);
+ videobuf_dma_init(&priv->vb);
+
+ dev_set_drvdata(priv->dev, priv);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(DMA_INTERRUPT, mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_SG, mask);
+
+ /* Get control of DMA channel #0 */
+ priv->chan = dma_request_channel(mask, dma_filter, NULL);
+ if (!priv->chan) {
+ dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
+ /* Remap the registers for use */
+ priv->regs = of_iomap(of_node, 0);
+ if (!priv->regs) {
+ dev_err(&op->dev, "Unable to ioremap registers\n");
+ ret = -ENOMEM;
+ goto out_dma_release_channel;
+ }
+
+ /* Remap the IMMR for use */
+ priv->immr = ioremap(get_immrbase(), 0x100000);
+ if (!priv->immr) {
+ dev_err(&op->dev, "Unable to ioremap IMMR\n");
+ ret = -ENOMEM;
+ goto out_unmap_regs;
+ }
+
+ /*
+ * Check that external DMA is configured
+ *
+ * U-Boot does this for us, but we should check it and bail out if
+ * there is a problem. Failing to have this register setup correctly
+ * will cause the DMA controller to transfer a single cacheline
+ * worth of data, then wedge itself.
+ */
+ if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
+ dev_err(&op->dev, "External DMA control not configured\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /*
+ * Check the CTL-CPLD version
+ *
+ * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
+ * don't want to run on any version of the CTL-CPLD that does not use
+ * a compatible register layout.
+ *
+ * v2: changed register layout, added power sequencer
+ * v3: added glitch filter on the i2c overcurrent/overtemp outputs
+ */
+ ver = ioread8(priv->regs + CTL_CPLD_VERSION);
+ if (ver != 0x02 && ver != 0x03) {
+ dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /* Set the exact size that the firmware image should be */
+ ver = ioread32be(priv->regs + SYS_REG_VERSION);
+ priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
+
+ /* Find the correct IRQ number */
+ priv->irq = irq_of_parse_and_map(of_node, 0);
+ if (priv->irq == NO_IRQ) {
+ dev_err(&op->dev, "Unable to find IRQ line\n");
+ ret = -ENODEV;
+ goto out_unmap_immr;
+ }
+
+ /* Request the IRQ */
+ ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
+ if (ret) {
+ dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
+ ret = -ENODEV;
+ goto out_irq_dispose_mapping;
+ }
+
+ /* Reset and stop the FPGAs, just in case */
+ fpga_do_stop(priv);
+
+ /* Register the miscdevice */
+ ret = misc_register(&priv->miscdev);
+ if (ret) {
+ dev_err(&op->dev, "Unable to register miscdevice\n");
+ goto out_free_irq;
+ }
+
+ /* Create the sysfs files */
+ this_device = priv->miscdev.this_device;
+ dev_set_drvdata(this_device, priv);
+ ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create sysfs files\n");
+ goto out_misc_deregister;
+ }
+
+ dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
+ (ver & (1 << 17)) ? "Correlator" : "Digitizer",
+ (ver & (1 << 16)) ? "B" : "A",
+ (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
+
+ return 0;
+
+out_misc_deregister:
+ misc_deregister(&priv->miscdev);
+out_free_irq:
+ free_irq(priv->irq, priv);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(priv->irq);
+out_unmap_immr:
+ iounmap(priv->immr);
+out_unmap_regs:
+ iounmap(priv->regs);
+out_dma_release_channel:
+ dma_release_channel(priv->chan);
+out_free_priv:
+ kref_put(&priv->ref, fpga_dev_remove);
+out_return:
+ return ret;
+}
+
+static struct of_device_id fpga_of_match[] = {
+ { .compatible = "carma,fpga-programmer", },
+ {},
+};
+
+static struct of_platform_driver fpga_of_driver = {
+ .probe = fpga_of_probe,
+ .remove = fpga_of_remove,
+ .driver = {
+ .name = drv_name,
+ .of_match_table = fpga_of_match,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init fpga_init(void)
+{
+ led_trigger_register_simple("fpga", &ledtrig_fpga);
+ return of_register_platform_driver(&fpga_of_driver);
+}
+
+static void __exit fpga_exit(void)
+{
+ of_unregister_platform_driver(&fpga_of_driver);
+ led_trigger_unregister_simple(ledtrig_fpga);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
+MODULE_LICENSE("GPL");
+
+module_init(fpga_init);
+module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
new file mode 100644
index 00000000000..3965821fef1
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga.c
@@ -0,0 +1,1433 @@
+/*
+ * CARMA DATA-FPGA Access Driver
+ *
+ * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * FPGA Memory Dump Format
+ *
+ * FPGA #0 control registers (32 x 32-bit words)
+ * FPGA #1 control registers (32 x 32-bit words)
+ * FPGA #2 control registers (32 x 32-bit words)
+ * FPGA #3 control registers (32 x 32-bit words)
+ * SYSFPGA control registers (32 x 32-bit words)
+ * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
+ * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
+ * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
+ * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
+ *
+ * Each correlation array consists of:
+ *
+ * Correlation Data (2 x NUM_LAGSn x 32-bit words)
+ * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
+ * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
+ *
+ * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
+ * the FPGA configuration registers. They do not change once the FPGAs
+ * have been programmed; they only change on re-programming.
+ */
+
+/*
+ * Basic Description:
+ *
+ * This driver is used to capture correlation spectra off of the four data
+ * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore
+ * this driver supports dynamic enable/disable of capture while the device
+ * remains open.
+ *
+ * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
+ * capture rate, all buffers are pre-allocated to avoid any potentially long
+ * running memory allocations while capturing.
+ *
+ * There are two lists and one pointer which are used to keep track of the
+ * different states of data buffers.
+ *
+ * 1) free list
+ * This list holds all empty data buffers which are ready to receive data.
+ *
+ * 2) inflight pointer
+ * This pointer holds the currently inflight data buffer. This buffer is having
+ * data copied into it by the DMA engine.
+ *
+ * 3) used list
+ * This list holds data buffers which have been filled, and are waiting to be
+ * read by userspace.
+ *
+ * All buffers start life on the free list, then move successively to the
+ * inflight pointer, and then to the used list. After they have been read by
+ * userspace, they are moved back to the free list. The cycle repeats as long
+ * as necessary.
+ *
+ * It should be noted that all buffers are mapped and ready for DMA when they
+ * are on either list or held by the inflight pointer. They are only unmapped
+ * while they are being read by userspace.
+ */
+
+/*
+ * Notes on the IRQ masking scheme:
+ *
+ * The IRQ masking scheme here is different than most other hardware. The only
+ * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
+ * the data is if the status registers are not cleared before the next
+ * correlation data dump is ready.
+ *
+ * The interrupt line is connected to the status registers, such that when they
+ * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
+ * to schedule a long-running DMA operation and return from the interrupt
+ * handler quickly, but we cannot clear the status registers.
+ *
+ * To handle this, the system controller FPGA has the capability to connect the
+ * interrupt line to a user-controlled GPIO pin. This pin is driven high
+ * (unasserted) and left that way. To mask the interrupt, we change the
+ * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
+ */
+
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/seq_file.h>
+#include <linux/highmem.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/io.h>
+
+#include <media/videobuf-dma-sg.h>
+
+/* system controller registers */
+#define SYS_IRQ_SOURCE_CTL 0x24
+#define SYS_IRQ_OUTPUT_EN 0x28
+#define SYS_IRQ_OUTPUT_DATA 0x2C
+#define SYS_IRQ_INPUT_DATA 0x30
+#define SYS_FPGA_CONFIG_STATUS 0x44
+
+/* GPIO IRQ line assignment */
+#define IRQ_CORL_DONE 0x10
+
+/* FPGA registers */
+#define MMAP_REG_VERSION 0x00
+#define MMAP_REG_CORL_CONF1 0x08
+#define MMAP_REG_CORL_CONF2 0x0C
+#define MMAP_REG_STATUS 0x48
+
+#define SYS_FPGA_BLOCK 0xF0000000
+
+#define DATA_FPGA_START 0x400000
+#define DATA_FPGA_SIZE 0x80000
+
+static const char drv_name[] = "carma-fpga";
+
+#define NUM_FPGA 4
+
+#define MIN_DATA_BUFS 8
+#define MAX_DATA_BUFS 64
+
+struct fpga_info {
+ unsigned int num_lag_ram;
+ unsigned int blk_size;
+};
+
+struct data_buf {
+ struct list_head entry;
+ struct videobuf_dmabuf vb;
+ size_t size;
+};
+
+struct fpga_device {
+ /* character device */
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct mutex mutex;
+
+ /* reference count */
+ struct kref ref;
+
+ /* FPGA registers and information */
+ struct fpga_info info[NUM_FPGA];
+ void __iomem *regs;
+ int irq;
+
+ /* FPGA Physical Address/Size Information */
+ resource_size_t phys_addr;
+ size_t phys_size;
+
+ /* DMA structures */
+ struct sg_table corl_table;
+ unsigned int corl_nents;
+ struct dma_chan *chan;
+
+ /* Protection for all members below */
+ spinlock_t lock;
+
+ /* Device enable/disable flag */
+ bool enabled;
+
+ /* Correlation data buffers */
+ wait_queue_head_t wait;
+ struct list_head free;
+ struct list_head used;
+ struct data_buf *inflight;
+
+ /* Information about data buffers */
+ unsigned int num_dropped;
+ unsigned int num_buffers;
+ size_t bufsize;
+ struct dentry *dbg_entry;
+};
+
+struct fpga_reader {
+ struct fpga_device *priv;
+ struct data_buf *buf;
+ off_t buf_start;
+};
+
+static void fpga_device_release(struct kref *ref)
+{
+ struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
+
+ /* the last reader has exited, cleanup the last bits */
+ mutex_destroy(&priv->mutex);
+ kfree(priv);
+}
+
+/*
+ * Data Buffer Allocation Helpers
+ */
+
+/**
+ * data_free_buffer() - free a single data buffer and all allocated memory
+ * @buf: the buffer to free
+ *
+ * This will free all of the pages allocated to the given data buffer, and
+ * then free the structure itself
+ */
+static void data_free_buffer(struct data_buf *buf)
+{
+ /* It is ok to free a NULL buffer */
+ if (!buf)
+ return;
+
+ /* free all memory */
+ videobuf_dma_free(&buf->vb);
+ kfree(buf);
+}
+
+/**
+ * data_alloc_buffer() - allocate and fill a data buffer with pages
+ * @bytes: the number of bytes required
+ *
+ * This allocates all space needed for a data buffer. It must be mapped before
+ * use in a DMA transaction using videobuf_dma_map().
+ *
+ * Returns NULL on failure
+ */
+static struct data_buf *data_alloc_buffer(const size_t bytes)
+{
+ unsigned int nr_pages;
+ struct data_buf *buf;
+ int ret;
+
+ /* calculate the number of pages necessary */
+ nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+
+ /* allocate the buffer structure */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ goto out_return;
+
+ /* initialize internal fields */
+ INIT_LIST_HEAD(&buf->entry);
+ buf->size = bytes;
+
+ /* allocate the videobuf */
+ videobuf_dma_init(&buf->vb);
+ ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
+ if (ret)
+ goto out_free_buf;
+
+ return buf;
+
+out_free_buf:
+ kfree(buf);
+out_return:
+ return NULL;
+}
+
+/**
+ * data_free_buffers() - free all allocated buffers
+ * @priv: the driver's private data structure
+ *
+ * Free all buffers allocated by the driver (except those currently in the
+ * process of being read by userspace).
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user
+ */
+static void data_free_buffers(struct fpga_device *priv)
+{
+ struct data_buf *buf, *tmp;
+
+ /* the device should be stopped, no DMA in progress */
+ BUG_ON(priv->inflight != NULL);
+
+ list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
+ list_del_init(&buf->entry);
+ videobuf_dma_unmap(priv->dev, &buf->vb);
+ data_free_buffer(buf);
+ }
+
+ list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
+ list_del_init(&buf->entry);
+ videobuf_dma_unmap(priv->dev, &buf->vb);
+ data_free_buffer(buf);
+ }
+
+ priv->num_buffers = 0;
+ priv->bufsize = 0;
+}
+
+/**
+ * data_alloc_buffers() - allocate one second's worth of data buffers
+ * @priv: the driver's private data structure
+ *
+ * Allocate enough buffers for a whole second's worth of data
+ *
+ * This routine will attempt to degrade nicely by succeeding even if a full
+ * second worth of data buffers could not be allocated, as long as a minimum
+ * number were allocated. In this case, it will print a message to the kernel
+ * log.
+ *
+ * The device must not be modifying any lists when this is called.
+ *
+ * CONTEXT: user
+ * LOCKING: must hold dev->mutex
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_alloc_buffers(struct fpga_device *priv)
+{
+ struct data_buf *buf;
+ int i, ret;
+
+ for (i = 0; i < MAX_DATA_BUFS; i++) {
+
+ /* allocate a buffer */
+ buf = data_alloc_buffer(priv->bufsize);
+ if (!buf)
+ break;
+
+ /* map it for DMA */
+ ret = videobuf_dma_map(priv->dev, &buf->vb);
+ if (ret) {
+ data_free_buffer(buf);
+ break;
+ }
+
+ /* add it to the list of free buffers */
+ list_add_tail(&buf->entry, &priv->free);
+ priv->num_buffers++;
+ }
+
+ /* Make sure we allocated the minimum required number of buffers */
+ if (priv->num_buffers < MIN_DATA_BUFS) {
+ dev_err(priv->dev, "Unable to allocate enough data buffers\n");
+ data_free_buffers(priv);
+ return -ENOMEM;
+ }
+
+ /* Warn if we are running in a degraded state, but do not fail */
+ if (priv->num_buffers < MAX_DATA_BUFS) {
+ dev_warn(priv->dev,
+ "Unable to allocate %d buffers, using %d buffers instead\n",
+ MAX_DATA_BUFS, i);
+ }
+
+ return 0;
+}
+
+/*
+ * DMA Operations Helpers
+ */
+
+/**
+ * fpga_start_addr() - get the physical address of a DATA-FPGA
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ */
+static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
+{
+ return priv->phys_addr + DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
+}
+
+/**
+ * fpga_block_addr() - get the physical address of a correlation data block
+ * @priv: the driver's private data structure
+ * @fpga: the DATA-FPGA number (zero based)
+ * @blknum: the correlation block number (zero based)
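+ *
+ * Each correlation block appears to occupy a 0x10000-byte window; the
+ * first window after the FPGA's base address is skipped because it holds
+ * the control registers.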
+ */
+static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
+ unsigned int blknum)
+{
+ return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
+}
+
+#define REG_BLOCK_SIZE (32 * 4)
+
+/**
+ * data_setup_corl_table() - create the scatterlist for correlation dumps
+ * @priv: the driver's private data structure
+ *
+ * Create the scatterlist for transferring a correlation dump from the
+ * DATA FPGAs. This structure will be reused for each buffer than needs
+ * to be filled with correlation data.
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_setup_corl_table(struct fpga_device *priv)
+{
+ struct sg_table *table = &priv->corl_table;
+ struct scatterlist *sg;
+ struct fpga_info *info;
+ int i, j, ret;
+
+ /* Calculate the number of entries needed */
+ priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
+ for (i = 0; i < NUM_FPGA; i++)
+ priv->corl_nents += priv->info[i].num_lag_ram;
+
+ /* Allocate the scatterlist table */
+ ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate DMA table\n");
+ return ret;
+ }
+
+ /* Add the DATA FPGA registers to the scatterlist */
+ sg = table->sgl;
+ for (i = 0; i < NUM_FPGA; i++) {
+ sg_dma_address(sg) = fpga_start_addr(priv, i);
+ sg_dma_len(sg) = REG_BLOCK_SIZE;
+ sg = sg_next(sg);
+ }
+
+ /* Add the SYS-FPGA registers to the scatterlist */
+ sg_dma_address(sg) = SYS_FPGA_BLOCK;
+ sg_dma_len(sg) = REG_BLOCK_SIZE;
+ sg = sg_next(sg);
+
+ /* Add the FPGA correlation data blocks to the scatterlist */
+ for (i = 0; i < NUM_FPGA; i++) {
+ info = &priv->info[i];
+ for (j = 0; j < info->num_lag_ram; j++) {
+ sg_dma_address(sg) = fpga_block_addr(priv, i, j);
+ sg_dma_len(sg) = info->blk_size;
+ sg = sg_next(sg);
+ }
+ }
+
+ /*
+ * All physical addresses and lengths are present in the structure
+ * now. It can be reused for every FPGA DATA interrupt
+ */
+ return 0;
+}
+
+/*
+ * FPGA Register Access Helpers
+ */
+
+static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
+ unsigned int reg, u32 val)
+{
+ const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
+ iowrite32be(val, priv->regs + fpga_start + reg);
+}
+
+static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
+ unsigned int reg)
+{
+ const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
+ return ioread32be(priv->regs + fpga_start + reg);
+}
+
+/**
+ * data_calculate_bufsize() - calculate the data buffer size required
+ * @priv: the driver's private data structure
+ *
+ * Calculate the total buffer size needed to hold a single block
+ * of correlation data
+ *
+ * CONTEXT: user
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_calculate_bufsize(struct fpga_device *priv)
+{
+ u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
+ u32 conf1, conf2, version;
+ u32 num_lag_ram, blk_size;
+ int i;
+
+ /* Each buffer starts with the 5 FPGA register areas */
+ priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
+
+ /* Read and store the configuration data for each FPGA */
+ for (i = 0; i < NUM_FPGA; i++) {
+ version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
+ conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
+ conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
+
+ /* minor version 2 and later */
+ if ((version & 0x000000FF) >= 2) {
+ num_corl = (conf1 & 0x000000F0) >> 4;
+ num_pack = (conf1 & 0x00000F00) >> 8;
+ num_lags = (conf1 & 0x00FFF000) >> 12;
+ num_meta = (conf1 & 0x7F000000) >> 24;
+ num_qcnt = (conf2 & 0x00000FFF) >> 0;
+ } else {
+ num_corl = (conf1 & 0x000000F0) >> 4;
+ num_pack = 1; /* implied */
+ num_lags = (conf1 & 0x000FFF00) >> 8;
+ num_meta = (conf1 & 0x7FF00000) >> 20;
+ num_qcnt = (conf2 & 0x00000FFF) >> 0;
+ }
+
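+ /*
+ * Each correlation entry is 2 x 32-bit words (see the memory dump
+ * format described at the top of this file), hence the factor of
+ * 8 bytes in blk_size.
+ */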
+ num_lag_ram = (num_corl + num_pack - 1) / num_pack;
+ blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
+
+ priv->info[i].num_lag_ram = num_lag_ram;
+ priv->info[i].blk_size = blk_size;
+ priv->bufsize += num_lag_ram * blk_size;
+
+ dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
+ dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
+ dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
+ dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
+ dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
+ dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
+ }
+
+ dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
+ return 0;
+}
+
+/*
+ * Interrupt Handling
+ */
+
+/**
+ * data_disable_interrupts() - stop the device from generating interrupts
+ * @priv: the driver's private data structure
+ *
+ * Hide interrupts by switching to GPIO interrupt source
+ *
+ * LOCKING: must hold dev->lock
+ */
+static void data_disable_interrupts(struct fpga_device *priv)
+{
+ /* hide the interrupt by switching the IRQ driver to GPIO */
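+ /*
+ * The 0x2F value is assumed to select the GPIO pin as the interrupt
+ * source; data_enable_interrupts() writes 0x3F to switch back to the
+ * FPGA corl_done line.
+ */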
+ iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
+}
+
+/**
+ * data_enable_interrupts() - allow the device to generate interrupts
+ * @priv: the driver's private data structure
+ *
+ * Unhide interrupts by switching to the FPGA interrupt source. At the
+ * same time, clear the DATA-FPGA status registers.
+ *
+ * LOCKING: must hold dev->lock
+ */
+static void data_enable_interrupts(struct fpga_device *priv)
+{
+ /* clear the actual FPGA corl_done interrupt */
+ fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
+ fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
+
+ /* flush the writes */
+ fpga_read_reg(priv, 0, MMAP_REG_STATUS);
+
+ /* switch back to the external interrupt source */
+ iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
+}
+
+/**
+ * data_dma_cb() - DMAEngine callback for DMA completion
+ * @data: the driver's private data structure
+ *
+ * Complete a DMA transfer from the DATA-FPGAs
+ *
+ * This is called via the DMA callback mechanism, and will handle moving the
+ * completed DMA transaction to the used list, and then wake any processes
+ * waiting for new data
+ *
+ * CONTEXT: any, softirq expected
+ */
+static void data_dma_cb(void *data)
+{
+ struct fpga_device *priv = data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* If there is no inflight buffer, we've got a bug */
+ BUG_ON(priv->inflight == NULL);
+
+ /* Move the inflight buffer onto the used list */
+ list_move_tail(&priv->inflight->entry, &priv->used);
+ priv->inflight = NULL;
+
+ /* clear the FPGA status and re-enable interrupts */
+ data_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * We've changed both the inflight and used lists, so we need
+ * to wake up any processes that are blocking for those events
+ */
+ wake_up(&priv->wait);
+}
+
+/**
+ * data_submit_dma() - prepare and submit the required DMA to fill a buffer
+ * @priv: the driver's private data structure
+ * @buf: the data buffer
+ *
+ * Prepare and submit the necessary DMA transactions to fill a correlation
+ * data buffer.
+ *
+ * LOCKING: must hold dev->lock
+ * CONTEXT: hardirq only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
+{
+ struct scatterlist *dst_sg, *src_sg;
+ unsigned int dst_nents, src_nents;
+ struct dma_chan *chan = priv->chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ dma_addr_t dst, src;
+
+ dst_sg = buf->vb.sglist;
+ dst_nents = buf->vb.sglen;
+
+ src_sg = priv->corl_table.sgl;
+ src_nents = priv->corl_nents;
+
+ /*
+ * All buffers passed to this function should be ready and mapped
+ * for DMA already. Therefore, we don't need to do anything except
+ * submit it to the Freescale DMA Engine for processing
+ */
+
+ /* setup the scatterlist to scatterlist transfer */
+ tx = chan->device->device_prep_dma_sg(chan,
+ dst_sg, dst_nents,
+ src_sg, src_nents,
+ 0);
+ if (!tx) {
+ dev_err(priv->dev, "unable to prep scatterlist DMA\n");
+ return -ENOMEM;
+ }
+
+ /* submit the transaction to the DMA controller */
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "unable to submit scatterlist DMA\n");
+ return -ENOMEM;
+ }
+
+ /* Prepare the re-read of the SYS-FPGA block */
+ dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
+ src = SYS_FPGA_BLOCK;
+ tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
+ REG_BLOCK_SIZE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the callback */
+ tx->callback = data_dma_cb;
+ tx->callback_param = priv;
+
+ /* submit the transaction to the DMA controller */
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define CORL_DONE 0x1
+#define CORL_ERR 0x2
+
+static irqreturn_t data_irq(int irq, void *dev_id)
+{
+ struct fpga_device *priv = dev_id;
+ bool submitted = false;
+ struct data_buf *buf;
+ u32 status;
+ int i;
+
+ /* detect spurious interrupts via FPGA status */
+ for (i = 0; i < NUM_FPGA; i++) {
+ status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
+ if (!(status & (CORL_DONE | CORL_ERR))) {
+ dev_err(priv->dev, "spurious irq detected (FPGA)\n");
+ return IRQ_NONE;
+ }
+ }
+
+ /* detect spurious interrupts via raw IRQ pin readback */
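+ /*
+ * The corl_done line appears to be active-low (the probe routine drives
+ * the GPIO high for "no interrupt"), so if the raw input still reads
+ * high, this interrupt cannot have come from the DATA-FPGAs.
+ */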
+ status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
+ if (status & IRQ_CORL_DONE) {
+ dev_err(priv->dev, "spurious irq detected (IRQ)\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock(&priv->lock);
+
+ /* hide the interrupt by switching the IRQ driver to GPIO */
+ data_disable_interrupts(priv);
+
+ /* If there are no free buffers, drop this data */
+ if (list_empty(&priv->free)) {
+ priv->num_dropped++;
+ goto out;
+ }
+
+ buf = list_first_entry(&priv->free, struct data_buf, entry);
+ list_del_init(&buf->entry);
+ BUG_ON(buf->size != priv->bufsize);
+
+ /* Submit a DMA transfer to get the correlation data */
+ if (data_submit_dma(priv, buf)) {
+ dev_err(priv->dev, "Unable to setup DMA transfer\n");
+ list_move_tail(&buf->entry, &priv->free);
+ goto out;
+ }
+
+ /* Save the buffer for the DMA callback */
+ priv->inflight = buf;
+ submitted = true;
+
+ /* Start the DMA Engine */
+ dma_async_memcpy_issue_pending(priv->chan);
+
+out:
+ /* If no DMA was submitted, re-enable interrupts */
+ if (!submitted)
+ data_enable_interrupts(priv);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Realtime Device Enable Helpers
+ */
+
+/**
+ * data_device_enable() - enable the device for buffered dumping
+ * @priv: the driver's private data structure
+ *
+ * Enable the device for buffered dumping. Allocates buffers and hooks up
+ * the interrupt handler. When this finishes, data will come pouring in.
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user context only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_device_enable(struct fpga_device *priv)
+{
+ u32 val;
+ int ret;
+
+ /* multiple enables are safe: they do nothing */
+ if (priv->enabled)
+ return 0;
+
+ /* check that the FPGAs are programmed */
+ val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
+ if (!(val & (1 << 18))) {
+ dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
+ return -ENODATA;
+ }
+
+ /* read the FPGAs to calculate the buffer size */
+ ret = data_calculate_bufsize(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to calculate buffer size\n");
+ goto out_error;
+ }
+
+ /* allocate the correlation data buffers */
+ ret = data_alloc_buffers(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to allocate buffers\n");
+ goto out_error;
+ }
+
+ /* setup the source scatterlist for dumping correlation data */
+ ret = data_setup_corl_table(priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to setup correlation DMA table\n");
+ goto out_error;
+ }
+
+ /* hookup the irq handler */
+ ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to request IRQ handler\n");
+ goto out_error;
+ }
+
+ /* switch to the external FPGA IRQ line */
+ data_enable_interrupts(priv);
+
+ /* success, we're enabled */
+ priv->enabled = true;
+ return 0;
+
+out_error:
+ sg_free_table(&priv->corl_table);
+ priv->corl_nents = 0;
+
+ data_free_buffers(priv);
+ return ret;
+}
+
+/**
+ * data_device_disable() - disable the device for buffered dumping
+ * @priv: the driver's private data structure
+ *
+ * Disable the device for buffered dumping. Stops new DMA transactions from
+ * being generated, waits for all outstanding DMA to complete, and then frees
+ * all buffers.
+ *
+ * LOCKING: must hold dev->mutex
+ * CONTEXT: user only
+ *
+ * Returns 0 on success, -ERRNO otherwise
+ */
+static int data_device_disable(struct fpga_device *priv)
+{
+ int ret;
+
+ /* allow multiple disable */
+ if (!priv->enabled)
+ return 0;
+
+ /* switch to the internal GPIO IRQ line */
+ data_disable_interrupts(priv);
+
+ /* unhook the irq handler */
+ free_irq(priv->irq, priv);
+
+ /*
+ * wait for all outstanding DMA to complete
+ *
+ * Device interrupts are disabled; therefore, another buffer cannot
+ * be marked inflight.
+ */
+ ret = wait_event_interruptible(priv->wait, priv->inflight == NULL);
+ if (ret)
+ return ret;
+
+ /* free the correlation table */
+ sg_free_table(&priv->corl_table);
+ priv->corl_nents = 0;
+
+ /*
+ * We are taking the spinlock not to protect priv->enabled, but instead
+ * to make sure that there are no readers in the process of altering
+ * the free or used lists while we are setting this flag.
+ */
+ spin_lock_irq(&priv->lock);
+ priv->enabled = false;
+ spin_unlock_irq(&priv->lock);
+
+ /* free all buffers: the free and used lists are not being changed */
+ data_free_buffers(priv);
+ return 0;
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Count the number of entries in the given list
+ */
+static unsigned int list_num_entries(struct list_head *list)
+{
+ struct list_head *entry;
+ unsigned int ret = 0;
+
+ list_for_each(entry, list)
+ ret++;
+
+ return ret;
+}
+
+static int data_debug_show(struct seq_file *f, void *offset)
+{
+ struct fpga_device *priv = f->private;
+ int ret;
+
+ /*
+ * Lock the mutex first, so that we get an accurate value for enable
+ * Lock the spinlock next, to get accurate list counts
+ */
+ ret = mutex_lock_interruptible(&priv->mutex);
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&priv->lock);
+
+ seq_printf(f, "enabled: %d\n", priv->enabled);
+ seq_printf(f, "bufsize: %d\n", priv->bufsize);
+ seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
+ seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
+ seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
+ seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
+ seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
+
+ spin_unlock_irq(&priv->lock);
+ mutex_unlock(&priv->mutex);
+ return 0;
+}
+
+static int data_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, data_debug_show, inode->i_private);
+}
+
+static const struct file_operations data_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = data_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int data_debugfs_init(struct fpga_device *priv)
+{
+ priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
+ &data_debug_fops);
+ if (IS_ERR(priv->dbg_entry))
+ return PTR_ERR(priv->dbg_entry);
+
+ return 0;
+}
+
+static void data_debugfs_exit(struct fpga_device *priv)
+{
+ debugfs_remove(priv->dbg_entry);
+}
+
+#else
+
+static inline int data_debugfs_init(struct fpga_device *priv)
+{
+ return 0;
+}
+
+static inline void data_debugfs_exit(struct fpga_device *priv)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * SYSFS Attributes
+ */
+
+static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fpga_device *priv = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
+}
+
+static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_device *priv = dev_get_drvdata(dev);
+ unsigned long enable;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &enable);
+ if (ret) {
+ dev_err(priv->dev, "unable to parse enable input\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&priv->mutex);
+ if (ret)
+ return ret;
+
+ if (enable)
+ ret = data_device_enable(priv);
+ else
+ ret = data_device_disable(priv);
+
+ if (ret) {
+ dev_err(priv->dev, "device %s failed\n",
+ enable ? "enable" : "disable");
+ count = ret;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
+
+static struct attribute *data_sysfs_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group rt_sysfs_attr_group = {
+ .attrs = data_sysfs_attrs,
+};
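+
+/*
+ * Illustrative usage from userspace (the sysfs path is an example only):
+ *
+ *   echo 1 > /sys/devices/.../enable
+ *   cat /dev/carma-fpga > correlation.dump
+ */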
+
+/*
+ * FPGA Realtime Data Character Device
+ */
+
+static int data_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * The miscdevice layer puts our struct miscdevice into the
+ * filp->private_data field. We use this to find our private
+ * data and then overwrite it with our own private structure.
+ */
+ struct fpga_device *priv = container_of(filp->private_data,
+ struct fpga_device, miscdev);
+ struct fpga_reader *reader;
+ int ret;
+
+ /* allocate private data */
+ reader = kzalloc(sizeof(*reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->priv = priv;
+ reader->buf = NULL;
+
+ filp->private_data = reader;
+ ret = nonseekable_open(inode, filp);
+ if (ret) {
+ dev_err(priv->dev, "nonseekable-open failed\n");
+ kfree(reader);
+ return ret;
+ }
+
+ /*
+ * success, increase the reference count of the private data structure
+ * so that it doesn't disappear if the device is unbound
+ */
+ kref_get(&priv->ref);
+ return 0;
+}
+
+static int data_release(struct inode *inode, struct file *filp)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+
+ /* free the per-reader structure */
+ data_free_buffer(reader->buf);
+ kfree(reader);
+ filp->private_data = NULL;
+
+ /* decrement our reference count to the private data */
+ kref_put(&priv->ref, fpga_device_release);
+ return 0;
+}
+
+static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *f_pos)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ struct list_head *used = &priv->used;
+ struct data_buf *dbuf;
+ size_t avail;
+ void *data;
+ int ret;
+
+ /* check if we already have a partial buffer */
+ if (reader->buf) {
+ dbuf = reader->buf;
+ goto have_buffer;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ /* Block until there is at least one buffer on the used list */
+ while (list_empty(used)) {
+ spin_unlock_irq(&priv->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(priv->wait, !list_empty(used));
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&priv->lock);
+ }
+
+ /* Grab the first buffer off of the used list */
+ dbuf = list_first_entry(used, struct data_buf, entry);
+ list_del_init(&dbuf->entry);
+
+ spin_unlock_irq(&priv->lock);
+
+ /* Buffers are always mapped: unmap it */
+ videobuf_dma_unmap(priv->dev, &dbuf->vb);
+
+ /* save the buffer for later */
+ reader->buf = dbuf;
+ reader->buf_start = 0;
+
+have_buffer:
+ /* Get the number of bytes available */
+ avail = dbuf->size - reader->buf_start;
+ data = dbuf->vb.vaddr + reader->buf_start;
+
+ /* Get the number of bytes we can transfer */
+ count = min(count, avail);
+
+ /* Copy the data to the userspace buffer */
+ if (copy_to_user(ubuf, data, count))
+ return -EFAULT;
+
+ /* Update the amount of available space */
+ avail -= count;
+
+ /*
+ * If there is still some data available, save the buffer for the
+ * next userspace call to read() and return
+ */
+ if (avail > 0) {
+ reader->buf_start += count;
+ reader->buf = dbuf;
+ return count;
+ }
+
+ /*
+ * Get the buffer ready to be reused for DMA
+ *
+ * If it fails, we pretend that the read never happened and return
+ * -EFAULT to userspace. The read will be retried.
+ */
+ ret = videobuf_dma_map(priv->dev, &dbuf->vb);
+ if (ret) {
+ dev_err(priv->dev, "unable to remap buffer for DMA\n");
+ return -EFAULT;
+ }
+
+ /* Lock against concurrent enable/disable */
+ spin_lock_irq(&priv->lock);
+
+ /* the reader is finished with this buffer */
+ reader->buf = NULL;
+
+ /*
+ * One of two things has happened: either the device is disabled, or
+ * the device has been reconfigured underneath us. In either case, we
+ * should just throw away the buffer.
+ */
+ if (!priv->enabled || dbuf->size != priv->bufsize) {
+ videobuf_dma_unmap(priv->dev, &dbuf->vb);
+ data_free_buffer(dbuf);
+ goto out_unlock;
+ }
+
+ /* The buffer is safe to reuse, so add it back to the free list */
+ list_add_tail(&dbuf->entry, &priv->free);
+
+out_unlock:
+ spin_unlock_irq(&priv->lock);
+ return count;
+}
+
+static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &priv->wait, tbl);
+
+ if (!list_empty(&priv->used))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static int data_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct fpga_reader *reader = filp->private_data;
+ struct fpga_device *priv = reader->priv;
+ unsigned long offset, vsize, psize, addr;
+
+ /* VMA properties */
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ vsize = vma->vm_end - vma->vm_start;
+ psize = priv->phys_size - offset;
+ addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
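+
+ /* addr is now the page frame number of the start of the requested window */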
+
+ /* Check against the FPGA region's physical memory size */
+ if (vsize > psize) {
+ dev_err(priv->dev, "requested mmap mapping too large\n");
+ return -EINVAL;
+ }
+
+ /* IO memory (stop caching) */
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
+ vma->vm_page_prot);
+}
+
+static const struct file_operations data_fops = {
+ .owner = THIS_MODULE,
+ .open = data_open,
+ .release = data_release,
+ .read = data_read,
+ .poll = data_poll,
+ .mmap = data_mmap,
+ .llseek = no_llseek,
+};
+
+/*
+ * OpenFirmware Device Subsystem
+ */
+
+static bool dma_filter(struct dma_chan *chan, void *data)
+{
+ /*
+ * DMA Channel #0 is used for the FPGA Programmer, so ignore it
+ *
+ * This probably won't survive an unload/load cycle of the Freescale
+ * DMAEngine driver, but that won't be a problem
+ */
+ if (chan->chan_id == 0 && chan->device->dev_id == 0)
+ return false;
+
+ return true;
+}
+
+static int data_of_probe(struct platform_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *of_node = op->dev.of_node;
+ struct device *this_device;
+ struct fpga_device *priv;
+ struct resource res;
+ dma_cap_mask_t mask;
+ int ret;
+
+ /* Allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&op->dev, "Unable to allocate device private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ dev_set_drvdata(&op->dev, priv);
+ priv->dev = &op->dev;
+ kref_init(&priv->ref);
+ mutex_init(&priv->mutex);
+
+ spin_lock_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->free);
+ INIT_LIST_HEAD(&priv->used);
+ init_waitqueue_head(&priv->wait);
+
+ /* Setup the misc device */
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = drv_name;
+ priv->miscdev.fops = &data_fops;
+
+ /* Get the physical address of the FPGA registers */
+ ret = of_address_to_resource(of_node, 0, &res);
+ if (ret) {
+ dev_err(&op->dev, "Unable to find FPGA physical address\n");
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
+ priv->phys_addr = res.start;
+ priv->phys_size = resource_size(&res);
+
+ /* ioremap the registers for use */
+ priv->regs = of_iomap(of_node, 0);
+ if (!priv->regs) {
+ dev_err(&op->dev, "Unable to ioremap registers\n");
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
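+ /*
+ * Build the DMA capability mask: the device needs memcpy, interrupt,
+ * slave, and scatter-gather transfer support
+ */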
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ dma_cap_set(DMA_INTERRUPT, mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_SG, mask);
+
+ /* Request a DMA channel */
+ priv->chan = dma_request_channel(mask, dma_filter, NULL);
+ if (!priv->chan) {
+ dev_err(&op->dev, "Unable to request DMA channel\n");
+ ret = -ENODEV;
+ goto out_unmap_regs;
+ }
+
+ /* Find the correct IRQ number */
+ priv->irq = irq_of_parse_and_map(of_node, 0);
+ if (priv->irq == NO_IRQ) {
+ dev_err(&op->dev, "Unable to find IRQ line\n");
+ ret = -ENODEV;
+ goto out_release_dma;
+ }
+
+ /* Drive the GPIO for FPGA IRQ high (no interrupt) */
+ iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
+
+ /* Register the miscdevice */
+ ret = misc_register(&priv->miscdev);
+ if (ret) {
+ dev_err(&op->dev, "Unable to register miscdevice\n");
+ goto out_irq_dispose_mapping;
+ }
+
+ /* Create the debugfs files */
+ ret = data_debugfs_init(priv);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create debugfs files\n");
+ goto out_misc_deregister;
+ }
+
+ /* Create the sysfs files */
+ this_device = priv->miscdev.this_device;
+ dev_set_drvdata(this_device, priv);
+ ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
+ if (ret) {
+ dev_err(&op->dev, "Unable to create sysfs files\n");
+ goto out_data_debugfs_exit;
+ }
+
+ dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
+ return 0;
+
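+ /* error unwind: undo each setup step in reverse order */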
+out_data_debugfs_exit:
+ data_debugfs_exit(priv);
+out_misc_deregister:
+ misc_deregister(&priv->miscdev);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(priv->irq);
+out_release_dma:
+ dma_release_channel(priv->chan);
+out_unmap_regs:
+ iounmap(priv->regs);
+out_free_priv:
+ kref_put(&priv->ref, fpga_device_release);
+out_return:
+ return ret;
+}
+
+static int data_of_remove(struct platform_device *op)
+{
+ struct fpga_device *priv = dev_get_drvdata(&op->dev);
+ struct device *this_device = priv->miscdev.this_device;
+
+ /* remove all sysfs files; now the device cannot be re-enabled */
+ sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
+
+ /* remove all debugfs files */
+ data_debugfs_exit(priv);
+
+ /* disable the device from generating data */
+ data_device_disable(priv);
+
+ /* remove the character device to stop new readers from appearing */
+ misc_deregister(&priv->miscdev);
+
+ /* cleanup everything not needed by readers */
+ irq_dispose_mapping(priv->irq);
+ dma_release_channel(priv->chan);
+ iounmap(priv->regs);
+
+ /* release our reference */
+ kref_put(&priv->ref, fpga_device_release);
+ return 0;
+}
+
+static struct of_device_id data_of_match[] = {
+ { .compatible = "carma,carma-fpga", },
+ {},
+};
+
+static struct of_platform_driver data_of_driver = {
+ .probe = data_of_probe,
+ .remove = data_of_remove,
+ .driver = {
+ .name = drv_name,
+ .of_match_table = data_of_match,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init data_init(void)
+{
+ return of_register_platform_driver(&data_of_driver);
+}
+
+static void __exit data_exit(void)
+{
+ of_unregister_platform_driver(&data_of_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
+MODULE_LICENSE("GPL");
+
+module_init(data_init);
+module_exit(data_exit);