Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/8253pit.h  3
-rw-r--r--  arch/powerpc/include/asm/8xx_immap.h  8
-rw-r--r--  arch/powerpc/include/asm/Kbuild  42
-rw-r--r--  arch/powerpc/include/asm/abs_addr.h  75
-rw-r--r--  arch/powerpc/include/asm/apm82181-adma.h  311
-rw-r--r--  arch/powerpc/include/asm/archrandom.h  50
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h  12
-rw-r--r--  arch/powerpc/include/asm/atomic.h  139
-rw-r--r--  arch/powerpc/include/asm/auxvec.h  19
-rw-r--r--  arch/powerpc/include/asm/barrier.h  90
-rw-r--r--  arch/powerpc/include/asm/bitops.h  129
-rw-r--r--  arch/powerpc/include/asm/bitsperlong.h  12
-rw-r--r--  arch/powerpc/include/asm/bootx.h  123
-rw-r--r--  arch/powerpc/include/asm/btext.h  1
-rw-r--r--  arch/powerpc/include/asm/bug.h  11
-rw-r--r--  arch/powerpc/include/asm/byteorder.h  12
-rw-r--r--  arch/powerpc/include/asm/cache.h  30
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h  8
-rw-r--r--  arch/powerpc/include/asm/checksum.h  5
-rw-r--r--  arch/powerpc/include/asm/clk_interface.h  20
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h  310
-rw-r--r--  arch/powerpc/include/asm/code-patching.h  62
-rw-r--r--  arch/powerpc/include/asm/compat.h  74
-rw-r--r--  arch/powerpc/include/asm/context_tracking.h  10
-rw-r--r--  arch/powerpc/include/asm/cpm.h  2
-rw-r--r--  arch/powerpc/include/asm/cpm1.h  2
-rw-r--r--  arch/powerpc/include/asm/cpm2.h  1
-rw-r--r--  arch/powerpc/include/asm/cputable.h  270
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h  36
-rw-r--r--  arch/powerpc/include/asm/cputime.h  84
-rw-r--r--  arch/powerpc/include/asm/dbell.h  40
-rw-r--r--  arch/powerpc/include/asm/dcr-mmio.h  4
-rw-r--r--  arch/powerpc/include/asm/debug.h  60
-rw-r--r--  arch/powerpc/include/asm/device.h  17
-rw-r--r--  arch/powerpc/include/asm/disassemble.h  38
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  34
-rw-r--r--  arch/powerpc/include/asm/dma.h  10
-rw-r--r--  arch/powerpc/include/asm/eeh.h  302
-rw-r--r--  arch/powerpc/include/asm/eeh_event.h  35
-rw-r--r--  arch/powerpc/include/asm/ehv_pic.h  40
-rw-r--r--  arch/powerpc/include/asm/elf.h  322
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h  14
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h  575
-rw-r--r--  arch/powerpc/include/asm/errno.h  11
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h  49
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h  565
-rw-r--r--  arch/powerpc/include/asm/exec.h  9
-rw-r--r--  arch/powerpc/include/asm/fadump.h  217
-rw-r--r--  arch/powerpc/include/asm/fcntl.h  11
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h  40
-rw-r--r--  arch/powerpc/include/asm/firmware.h  28
-rw-r--r--  arch/powerpc/include/asm/fixmap.h  44
-rw-r--r--  arch/powerpc/include/asm/floppy.h  4
-rw-r--r--  arch/powerpc/include/asm/fsl_gtm.h  2
-rw-r--r--  arch/powerpc/include/asm/fsl_guts.h  38
-rw-r--r--  arch/powerpc/include/asm/fsl_hcalls.h  655
-rw-r--r--  arch/powerpc/include/asm/fsl_lbc.h  11
-rw-r--r--  arch/powerpc/include/asm/fsl_pamu_stash.h  39
-rw-r--r--  arch/powerpc/include/asm/ftrace.h  16
-rw-r--r--  arch/powerpc/include/asm/futex.h  34
-rw-r--r--  arch/powerpc/include/asm/gpio.h  57
-rw-r--r--  arch/powerpc/include/asm/hardirq.h  6
-rw-r--r--  arch/powerpc/include/asm/highmem.h  2
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h  124
-rw-r--r--  arch/powerpc/include/asm/hvcall.h  104
-rw-r--r--  arch/powerpc/include/asm/hvsi.h  94
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h  38
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  82
-rw-r--r--  arch/powerpc/include/asm/i8259.h  2
-rw-r--r--  arch/powerpc/include/asm/ibmebus.h  4
-rw-r--r--  arch/powerpc/include/asm/immap_qe.h  27
-rw-r--r--  arch/powerpc/include/asm/io-workarounds.h  48
-rw-r--r--  arch/powerpc/include/asm/io.h  163
-rw-r--r--  arch/powerpc/include/asm/io_event_irq.h  54
-rw-r--r--  arch/powerpc/include/asm/ioctl.h  13
-rw-r--r--  arch/powerpc/include/asm/ioctls.h  114
-rw-r--r--  arch/powerpc/include/asm/iommu.h  107
-rw-r--r--  arch/powerpc/include/asm/ipcbuf.h  34
-rw-r--r--  arch/powerpc/include/asm/irq.h  279
-rw-r--r--  arch/powerpc/include/asm/irqflags.h  62
-rw-r--r--  arch/powerpc/include/asm/iseries/alpaca.h  31
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call.h  111
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_event.h  201
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_sc.h  50
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_xm.h  61
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_lp_config.h  128
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_lp_event.h  162
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_types.h  112
-rw-r--r--  arch/powerpc/include/asm/iseries/iommu.h  37
-rw-r--r--  arch/powerpc/include/asm/iseries/it_lp_queue.h  78
-rw-r--r--  arch/powerpc/include/asm/iseries/lpar_map.h  85
-rw-r--r--  arch/powerpc/include/asm/iseries/mf.h  51
-rw-r--r--  arch/powerpc/include/asm/iseries/vio.h  265
-rw-r--r--  arch/powerpc/include/asm/jump_label.h  45
-rw-r--r--  arch/powerpc/include/asm/kdump.h  14
-rw-r--r--  arch/powerpc/include/asm/kexec.h  9
-rw-r--r--  arch/powerpc/include/asm/keylargo.h  4
-rw-r--r--  arch/powerpc/include/asm/kmap_types.h  31
-rw-r--r--  arch/powerpc/include/asm/kprobes.h  22
-rw-r--r--  arch/powerpc/include/asm/kvm.h  91
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h  1
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h  62
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  217
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h  9
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  395
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  73
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h  17
-rw-r--r--  arch/powerpc/include/asm/kvm_booke_hv_asm.h  69
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h  70
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  485
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h  117
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  462
-rw-r--r--  arch/powerpc/include/asm/linkage.h  11
-rw-r--r--  arch/powerpc/include/asm/local.h  2
-rw-r--r--  arch/powerpc/include/asm/lppaca.h  236
-rw-r--r--  arch/powerpc/include/asm/lv1call.h  17
-rw-r--r--  arch/powerpc/include/asm/machdep.h  155
-rw-r--r--  arch/powerpc/include/asm/macio.h  2
-rw-r--r--  arch/powerpc/include/asm/mce.h  198
-rw-r--r--  arch/powerpc/include/asm/memblock.h  8
-rw-r--r--  arch/powerpc/include/asm/mman.h  27
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h  90
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h  324
-rw-r--r--  arch/powerpc/include/asm/mmu.h  96
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h  14
-rw-r--r--  arch/powerpc/include/asm/mmzone.h  12
-rw-r--r--  arch/powerpc/include/asm/module.h  16
-rw-r--r--  arch/powerpc/include/asm/mpc5121.h  41
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h  2
-rw-r--r--  arch/powerpc/include/asm/mpc52xx_psc.h  49
-rw-r--r--  arch/powerpc/include/asm/mpc85xx.h  92
-rw-r--r--  arch/powerpc/include/asm/mpic.h  88
-rw-r--r--  arch/powerpc/include/asm/mpic_msgr.h  134
-rw-r--r--  arch/powerpc/include/asm/mpic_timer.h  46
-rw-r--r--  arch/powerpc/include/asm/msgbuf.h  33
-rw-r--r--  arch/powerpc/include/asm/mutex.h  10
-rw-r--r--  arch/powerpc/include/asm/nvram.h  104
-rw-r--r--  arch/powerpc/include/asm/opal.h  917
-rw-r--r--  arch/powerpc/include/asm/oprofile_impl.h  2
-rw-r--r--  arch/powerpc/include/asm/pSeries_reconfig.h  30
-rw-r--r--  arch/powerpc/include/asm/paca.h  64
-rw-r--r--  arch/powerpc/include/asm/page.h  156
-rw-r--r--  arch/powerpc/include/asm/page_64.h  49
-rw-r--r--  arch/powerpc/include/asm/param.h  1
-rw-r--r--  arch/powerpc/include/asm/parport.h  6
-rw-r--r--  arch/powerpc/include/asm/pasemi_dma.h  2
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h  141
-rw-r--r--  arch/powerpc/include/asm/pci.h  33
-rw-r--r--  arch/powerpc/include/asm/perf_event.h  5
-rw-r--r--  arch/powerpc/include/asm/perf_event_fsl_emb.h  2
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h  46
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h  43
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h  162
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h  31
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-4k.h  4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h  9
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h  307
-rw-r--r--  arch/powerpc/include/asm/pgtable.h  146
-rw-r--r--  arch/powerpc/include/asm/phyp_dump.h  47
-rw-r--r--  arch/powerpc/include/asm/plpar_wrappers.h  326
-rw-r--r--  arch/powerpc/include/asm/pmac_feature.h  4
-rw-r--r--  arch/powerpc/include/asm/poll.h  1
-rw-r--r--  arch/powerpc/include/asm/posix_types.h  128
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  285
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h  99
-rw-r--r--  arch/powerpc/include/asm/ppc4xx_ocm.h  45
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h  358
-rw-r--r--  arch/powerpc/include/asm/probes.h  67
-rw-r--r--  arch/powerpc/include/asm/processor.h  219
-rw-r--r--  arch/powerpc/include/asm/prom.h  188
-rw-r--r--  arch/powerpc/include/asm/ps3.h  5
-rw-r--r--  arch/powerpc/include/asm/ps3fb.h  45
-rw-r--r--  arch/powerpc/include/asm/pte-book3e.h  5
-rw-r--r--  arch/powerpc/include/asm/pte-common.h  6
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h  24
-rw-r--r--  arch/powerpc/include/asm/pte-hash64.h  8
-rw-r--r--  arch/powerpc/include/asm/ptrace.h  296
-rw-r--r--  arch/powerpc/include/asm/qe.h  5
-rw-r--r--  arch/powerpc/include/asm/qe_ic.h  21
-rw-r--r--  arch/powerpc/include/asm/reg.h  430
-rw-r--r--  arch/powerpc/include/asm/reg_a2.h  156
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h  105
-rw-r--r--  arch/powerpc/include/asm/reg_fsl_emb.h  24
-rw-r--r--  arch/powerpc/include/asm/resource.h  1
-rw-r--r--  arch/powerpc/include/asm/rio.h  5
-rw-r--r--  arch/powerpc/include/asm/rtas.h  210
-rw-r--r--  arch/powerpc/include/asm/runlatch.h  45
-rw-r--r--  arch/powerpc/include/asm/rwsem.h  183
-rw-r--r--  arch/powerpc/include/asm/scom.h  167
-rw-r--r--  arch/powerpc/include/asm/seccomp.h  16
-rw-r--r--  arch/powerpc/include/asm/sections.h  30
-rw-r--r--  arch/powerpc/include/asm/sembuf.h  36
-rw-r--r--  arch/powerpc/include/asm/setup.h  30
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h  2
-rw-r--r--  arch/powerpc/include/asm/shmbuf.h  59
-rw-r--r--  arch/powerpc/include/asm/sigcontext.h  87
-rw-r--r--  arch/powerpc/include/asm/siginfo.h  21
-rw-r--r--  arch/powerpc/include/asm/signal.h  147
-rw-r--r--  arch/powerpc/include/asm/smp.h  55
-rw-r--r--  arch/powerpc/include/asm/smu.h  6
-rw-r--r--  arch/powerpc/include/asm/socket.h  72
-rw-r--r--  arch/powerpc/include/asm/sockios.h  20
-rw-r--r--  arch/powerpc/include/asm/sparsemem.h  6
-rw-r--r--  arch/powerpc/include/asm/spinlock.h  23
-rw-r--r--  arch/powerpc/include/asm/spu.h  26
-rw-r--r--  arch/powerpc/include/asm/spu_info.h  29
-rw-r--r--  arch/powerpc/include/asm/spu_priv1.h  2
-rw-r--r--  arch/powerpc/include/asm/stat.h  81
-rw-r--r--  arch/powerpc/include/asm/statfs.h  6
-rw-r--r--  arch/powerpc/include/asm/suspend.h  6
-rw-r--r--  arch/powerpc/include/asm/swab.h  58
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h  6
-rw-r--r--  arch/powerpc/include/asm/switch_to.h  101
-rw-r--r--  arch/powerpc/include/asm/synch.h  9
-rw-r--r--  arch/powerpc/include/asm/syscall.h  5
-rw-r--r--  arch/powerpc/include/asm/syscalls.h  27
-rw-r--r--  arch/powerpc/include/asm/systbl.h  90
-rw-r--r--  arch/powerpc/include/asm/system.h  548
-rw-r--r--  arch/powerpc/include/asm/tce.h  10
-rw-r--r--  arch/powerpc/include/asm/termbits.h  210
-rw-r--r--  arch/powerpc/include/asm/termios.h  69
-rw-r--r--  arch/powerpc/include/asm/thread_info.h  86
-rw-r--r--  arch/powerpc/include/asm/time.h  25
-rw-r--r--  arch/powerpc/include/asm/timex.h  8
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h  12
-rw-r--r--  arch/powerpc/include/asm/tm.h  27
-rw-r--r--  arch/powerpc/include/asm/topology.h  85
-rw-r--r--  arch/powerpc/include/asm/trace.h  45
-rw-r--r--  arch/powerpc/include/asm/types.h  40
-rw-r--r--  arch/powerpc/include/asm/uaccess.h  68
-rw-r--r--  arch/powerpc/include/asm/ucc.h  2
-rw-r--r--  arch/powerpc/include/asm/ucc_fast.h  4
-rw-r--r--  arch/powerpc/include/asm/ucc_slow.h  4
-rw-r--r--  arch/powerpc/include/asm/ucontext.h  40
-rw-r--r--  arch/powerpc/include/asm/udbg.h  18
-rw-r--r--  arch/powerpc/include/asm/unaligned.h  7
-rw-r--r--  arch/powerpc/include/asm/uninorth.h  2
-rw-r--r--  arch/powerpc/include/asm/unistd.h  383
-rw-r--r--  arch/powerpc/include/asm/uprobes.h  48
-rw-r--r--  arch/powerpc/include/asm/vdso.h  8
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h  4
-rw-r--r--  arch/powerpc/include/asm/vio.h  61
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h  119
-rw-r--r--  arch/powerpc/include/asm/xics.h  162
-rw-r--r--  arch/powerpc/include/asm/xor.h  67
245 files changed, 12728 insertions, 8155 deletions
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
deleted file mode 100644
index a71c9c1455a..00000000000
--- a/arch/powerpc/include/asm/8253pit.h
+++ /dev/null
@@ -1,3 +0,0 @@
-/*
- * 8253/8254 Programmable Interval Timer
- */
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index 4b0e1520600..bdf0563ba42 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -93,7 +93,7 @@ typedef struct mem_ctlr {
} memctl8xx_t;
/*-----------------------------------------------------------------------
- * BR - Memory Controler: Base Register 16-9
+ * BR - Memory Controller: Base Register 16-9
*/
#define BR_BA_MSK 0xffff8000 /* Base Address Mask */
#define BR_AT_MSK 0x00007000 /* Address Type Mask */
@@ -110,7 +110,7 @@ typedef struct mem_ctlr {
#define BR_V 0x00000001 /* Bank Valid */
/*-----------------------------------------------------------------------
- * OR - Memory Controler: Option Register 16-11
+ * OR - Memory Controller: Option Register 16-11
*/
#define OR_AM_MSK 0xffff8000 /* Address Mask Mask */
#define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */
@@ -393,8 +393,8 @@ typedef struct fec {
uint fec_addr_low; /* lower 32 bits of station address */
ushort fec_addr_high; /* upper 16 bits of station address */
ushort res1; /* reserved */
- uint fec_hash_table_high; /* upper 32-bits of hash table */
- uint fec_hash_table_low; /* lower 32-bits of hash table */
+ uint fec_grp_hash_table_high; /* upper 32-bits of hash table */
+ uint fec_grp_hash_table_low; /* lower 32-bits of hash table */
uint fec_r_des_start; /* beginning of Rx descriptor ring */
uint fec_x_des_start; /* beginning of Tx descriptor ring */
uint fec_r_buff_size; /* Rx buffer size */
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d51df17c7e6..3fb1bc432f4 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,36 +1,8 @@
-include include/asm-generic/Kbuild.asm
-header-y += auxvec.h
-header-y += bootx.h
-header-y += byteorder.h
-header-y += cputable.h
-header-y += elf.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += linkage.h
-header-y += msgbuf.h
-header-y += nvram.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ps3fb.h
-header-y += resource.h
-header-y += seccomp.h
-header-y += sembuf.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += spu_info.h
-header-y += stat.h
-header-y += statfs.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
+generic-y += clkdev.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += rwsem.h
+generic-y += trace_clock.h
+generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
deleted file mode 100644
index 5ab0b71531b..00000000000
--- a/arch/powerpc/include/asm/abs_addr.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _ASM_POWERPC_ABS_ADDR_H
-#define _ASM_POWERPC_ABS_ADDR_H
-#ifdef __KERNEL__
-
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/memblock.h>
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-#include <asm/firmware.h>
-
-struct mschunks_map {
- unsigned long num_chunks;
- unsigned long chunk_size;
- unsigned long chunk_shift;
- unsigned long chunk_mask;
- u32 *mapping;
-};
-
-extern struct mschunks_map mschunks_map;
-
-/* Chunks are 256 KB */
-#define MSCHUNKS_CHUNK_SHIFT (18)
-#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
-#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
-
-static inline unsigned long chunk_to_addr(unsigned long chunk)
-{
- return chunk << MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long addr_to_chunk(unsigned long addr)
-{
- return addr >> MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long phys_to_abs(unsigned long pa)
-{
- unsigned long chunk;
-
- /* This is a no-op on non-iSeries */
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return pa;
-
- chunk = addr_to_chunk(pa);
-
- if (chunk < mschunks_map.num_chunks)
- chunk = mschunks_map.mapping[chunk];
-
- return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
-}
-
-/* Convenience macros */
-#define virt_to_abs(va) phys_to_abs(__pa(va))
-#define abs_to_virt(aa) __va(aa)
-
-/*
- * Converts Virtual Address to Real Address for
- * Legacy iSeries Hypervisor calls
- */
-#define iseries_hv_addr(virtaddr) \
- (0x8000000000000000UL | virt_to_abs(virtaddr))
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_ABS_ADDR_H */
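For reference, the removed phys_to_abs() translated a physical address through the 256 KB chunk map legacy iSeries used for memory scattered by the hypervisor. A minimal standalone sketch of that translation (helper and parameter names hypothetical):

/* An address splits into a chunk number (bits above the 18-bit chunk
 * shift) and an offset within the 256 KB chunk; only the chunk number
 * is remapped through the hypervisor-provided table. */
static unsigned long example_phys_to_abs(unsigned long pa, const u32 *map,
					 unsigned long num_chunks)
{
	unsigned long chunk = pa >> 18;		/* addr_to_chunk() */

	if (chunk < num_chunks)
		chunk = map[chunk];		/* hypervisor remap */

	/* chunk_to_addr() plus the untranslated in-chunk offset */
	return (chunk << 18) | (pa & ((1UL << 18) - 1));
}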
diff --git a/arch/powerpc/include/asm/apm82181-adma.h b/arch/powerpc/include/asm/apm82181-adma.h
new file mode 100644
index 00000000000..8d36b517fbf
--- /dev/null
+++ b/arch/powerpc/include/asm/apm82181-adma.h
@@ -0,0 +1,311 @@
+/*
+ * 2009-2010 (C) Applied Micro Circuits Corporation.
+ *
+ * Author: Tai Tri Nguyen <ttnguyen@appliedmicro.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#ifndef APM82181_ADMA_H
+#define APM82181_ADMA_H
+
+
+#include <linux/types.h>
+
+
+#define to_apm82181_adma_chan(chan) container_of(chan,apm82181_ch_t,common)
+#define to_apm82181_adma_device(dev) container_of(dev,apm82181_dev_t,common)
+#define tx_to_apm82181_adma_slot(tx) container_of(tx,apm82181_desc_t,async_tx)
+
+#define APM82181_DMA_PROC_ROOT "driver/apm82181_adma"
+
+/* Number of operands supported in the h/w */
+#define XOR_MAX_OPS 16
+/* this is the XOR_CBBCR width */
+#define APM82181_ADMA_XOR_MAX_BYTE_COUNT	(1U << 31)
+#define APM82181_ADMA_DMA_MAX_BYTE_COUNT	(1024 * 1024)
+#define MAX_APM82181_DMA_CHANNELS 5
+#define APM82181_ADMA_THRESHOLD 1
+
+#define APM82181_PDMA0_ID 0
+#define APM82181_PDMA1_ID 1
+#define APM82181_PDMA2_ID 2
+#define APM82181_PDMA3_ID 3
+#define APM82181_XOR_ID 4
+
+/* DMA 0/1/2/3 registers */
+#define DCR_DMAx_BASE(x)	(0x200 + (x) * 0x8)	/* DMA DCR base */
+#define DCR_DMA2P40_CRx(x) (DCR_DMAx_BASE(x) + 0x0) /* DMA Channel Control */
+#define DMA_CR_CE (1 << 31)
+#define DMA_CR_CIE (1 << 30)
+#define DMA_CR_PL (1 << 28)
+#define DMA_CR_PW_128 0x08000000
+#define DMA_CR_DAI 0x01000000
+#define DMA_CR_SAI 0x00800000
+#define DMA_CR_BEN 0x00400000
+#define DMA_CR_TM_S_MM 0x00300000
+#define DMA_CR_ETD 0x00000100
+#define DMA_CR_TCE 0x00000080
+#define DMA_CR_CP(x)	(((x) << 5) & 0x00000060)
+#define DMA_CR_DEC (1 << 2)
+#define DMA_CR_SL (1 << 1)
+#define DCR_DMA2P40_CTCx(x) (DCR_DMAx_BASE(x) + 0x1) /* DMA Count 0 */
+#define DMA_CTC_ETIE (1 << 28)
+#define DMA_CTC_EIE (1 << 27)
+#define DMA_CTC_PCE (1 << 20)
+#define DMA_CTC_TC_MASK 0x000fffff
+#define DCR_DMA2P40_SAHx(x) (DCR_DMAx_BASE(x) + 0x2) /* DMA Src Addr High 0 */
+#define DCR_DMA2P40_SALx(x) (DCR_DMAx_BASE(x) + 0x3) /* DMA Src Addr Low 0 */
+#define DCR_DMA2P40_DAHx(x) (DCR_DMAx_BASE(x) + 0x4) /* DMA Dest Addr High 0 */
+#define DCR_DMA2P40_DALx(x) (DCR_DMAx_BASE(x) + 0x5) /* DMA Dest Addr Low 0 */
+#define DCR_DMA2P40_SGHx(x) (DCR_DMAx_BASE(x) + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCR_DMA2P40_SGLx(x) (DCR_DMAx_BASE(x) + 0x7) /* DMA SG Desc Addr Low 0 */
+/* DMA Status Register */
+#define DCR_DMA2P40_SR 0x220
+#define DMA_SR_CS(x)	(1 << (31 - (x)))
+#define DMA_SR_TS(x)	(1 << (27 - (x)))
+#define DMA_SR_RI(x)	(1 << (23 - (x)))
+#define DMA_SR_IR(x)	(1 << (19 - (x)))
+#define DMA_SR_ER(x)	(1 << (15 - (x)))
+#define DMA_SR_CB(x)	(1 << (11 - (x)))
+#define DMA_SR_SG(x)	(1 << (7 - (x)))
+/* S/G registers */
+#define DCR_DMA2P40_SGC 0x223
+#define DMA_SGC_SSG(x)		(1 << (31 - (x)))
+#define DMA_SGC_SGL(x, y)	((y) << (27 - (x))) /* x: channel; y: 0 PLB, 1 OPB */
+#define DMA_SGC_EM(x)		(1 << (15 - (x)))
+#define DMA_SGC_EM_ALL 0x0000F000
+
+/*
+ * XOR Command Block Control Register bits
+ */
+#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
+#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
+#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
+#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
+#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
+#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
+
+/*
+ * XORCore Status Register bits
+ */
+#define XOR_SR_XCP_BIT (1<<31) /* core processing */
+#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
+#define XOR_SR_IC_BIT (1<<16) /* invalid command */
+#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
+#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
+#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
+#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
+
+/*
+ * XORCore Control Set and Reset Register bits
+ */
+#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
+#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
+#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
+#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
+#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
+#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
+
+/*
+ * XORCore Interrupt Enable Register
+ */
+#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block Interrupt Enable */
+#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command Interrupt Enable */
+#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error Interrupt Enable */
+#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
+#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
+
+typedef struct apm82181_plb_dma4_device {
+ struct resource reg; /* Resource for register */
+ void __iomem *reg_base;
+ struct platform_device *ofdev;
+ struct device *dev;
+} apm82181_plb_dma_t;
+
+/**
+ * struct apm82181_dma_device - internal representation of an DMA device
+ * @id: HW DMA Device selector
+ * @ofdev: OF device
+ * @dcr_base: dcr base of HW PLB DMA channels
+ * @reg_base: base of ADMA XOR channel
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @pool_size: memory pool size for the channel device
+ * @common: embedded struct dma_device
+ * @cap_mask: capabilities of ADMA channels
+ */
+typedef struct apm82181_plb_dma_device {
+ int id;
+ struct platform_device *ofdev;
+ u32 dcr_base;
+ void __iomem *xor_base;
+ struct device *dev;
+ struct dma_device common;
+ struct apm82181_plb_dma4_device *pdma;
+ void *dma_desc_pool_virt;
+ u32 pool_size;
+ dma_addr_t dma_desc_pool;
+ dma_cap_mask_t cap_mask;
+} apm82181_dev_t;
+
+/**
+ * struct apm82181_dma_chan - internal representation of an ADMA channel
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @device: parent device
+ * @chain: device chain view of the descriptors
+ * @common: common dmaengine channel object members
+ * @all_slots: complete domain of slots usable by the channel
+ * @reg: Resource for register
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @hw_chain_inited: h/w descriptor chain initialization flag
+ * @irq_tasklet: bottom half where apm82181_adma_slot_cleanup runs
+ * @needs_unmap: if buffers should not be unmapped upon final processing
+ */
+typedef struct apm82181_plb_dma_chan {
+ spinlock_t lock;
+ struct apm82181_plb_dma_device *device;
+ struct timer_list cleanup_watchdog;
+ struct list_head chain;
+ struct dma_chan common;
+ struct list_head all_slots;
+ struct apm82181_adma_plb_desc_slot *last_used;
+ int pending;
+ dma_cookie_t completed_cookie;
+ int slots_allocated;
+ int hw_chain_inited;
+ struct tasklet_struct irq_tasklet;
+ u8 needs_unmap;
+ phys_addr_t current_cdb_addr;
+} apm82181_ch_t;
+
+typedef struct apm82181_adma_plb_desc_slot {
+ dma_addr_t phys;
+ struct apm82181_adma_plb_desc_slot *group_head;
+ struct apm82181_adma_plb_desc_slot *hw_next;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head slot_node;
+ struct list_head chain_node;
+ struct list_head group_list;
+ unsigned int unmap_len;
+ void *hw_desc;
+ u16 stride;
+ u16 idx;
+ u16 slot_cnt;
+ u8 src_cnt;
+ u8 dst_cnt;
+ u8 slots_per_op;
+ u8 descs_per_op;
+ unsigned long flags;
+ unsigned long reverse_flags[8];
+#define APM82181_DESC_INT 0 /* generate interrupt on complete */
+#define APM82181_DESC_FENCE 1 /* Other tx will use its result */
+ /* This tx needs to be polled to complete */
+
+} apm82181_desc_t;
+
+typedef struct {
+ u32 ce:1;
+ u32 cie:1;
+ u32 td:1;
+ u32 pl:1;
+ u32 pw:3;
+ u32 dai:1;
+ u32 sai:1;
+ u32 ben:1;
+ u32 tm:2;
+ u32 psc:2;
+ u32 pwc:6;
+ u32 phc:3;
+ u32 etd:1;
+ u32 tce:1;
+ u32 cp:2;
+ u32 pf:2;
+ u32 dec:1;
+ u32 sl:1;
+ u32 reserved:1;
+} __attribute__((packed)) dma_cdb_ctrl_t;
+
+typedef struct {
+ u32 link:1;
+ u32 sgl:1;
+ u32 tcie:1;
+ u32 etie:1;
+ u32 eie:1;
+ u32 sid:3;
+ u32 bten:1;
+ u32 bsiz:2;
+ u32 pce:1;
+ u32 tc:20;
+} __attribute__((packed)) dma_cdb_count_t;
+/* scatter/gather descriptor struct */
+typedef struct dma_cdb {
+ dma_cdb_ctrl_t ctrl;
+ dma_cdb_count_t cnt;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dest_hi;
+ u32 dest_lo;
+ u32 sg_hi;
+ u32 sg_lo;
+} dma_cdb_t;
+
+typedef struct {
+ uint32_t control;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+
+/*
+ * XOR Accelerator engine Command Block Type
+ */
+typedef struct {
+ /*
+ * Basic 64-bit format XOR CB
+ */
+ u32 cbc; /* control */
+ u32 cbbc; /* byte count */
+ u32 cbs; /* status */
+ u8 pad0[4]; /* reserved */
+ u32 cbtah; /* target address high */
+ u32 cbtal; /* target address low */
+ u32 cblah; /* link address high */
+ u32 cblal; /* link address low */
+ struct {
+ u32 h;
+ u32 l;
+ } __attribute__ ((packed)) ops [16];
+} __attribute__ ((packed)) xor_cb_t;
+
+/*
+ * XOR hardware registers
+ */
+typedef struct {
+ u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
+ u8 pad0[352]; /* reserved */
+ u32 cbcr; /* CB control register */
+ u32 cbbcr; /* CB byte count register */
+ u32 cbsr; /* CB status register */
+ u8 pad1[4]; /* reserved */
+ u32 cbtahr; /* operand target address high register */
+ u32 cbtalr; /* operand target address low register */
+ u32 cblahr; /* CB link address high register */
+ u32 cblalr; /* CB link address low register */
+ u32 crsr; /* control set register */
+ u32 crrr; /* control reset register */
+ u32 ccbahr; /* current CB address high register */
+ u32 ccbalr; /* current CB address low register */
+ u32 plbr; /* PLB configuration register */
+ u32 ier; /* interrupt enable register */
+ u32 pecr; /* parity error count register */
+ u32 sr; /* status register */
+ u32 revidr; /* revision ID register */
+} xor_regs_t;
+
+#endif
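As an illustration of the register-bit definitions above, a hypothetical helper (not part of the driver) composing a memory-to-memory channel control word:

/* Hypothetical: channel enabled with interrupts, 128-byte PLB width,
 * source and destination addresses incrementing, memory-to-memory
 * transfer mode, end-of-transfer descriptor, priority 2. */
static inline u32 apm82181_example_dma_cr(void)
{
	return DMA_CR_CE | DMA_CR_CIE | DMA_CR_PW_128 |
	       DMA_CR_DAI | DMA_CR_SAI | DMA_CR_TM_S_MM |
	       DMA_CR_ETD | DMA_CR_CP(2);
}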
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 00000000000..bde53110363
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+ if (ppc_md.get_random_long)
+ return ppc_md.get_random_long(v);
+
+ return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+ unsigned long val;
+ int rc;
+
+ rc = arch_get_random_long(&val);
+ if (rc)
+ *v = val;
+
+ return rc;
+}
+
+static inline int arch_has_random(void)
+{
+ return !!ppc_md.get_random_long;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+ return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
+ return 0;
+}
+static inline int arch_has_random_seed(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
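A hedged usage sketch of the hooks above: arch_get_random_long() succeeds only when a platform (powernv, via powernv_get_random_long()) has installed ppc_md.get_random_long; the helper name below is hypothetical:

/* Hypothetical caller; returns 0 when no hardware RNG is wired up. */
static inline unsigned long example_hw_random(void)
{
	unsigned long v = 0;

	if (arch_has_random())
		(void)arch_get_random_long(&v);

	return v;
}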
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index decad950f11..4b237aa3566 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -29,16 +29,16 @@
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
+#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
-/* Move to CR, single-entry optimized version. Only available
- * on POWER4 and later.
- */
-#ifdef CONFIG_POWER4_ONLY
-#define PPC_MTOCRF stringify_in_c(mtocrf)
+#ifdef __BIG_ENDIAN__
+#define LDX_BE stringify_in_c(ldx)
+#define STDX_BE stringify_in_c(stdx)
#else
-#define PPC_MTOCRF stringify_in_c(mtcrf)
+#define LDX_BE stringify_in_c(ldbrx)
+#define STDX_BE stringify_in_c(stdbrx)
#endif
#else /* 32-bit */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b8f152ece02..28992d01292 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -5,13 +5,10 @@
* PowerPC atomic operations
*/
-#include <linux/types.h>
-
#ifdef __KERNEL__
-#include <linux/compiler.h>
-#include <asm/synch.h>
-#include <asm/asm-compat.h>
-#include <asm/system.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
@@ -49,13 +46,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -85,13 +82,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -119,13 +116,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -163,13 +160,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -181,38 +178,67 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1 # atomic_add_unless\n\
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
bne- 1b \n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
: "=&r" (t)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ int atomic_inc_not_zero(atomic_t *v)
+{
+ int t1, t2;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
+ cmpwi 0,%0,0\n\
+ beq- 2f\n\
+ addic %1,%0,1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (t1), "=&r" (t2)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+ return t1;
+}
+#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -227,7 +253,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
cmpwi %0,1\n\
addi %0,%0,-1\n\
@@ -235,7 +261,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:" : "=&b" (t)
: "r" (&v->counter)
@@ -243,11 +269,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
+#define atomic_dec_if_positive atomic_dec_if_positive
#ifdef __powerpc64__
@@ -286,12 +308,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_add_return\n\
add %0,%1,%0\n\
stdcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -320,12 +342,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_sub_return\n\
subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -352,12 +374,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_inc_return\n\
addic %0,%0,1\n\
stdcx. %0,0,%1 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -394,12 +416,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_return\n\
addic %0,%0,-1\n\
stdcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -419,13 +441,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n\
stdcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:" : "=&r" (t)
: "r" (&v->counter)
@@ -444,21 +466,21 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long t;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
-"1: ldarx %0,0,%1 # atomic_add_unless\n\
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%1 # __atomic_add_unless\n\
cmpd 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
" stdcx. %0,0,%1 \n\
bne- 1b \n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
: "=&r" (t)
@@ -468,13 +490,36 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
return t != u;
}
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+{
+ long t1, t2;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
+ cmpdi 0,%0,0\n\
+ beq- 2f\n\
+ addic %1,%0,1\n\
+ stdcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (t1), "=&r" (t2)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
-#else /* __powerpc64__ */
-#include <asm-generic/atomic64.h>
+ return t1;
+}
#endif /* __powerpc64__ */
-#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
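The atomic_inc_not_zero()/atomic64_inc_not_zero() primitives added above exist chiefly for the take-a-reference-unless-dead pattern; a minimal sketch (struct and helper names hypothetical):

/* Fails (returns NULL) if the refcount already hit zero, i.e. we
 * raced with the final put and the object is being torn down. */
struct example_obj {
	atomic_t refcnt;
};

static inline struct example_obj *example_tryget(struct example_obj *o)
{
	return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
}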
diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h
deleted file mode 100644
index 19a099b62cd..00000000000
--- a/arch/powerpc/include/asm/auxvec.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_POWERPC_AUXVEC_H
-#define _ASM_POWERPC_AUXVEC_H
-
-/*
- * We need to put in some extra aux table entries to tell glibc what
- * the cache block size is, so it can use the dcbz instruction safely.
- */
-#define AT_DCACHEBSIZE 19
-#define AT_ICACHEBSIZE 20
-#define AT_UCACHEBSIZE 21
-/* A special ignored type value for PPC, for glibc compatibility. */
-#define AT_IGNOREPPC 22
-
-/* The vDSO location. We have to use the same value as x86 for glibc's
- * sake :-)
- */
-#define AT_SYSINFO_EHDR 33
-
-#endif
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 00000000000..bab79a110c7
--- /dev/null
+++ b/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+
+#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define SMPWMB LWSYNC
+#else
+# define SMPWMB eieio
+#endif
+
+#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
+#define smp_mb() mb()
+#define smp_rmb() __lwsync()
+#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define __lwsync() barrier()
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ___p1; \
+})
+
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
+#endif /* _ASM_POWERPC_BARRIER_H */
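smp_store_release() and smp_load_acquire() above pair up for the classic message-passing idiom; a minimal sketch (producer/consumer names hypothetical):

/* The lwsync in smp_store_release() orders the payload store before
 * the flag store; the lwsync in smp_load_acquire() orders the flag
 * load before any subsequent payload load. */
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;
	smp_store_release(&example_ready, 1);
}

static int example_consumer(void)
{
	if (smp_load_acquire(&example_ready))
		return example_data;	/* guaranteed to observe 42 */
	return -1;
}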
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae2d09..bd3bd573d0a 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -46,18 +46,15 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
-#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
-#define DEFINE_BITOP(fn, op, prefix, postfix) \
+#define DEFINE_BITOP(fn, op, prefix) \
static __inline__ void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \
@@ -70,35 +67,34 @@ static __inline__ void fn(unsigned long mask, \
PPC405_ERR77(0,%3) \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
- postfix \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
}
-DEFINE_BITOP(set_bits, or, "", "")
-DEFINE_BITOP(clear_bits, andc, "", "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "")
-DEFINE_BITOP(change_bits, xor, "", "")
+DEFINE_BITOP(set_bits, or, "")
+DEFINE_BITOP(clear_bits, andc, "")
+DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
+DEFINE_BITOP(change_bits, xor, "")
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
- set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
- clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
- clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
- change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
@@ -124,38 +120,38 @@ static __inline__ unsigned long fn( \
return (old & mask); \
}
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
static __inline__ int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_set_bits_lock(BITOP_MASK(nr),
- addr + BITOP_WORD(nr)) != 0;
+ return test_and_set_bits_lock(BIT_MASK(nr),
+ addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
@@ -209,8 +205,8 @@ static __inline__ unsigned long ffz(unsigned long x)
return BITS_PER_LONG;
/*
- * Calculate the bit position of the least signficant '1' bit in x
- * (since x has been changed this will actually be the least signficant
+ * Calculate the bit position of the least significant '1' bit in x
+ * (since x has been changed this will actually be the least significant
* '0' bit in * the original x). Note: (x & -x) gives us a mask that
* is the least significant * (RIGHT-most) 1-bit of the value in x.
*/
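A one-line illustration of the (x & -x) trick described above (helper name hypothetical):

/* Isolate the right-most set bit, e.g.
 * example_rightmost_bit(0xc) == 0x4 (0b1100 -> 0b0100). */
static inline unsigned long example_rightmost_bit(unsigned long x)
{
	return x & -x;
}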
@@ -267,73 +263,24 @@ static __inline__ int fls64(__u64 x)
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
#include <asm-generic/bitops/hweight.h>
+#endif
+
#include <asm-generic/bitops/find.h>
/* Little-endian versions */
+#include <asm-generic/bitops/le.h>
-static __inline__ int test_le_bit(unsigned long nr,
- __const__ unsigned long *addr)
-{
- __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
- return (tmp[nr >> 3] >> (nr & 7)) & 1;
-}
-
-#define __set_le_bit(nr, addr) \
- __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define __clear_le_bit(nr, addr) \
- __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define test_and_set_le_bit(nr, addr) \
- test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define test_and_clear_le_bit(nr, addr) \
- test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define __test_and_set_le_bit(nr, addr) \
- __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-#define __test_and_clear_le_bit(nr, addr) \
- __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
-
-#define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
-unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-unsigned long generic_find_next_le_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset);
/* Bitmap functions for the ext2 filesystem */
-#define ext2_set_bit(nr,addr) \
- __test_and_set_le_bit((nr), (unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_le_bit((nr), (unsigned long*)addr)
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_le_bit((nr), (unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_le_bit((nr), (unsigned long*)addr)
-
-#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
-
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_le_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- generic_find_next_zero_le_bit((unsigned long*)addr, size, off)
-
-#define ext2_find_next_bit(addr, size, off) \
- generic_find_next_le_bit((unsigned long *)addr, size, off)
-/* Bitmap functions for the minix filesystem. */
-
-#define minix_test_and_set_bit(nr,addr) \
- __test_and_set_le_bit(nr, (unsigned long *)addr)
-#define minix_set_bit(nr,addr) \
- __set_le_bit(nr, (unsigned long *)addr)
-#define minix_test_and_clear_bit(nr,addr) \
- __test_and_clear_le_bit(nr, (unsigned long *)addr)
-#define minix_test_bit(nr,addr) \
- test_le_bit(nr, (unsigned long *)addr)
-
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_le_bit((unsigned long *)addr, size)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
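The PPC_BIT()/PPC_BITMASK() macros introduced above use IBM numbering, in which bit 0 is the most-significant bit; some 64-bit worked values, checked in a purely illustrative helper:

/* Assuming BITS_PER_LONG == 64:
 *   PPC_BIT(0)        == 0x8000000000000000UL   (MSB)
 *   PPC_BIT(63)       == 0x0000000000000001UL   (LSB)
 *   PPC_BITMASK(0, 3) == 0xf000000000000000UL   (bits 0..3 inclusive)
 */
static inline void example_ppc_bit_check(void)
{
	BUG_ON(PPC_BIT(0) != 0x8000000000000000UL);
	BUG_ON(PPC_BIT(63) != 0x0000000000000001UL);
	BUG_ON(PPC_BITMASK(0, 3) != 0xf000000000000000UL);
}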
diff --git a/arch/powerpc/include/asm/bitsperlong.h b/arch/powerpc/include/asm/bitsperlong.h
deleted file mode 100644
index 5f1659032c4..00000000000
--- a/arch/powerpc/include/asm/bitsperlong.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __ASM_POWERPC_BITSPERLONG_H
-#define __ASM_POWERPC_BITSPERLONG_H
-
-#if defined(__powerpc64__)
-# define __BITS_PER_LONG 64
-#else
-# define __BITS_PER_LONG 32
-#endif
-
-#include <asm-generic/bitsperlong.h>
-
-#endif /* __ASM_POWERPC_BITSPERLONG_H */
diff --git a/arch/powerpc/include/asm/bootx.h b/arch/powerpc/include/asm/bootx.h
index 60a3c9ef301..dd9461003df 100644
--- a/arch/powerpc/include/asm/bootx.h
+++ b/arch/powerpc/include/asm/bootx.h
@@ -5,126 +5,11 @@
* Written by Benjamin Herrenschmidt.
*/
-
#ifndef __ASM_BOOTX_H__
#define __ASM_BOOTX_H__
-#include <linux/types.h>
-
-#ifdef macintosh
-#include <Types.h>
-#include "linux_type_defs.h"
-#endif
-
-#ifdef macintosh
-/* All this requires PowerPC alignment */
-#pragma options align=power
-#endif
-
-/* On kernel entry:
- *
- * r3 = 0x426f6f58 ('BooX')
- * r4 = pointer to boot_infos
- * r5 = NULL
- *
- * Data and instruction translation disabled, interrupts
- * disabled, kernel loaded at physical 0x00000000 on PCI
- * machines (will be different on NuBus).
- */
-
-#define BOOT_INFO_VERSION 5
-#define BOOT_INFO_COMPATIBLE_VERSION 1
-
-/* Bit in the architecture flag mask. More to be defined in
- future versions. Note that either BOOT_ARCH_PCI or
- BOOT_ARCH_NUBUS is set. The other BOOT_ARCH_NUBUS_xxx are
- set additionally when BOOT_ARCH_NUBUS is set.
- */
-#define BOOT_ARCH_PCI 0x00000001UL
-#define BOOT_ARCH_NUBUS 0x00000002UL
-#define BOOT_ARCH_NUBUS_PDM 0x00000010UL
-#define BOOT_ARCH_NUBUS_PERFORMA 0x00000020UL
-#define BOOT_ARCH_NUBUS_POWERBOOK 0x00000040UL
-
-/* Maximum number of ranges in phys memory map */
-#define MAX_MEM_MAP_SIZE 26
-
-/* This is the format of an element in the physical memory map. Note that
- the map is optional and current BootX will only build it for pre-PCI
- machines */
-typedef struct boot_info_map_entry
-{
- __u32 physAddr; /* Physical starting address */
- __u32 size; /* Size in bytes */
-} boot_info_map_entry_t;
-
-
-/* Here are the boot informations that are passed to the bootstrap
- * Note that the kernel arguments and the device tree are appended
- * at the end of this structure. */
-typedef struct boot_infos
-{
- /* Version of this structure */
- __u32 version;
- /* backward compatible down to version: */
- __u32 compatible_version;
-
- /* NEW (vers. 2) this holds the current _logical_ base addr of
- the frame buffer (for use by early boot message) */
- __u8* logicalDisplayBase;
+#include <uapi/asm/bootx.h>
- /* NEW (vers. 4) Apple's machine identification */
- __u32 machineID;
-
- /* NEW (vers. 4) Detected hw architecture */
- __u32 architecture;
-
- /* The device tree (internal addresses relative to the beginning of the tree,
- * device tree offset relative to the beginning of this structure).
- * On pre-PCI macintosh (BOOT_ARCH_PCI bit set to 0 in architecture), this
- * field is 0.
- */
- __u32 deviceTreeOffset; /* Device tree offset */
- __u32 deviceTreeSize; /* Size of the device tree */
-
- /* Some infos about the current MacOS display */
- __u32 dispDeviceRect[4]; /* left,top,right,bottom */
- __u32 dispDeviceDepth; /* (8, 16 or 32) */
- __u8* dispDeviceBase; /* base address (physical) */
- __u32 dispDeviceRowBytes; /* rowbytes (in bytes) */
- __u32 dispDeviceColorsOffset; /* Colormap (8 bits only) or 0 (*) */
- /* Optional offset in the registry to the current
- * MacOS display. (Can be 0 when not detected) */
- __u32 dispDeviceRegEntryOffset;
-
- /* Optional pointer to boot ramdisk (offset from this structure) */
- __u32 ramDisk;
- __u32 ramDiskSize; /* size of ramdisk image */
-
- /* Kernel command line arguments (offset from this structure) */
- __u32 kernelParamsOffset;
-
- /* ALL BELOW NEW (vers. 4) */
-
- /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
- (non-PCI) only. On PCI, memory is contiguous and it's size is in the
- device-tree. */
- boot_info_map_entry_t
- physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
- __u32 physMemoryMapSize; /* How many entries in map */
-
-
- /* The framebuffer size (optional, currently 0) */
- __u32 frameBufferSize; /* Represents a max size, can be 0. */
-
- /* NEW (vers. 5) */
-
- /* Total params size (args + colormap + device tree + ramdisk) */
- __u32 totalParamsSize;
-
-} boot_infos_t;
-
-#ifdef __KERNEL__
/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index
 * is represented by 3 short words, each containing a 16-bit (unsigned) color
* component. Later versions may contain the gamma table for direct-color
@@ -162,10 +47,4 @@ struct bootx_dt_node {
extern void bootx_init(unsigned long r4, unsigned long phys);
-#endif /* __KERNEL__ */
-
-#ifdef macintosh
-#pragma options align=reset
-#endif
-
#endif
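A hedged sketch of the colormap layout the comment above describes (struct name hypothetical): 256 entries of three unsigned 16-bit components, 1536 bytes in all:

/* One colormap entry: 3 * 2 bytes; the full map is 256 of these. */
struct example_bootx_color {
	__u16 red;
	__u16 green;
	__u16 blue;
};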
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 906f46e3100..89fc382648b 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,6 +13,7 @@ extern void btext_update_display(unsigned long phys, int width, int height,
extern void btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address);
extern void btext_prepare_BAT(void);
+extern void btext_map(void);
extern void btext_unmap(void);
extern void btext_drawchar(char c);
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 065c590c991..3eb53d74107 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -126,5 +126,16 @@
#include <asm-generic/bug.h>
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void die(const char *, struct pt_regs *, long);
+extern void print_backtrace(unsigned long *);
+
+#endif /* !__ASSEMBLY__ */
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BUG_H */
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
deleted file mode 100644
index aa6cc4fac96..00000000000
--- a/arch/powerpc/include/asm/byteorder.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_POWERPC_BYTEORDER_H
-#define _ASM_POWERPC_BYTEORDER_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 4b509411ad8..ed0afc1e44a 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -41,9 +41,37 @@ struct ppc64_caches {
extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
-#if !defined(__ASSEMBLY__)
+#if defined(__ASSEMBLY__)
+/*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+#define PURGE_PREFETCHED_INS \
+ sync; \
+ icbi 0,r3; \
+ sync; \
+ isync
+
+#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR() 0L
+#define _get_L3CR() 0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
#endif
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+
+#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
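
For illustration, here is a minimal C-level rendering of the PURGE_PREFETCHED_INS sequence added above. This is a sketch only: the helper name and the caller-supplied address are assumptions, not part of the patch, which keeps the sequence as an assembly macro keyed off r3.

static inline void purge_prefetched_ins(void *addr)
{
	__asm__ __volatile__(
		"sync\n\t"	/* order prior stores before the icbi */
		"icbi 0,%0\n\t"	/* dummy block invalidate to flush ifetch buffers */
		"sync\n\t"	/* ensure the icbi has performed */
		"isync"		/* discard already-prefetched instructions */
		: : "r" (addr) : "memory");
}
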
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index ab9e402518e..5b9312220e8 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,13 +30,9 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void __flush_icache_range(unsigned long, unsigned long);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- __flush_icache_range(start, stop);
-}
+extern void __flush_disable_L1(void);
+extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ce0c28495f9..8251a3ba870 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -14,6 +14,9 @@
* which always checksum on 4 octet boundaries. ihl is the number
* of 32-bit words and is always >= 5.
*/
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
return sum;
#endif
}
+
+#endif
#endif /* __KERNEL__ */
#endif
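
A hedged usage sketch of ip_fast_csum() (hypothetical helper; assumes <linux/ip.h> for struct iphdr): a valid IPv4 header checksums to zero, and ihl is already the header length in 32-bit words, as the comment above notes.

static bool iphdr_csum_ok(const struct iphdr *iph)
{
	/* iph->ihl is the header length in 32-bit words (>= 5). */
	return ip_fast_csum((const void *)iph, iph->ihl) == 0;
}
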
diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h
deleted file mode 100644
index ab1882c1e17..00000000000
--- a/arch/powerpc/include/asm/clk_interface.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_POWERPC_CLK_INTERFACE_H
-#define __ASM_POWERPC_CLK_INTERFACE_H
-
-#include <linux/clk.h>
-
-struct clk_interface {
- struct clk* (*clk_get) (struct device *dev, const char *id);
- int (*clk_enable) (struct clk *clk);
- void (*clk_disable) (struct clk *clk);
- unsigned long (*clk_get_rate) (struct clk *clk);
- void (*clk_put) (struct clk *clk);
- long (*clk_round_rate) (struct clk *clk, unsigned long rate);
- int (*clk_set_rate) (struct clk *clk, unsigned long rate);
- int (*clk_set_parent) (struct clk *clk, struct clk *parent);
- struct clk* (*clk_get_parent) (struct clk *clk);
-};
-
-extern struct clk_interface clk_functions;
-
-#endif /* __ASM_POWERPC_CLK_INTERFACE_H */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..d463c68fe7f
--- /dev/null
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -0,0 +1,310 @@
+#ifndef _ASM_POWERPC_CMPXCHG_H_
+#define _ASM_POWERPC_CMPXCHG_H_
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/synch.h>
+#include <asm/asm-compat.h>
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ PPC_RELEASE_BARRIER
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ PPC_ACQUIRE_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ PPC_RELEASE_BARRIER
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ PPC_ACQUIRE_BARRIER
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+static __always_inline unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ PPC_RELEASE_BARRIER
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ PPC_ACQUIRE_BARRIER
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ PPC_RELEASE_BARRIER
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ PPC_ACQUIRE_BARRIER
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#define cmpxchg_local(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
+#ifdef CONFIG_PPC64
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#define cmpxchg64_relaxed cmpxchg64_local
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CMPXCHG_H_ */
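
A minimal usage sketch of the macros defined above (hypothetical helper, not part of the patch): a lock-free add built on cmpxchg(), retrying until the compare-and-swap observes an unchanged value.

static inline unsigned long lockfree_add(unsigned long *ctr, unsigned long n)
{
	unsigned long old, new;

	do {
		old = *ctr;		/* snapshot the current value */
		new = old + n;
	} while (cmpxchg(ctr, old, new) != old);	/* retry on races */

	return new;
}
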
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 37c32aba79b..840a5509b3f 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -26,27 +26,77 @@ unsigned int create_branch(const unsigned int *addr,
unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
unsigned long target, int flags);
-void patch_branch(unsigned int *addr, unsigned long target, int flags);
-void patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_branch(unsigned int *addr, unsigned long target, int flags);
+int patch_instruction(unsigned int *addr, unsigned int instr);
int instr_is_relative_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
const unsigned int *src);
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+#endif
+
+#define OP_RT_RA_MASK 0xffff0000UL
+#define LIS_R2 0x3c020000UL
+#define ADDIS_R2_R12 0x3c4c0000UL
+#define ADDI_R2_R2 0x38420000UL
static inline unsigned long ppc_function_entry(void *func)
{
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+ u32 *insn = func;
+
/*
- * On PPC64 the function pointer actually points to the function's
- * descriptor. The first entry in the descriptor is the address
- * of the function text.
+ * A PPC64 ABIv2 function may have a local and a global entry
+ * point. We need to use the local entry point when patching
+ * functions, so identify and step over the global entry point
+ * sequence.
+ *
+ * The global entry point sequence is always of the form:
+ *
+ * addis r2,r12,XXXX
+ * addi r2,r2,XXXX
+ *
+ * A linker optimisation may convert the addis to lis:
+ *
+ * lis r2,XXXX
+ * addi r2,r2,XXXX
+ */
+ if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+ ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+ ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+ return (unsigned long)(insn + 2);
+ else
+ return (unsigned long)func;
+#else
+ /*
+ * On PPC64 ABIv1 the function pointer actually points to the
+ * function's descriptor. The first entry in the descriptor is the
+ * address of the function text.
*/
return ((func_descr_t *)func)->entry;
+#endif
#else
return (unsigned long)func;
#endif
}
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+ /* On PPC64 ABIv2 the global entry point is at the address */
+ return (unsigned long)func;
+#else
+ /* In all other cases there is no change vs ppc_function_entry() */
+ return ppc_function_entry(func);
+#endif
+}
+
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
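
A hedged self-check of the global-entry matching above, using illustrative instruction encodings. The helper is hypothetical, only meaningful on an ELFv2 build, and assumes <asm/bug.h> for WARN_ON().

static void check_global_entry_match(void)
{
	/* addis r2,r12,0x12 ; addi r2,r2,0x345 -- a typical ABIv2
	 * global entry sequence (immediate values illustrative). */
	static u32 seq[] = { 0x3c4c0012, 0x38420345 };

	/* ppc_function_entry() should step over both words and
	 * return the local entry point. */
	WARN_ON(ppc_function_entry(seq) != (unsigned long)(seq + 2));
}
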
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 2296112e247..b142b8e0ed9 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,7 +8,11 @@
#include <linux/sched.h>
#define COMPAT_USER_HZ 100
+#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
+#else
+#define COMPAT_UTS_MACHINE "ppcle\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -38,6 +42,7 @@ typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
+typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
@@ -100,7 +105,8 @@ struct compat_statfs {
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
- int f_spare[5];
+ int f_flags;
+ int f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -113,6 +119,64 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE32];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} compat_siginfo_t;
+
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
@@ -122,7 +186,6 @@ typedef u32 compat_sigset_word;
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
-typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
@@ -140,11 +203,12 @@ static inline void __user *arch_compat_alloc_user_space(long len)
unsigned long usp = regs->gpr[1];
/*
- * We cant access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit ABI
+ * We can't access below the stack pointer in the 32bit ABI and
+ * can access 288 bytes in the 64bit big-endian ABI,
+ * or 512 bytes with the new ELFv2 little-endian ABI.
*/
if (!is_32bit_task())
- usp -= 288;
+ usp -= USER_REDZONE_SIZE;
return (void __user *) (usp - len);
}
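
A hypothetical sketch of how compat_ptr() is used: a 32-bit user pointer that arrived in a 64-bit syscall register is converted back into a dereferenceable __user pointer (assumes <linux/uaccess.h> for copy_from_user()).

static int fetch_compat_buf(void *dst, compat_uptr_t uptr, size_t len)
{
	void __user *up = compat_ptr(uptr);

	/* copy_from_user() returns the number of bytes NOT copied. */
	return copy_from_user(dst, up, len) ? -EFAULT : 0;
}
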
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 00000000000..40014921fff
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl schedule_user
+#else
+#define SCHEDULE_USER bl schedule
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index e50323fe941..4398a6cdcf5 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -98,7 +98,7 @@ typedef struct cpm_buf_desc {
#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
#define BD_SC_LAST (0x0800) /* Last buffer in frame */
#define BD_SC_TC (0x0400) /* Transmit CRC */
-#define BD_SC_CM (0x0200) /* Continous mode */
+#define BD_SC_CM (0x0200) /* Continuous mode */
#define BD_SC_ID (0x0100) /* Rec'd too many idles */
#define BD_SC_P (0x0100) /* xmt preamble */
#define BD_SC_BR (0x0020) /* Break received */
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index bd07650dca5..8ee4211ca0c 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -4,7 +4,7 @@
*
* This file contains structures and information for the communication
* processor channels. Some CPM control and status is available
- * throught the MPC8xx internal memory map. See immap.h for details.
+ * through the MPC8xx internal memory map. See immap.h for details.
* This file only contains what I need for the moment, not the total
* CPM capabilities. I (or someone else) will add definitions as they
* are needed. -- Dan
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index f42e9baf3a4..7c8608b0969 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -489,7 +489,6 @@ typedef struct scc_trans {
#define FCC_GFMR_TCI ((uint)0x20000000)
#define FCC_GFMR_TRX ((uint)0x10000000)
#define FCC_GFMR_TTX ((uint)0x08000000)
-#define FCC_GFMR_TTX ((uint)0x08000000)
#define FCC_GFMR_CDP ((uint)0x04000000)
#define FCC_GFMR_CTSP ((uint)0x02000000)
#define FCC_GFMR_CDS ((uint)0x01000000)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd9cf0..0fdd7eece6d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -1,42 +1,10 @@
#ifndef __ASM_POWERPC_CPUTABLE_H
#define __ASM_POWERPC_CPUTABLE_H
-#define PPC_FEATURE_32 0x80000000
-#define PPC_FEATURE_64 0x40000000
-#define PPC_FEATURE_601_INSTR 0x20000000
-#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
-#define PPC_FEATURE_HAS_FPU 0x08000000
-#define PPC_FEATURE_HAS_MMU 0x04000000
-#define PPC_FEATURE_HAS_4xxMAC 0x02000000
-#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
-#define PPC_FEATURE_HAS_SPE 0x00800000
-#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
-#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
-#define PPC_FEATURE_NO_TB 0x00100000
-#define PPC_FEATURE_POWER4 0x00080000
-#define PPC_FEATURE_POWER5 0x00040000
-#define PPC_FEATURE_POWER5_PLUS 0x00020000
-#define PPC_FEATURE_CELL 0x00010000
-#define PPC_FEATURE_BOOKE 0x00008000
-#define PPC_FEATURE_SMT 0x00004000
-#define PPC_FEATURE_ICACHE_SNOOP 0x00002000
-#define PPC_FEATURE_ARCH_2_05 0x00001000
-#define PPC_FEATURE_PA6T 0x00000800
-#define PPC_FEATURE_HAS_DFP 0x00000400
-#define PPC_FEATURE_POWER6_EXT 0x00000200
-#define PPC_FEATURE_ARCH_2_06 0x00000100
-#define PPC_FEATURE_HAS_VSX 0x00000080
-
-#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
- 0x00000040
-
-#define PPC_FEATURE_TRUE_LE 0x00000002
-#define PPC_FEATURE_PPC_LE 0x00000001
-
-#ifdef __KERNEL__
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
+#include <uapi/asm/cputable.h>
#ifndef __ASSEMBLY__
@@ -84,6 +52,7 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
+ unsigned int cpu_user_features2; /* Userland features v2 */
unsigned int mmu_features; /* MMU features */
/* cache line sizes */
@@ -121,6 +90,18 @@ struct cpu_spec {
* if the error is fatal, 1 if it was fully recovered and 0 to
* pass up (not CPU originated) */
int (*machine_check)(struct pt_regs *regs);
+
+ /*
+ * Processor specific early machine check handler which is
+ * called in real mode to handle SLB and TLB errors.
+ */
+ long (*machine_check_early)(struct pt_regs *regs);
+
+ /*
+ * Processor specific routine to flush tlbs.
+ */
+ void (*flush_tlb)(unsigned long inval_selector);
+
};
extern struct cpu_spec *cur_cpu_spec;
@@ -138,34 +119,37 @@ extern const char *powerpc_base_platform;
/* CPU kernel features */
/* Retain the 32b definitions; they all use the bottom half of the word */
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001)
-#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
-#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
-#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
-#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
-#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
-#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
-#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
-#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
-#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200)
-#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
-#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
-#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
-#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
-#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
-#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
-#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
-#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
-#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
-#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
-#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
-#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000)
-#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
-#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
-#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
-#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001)
+#define CPU_FTR_L2CR ASM_CONST(0x00000002)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008)
+#define CPU_FTR_TAU ASM_CONST(0x00000010)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020)
+#define CPU_FTR_USE_TB ASM_CONST(0x00000040)
+#define CPU_FTR_L2CSR ASM_CONST(0x00000080)
+#define CPU_FTR_601 ASM_CONST(0x00000100)
+#define CPU_FTR_DBELL ASM_CONST(0x00000200)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400)
+#define CPU_FTR_L3CR ASM_CONST(0x00000800)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x00008000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x00010000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000)
+#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x00200000)
+#define CPU_FTR_REAL_LE ASM_CONST(0x00400000)
+#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000)
+#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000)
+#define CPU_FTR_SPE ASM_CONST(0x02000000)
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000)
+#define CPU_FTR_LWSYNC ASM_CONST(0x08000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000)
+#define CPU_FTR_EMB_HV ASM_CONST(0x40000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -177,34 +161,42 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
-#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
-#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
-#define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000)
-#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000)
-#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
-#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
-#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
-#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
-#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
-#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000)
-#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
-#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
-#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
-#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
-#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
-#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_ARCH_207S LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000)
+#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
+#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000)
+#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000)
+#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000)
+#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000)
+#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000)
+#define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000)
+#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000)
+#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
+#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
#ifndef __ASSEMBLY__
-#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \
- CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE)
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+
+#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \
+ MMU_FTR_16M_PAGE)
/* We only set the altivec features if the kernel was compiled with altivec
* support
@@ -243,6 +235,15 @@ extern const char *powerpc_base_platform;
#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
#endif
+/* We only set the TM feature if the kernel was compiled with TM support */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#else
+#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
+#endif
+
/* We need to mark all pages as being coherent if we're SMP or we have a
* 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
* require it for PCI "streaming/prefetch" to work properly.
@@ -372,17 +373,30 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+/*
+ * e5500/e6500 erratum A-006958 is a timebase bug that can use the
+ * same workaround as CPU_FTR_CELL_TB_BUG.
+ */
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
+#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
- CPU_FTR_DBELL)
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -396,46 +410,68 @@ extern const char *powerpc_base_platform;
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
- CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_HVMODE | CPU_FTR_DABRX)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
- CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \
+ CPU_FTR_DABRX)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
- CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX)
+#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
+#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
- CPU_FTR_UNALIGNED_LD_STD)
+ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+ CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | \
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_ICSWX | CPU_FTR_DABRX )
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2)
+#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
- CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
- CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+ CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
+ CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_VSX)
+#endif
#else
enum {
CPU_FTRS_POSSIBLE =
@@ -462,23 +498,30 @@ enum {
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
#ifdef CONFIG_PPC_47x
- CPU_FTRS_47X |
+ CPU_FTRS_47X | CPU_FTR_476_DD2 |
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
#endif
0,
};
#endif /* __powerpc64__ */
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2)
+#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
#else
enum {
CPU_FTRS_ALWAYS =
@@ -508,8 +551,12 @@ enum {
CPU_FTRS_E200 &
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
#endif
+ ~CPU_FTR_EMB_HV & /* can be removed at runtime */
CPU_FTRS_POSSIBLE,
};
#endif /* __powerpc64__ */
@@ -522,11 +569,8 @@ static inline int cpu_has_feature(unsigned long feature)
& feature);
}
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
#define HBP_NUM 1
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_CPUTABLE_H */
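
Illustrative usage (hypothetical helper): kernel code tests these bits through cpu_has_feature() rather than reading cur_cpu_spec directly, which lets feature sections be patched at boot.

static inline bool want_vsx_path(void)
{
	/* True only on CPUs that advertise VSX, e.g. POWER7 and later. */
	return cpu_has_feature(CPU_FTR_VSX);
}
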
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e18447c62..2bf8e9307be 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -9,7 +9,7 @@
* Note: This implementation is limited to a power of 2 number of
* threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads as long
- * as the CPU numbers are still allocated, just not brought offline).
+ * as the CPU numbers are still allocated, just not brought online).
*
* However, the API allows for a different implementation in the future
* if needed, as long as you only use the functions and not the variables
@@ -18,10 +18,12 @@
#ifdef CONFIG_SMP
extern int threads_per_core;
+extern int threads_per_subcore;
extern int threads_shift;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core 1
+#define threads_per_subcore 1
#define threads_shift 0
#define threads_core_mask (CPU_MASK_CPU0)
#endif
@@ -37,16 +39,16 @@ extern cpumask_t threads_core_mask;
* This can typically be used for things like IPI for tlb invalidations
* since those need to be done only once per core/TLB
*/
-static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
cpumask_t tmp, res;
int i;
- res = CPU_MASK_NONE;
+ cpumask_clear(&res);
for (i = 0; i < NR_CPUS; i += threads_per_core) {
- cpus_shift_left(tmp, threads_core_mask, i);
- if (cpus_intersects(threads, tmp))
- cpu_set(i, res);
+ cpumask_shift_left(&tmp, &threads_core_mask, i);
+ if (cpumask_intersects(threads, &tmp))
+ cpumask_set_cpu(i, &res);
}
return res;
}
@@ -58,25 +60,33 @@ static inline int cpu_nr_cores(void)
static inline cpumask_t cpu_online_cores_map(void)
{
- return cpu_thread_mask_to_cores(cpu_online_map);
+ return cpu_thread_mask_to_cores(cpu_online_mask);
}
-static inline int cpu_thread_to_core(int cpu)
-{
- return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
static inline int cpu_thread_in_core(int cpu)
{
return cpu & (threads_per_core - 1);
}
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_thread_in_subcore(int cpu)
+{
+ return cpu & (threads_per_subcore - 1);
+}
+
+static inline int cpu_first_thread_sibling(int cpu)
{
return cpu & ~(threads_per_core - 1);
}
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
{
return cpu | (threads_per_core - 1);
}
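
A worked example of the sibling arithmetic above, assuming threads_per_core == 4 (so threads_shift == 2); the values are illustrative:

/* For cpu 6 with threads_per_core == 4:
 *   cpu_thread_in_core(6)       == (6 & 3)  == 2
 *   cpu_first_thread_sibling(6) == (6 & ~3) == 4
 *   cpu_last_thread_sibling(6)  == (6 | 3)  == 7
 * i.e. cpu 6 is thread 2 of the core spanning cpus 4..7.
 */
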
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bdfbec..607559ab271 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
* the same units as the timebase. Otherwise we measure cpu time
* in jiffies using the generic definitions.
*/
@@ -16,7 +16,7 @@
#ifndef __POWERPC_CPUTIME_H
#define __POWERPC_CPUTIME_H
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm-generic/cputime.h>
#ifdef __KERNEL__
static inline void setup_cputime_one_jiffy(void) { }
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { }
#include <asm/time.h>
#include <asm/param.h>
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
-
-#define cputime_zero ((cputime_t)0)
-#define cputime_max ((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-
-#define cputime64_zero ((cputime64_t)0)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime_to_cputime64(__ct) (__ct)
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
#ifdef __KERNEL__
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct)
{
if (cpu_has_feature(CPU_FTR_SPURR) &&
__get_cpu_var(cputime_last_delta))
- return ct * __get_cpu_var(cputime_scaled_last_delta) /
- __get_cpu_var(cputime_last_delta);
+ return (__force u64) ct *
+ __get_cpu_var(cputime_scaled_last_delta) /
+ __get_cpu_var(cputime_last_delta);
return ct;
}
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
- cputime_t ct;
+ u64 ct;
u64 sec;
/* have to be a little careful about overflow */
@@ -114,28 +98,28 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
do_div(ct, HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime64_t) ct;
}
static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/*
* Convert cputime <-> microseconds
*/
-extern u64 __cputime_msec_factor;
+extern u64 __cputime_usec_factor;
static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+ return mulhdu((__force u64) ct, __cputime_usec_factor);
}
static inline cputime_t usecs_to_cputime(const unsigned long us)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -143,13 +127,15 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
- do_div(ct, 1000);
+ do_div(ct, 1000000);
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
@@ -157,12 +143,12 @@ extern u64 __cputime_sec_factor;
static inline unsigned long cputime_to_secs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_sec_factor);
+ return mulhdu((__force u64) ct, __cputime_sec_factor);
}
static inline cputime_t secs_to_cputime(const unsigned long sec)
{
- return (cputime_t) sec * tb_ticks_per_sec;
+ return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
}
/*
@@ -170,7 +156,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec)
*/
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
@@ -182,11 +168,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
static inline cputime_t timespec_to_cputime(const struct timespec *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_nsec * tb_ticks_per_sec;
do_div(ct, 1000000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
@@ -194,7 +180,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p)
*/
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
@@ -206,11 +192,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
static inline cputime_t timeval_to_cputime(const struct timeval *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_usec * tb_ticks_per_sec;
do_div(ct, 1000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
@@ -220,12 +206,12 @@ extern u64 __cputime_clockt_factor;
static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{
- return mulhdu(ct, __cputime_clockt_factor);
+ return mulhdu((__force u64) ct, __cputime_clockt_factor);
}
static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -236,12 +222,14 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
do_div(ct, USER_HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime_t) ct;
}
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+
#endif /* __KERNEL__ */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
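
The conversions above avoid a division on hot paths by folding it into a precomputed fixed-point factor; mulhdu() returns the high 64 bits of the 128-bit product. A sketch of the idea (the factor derivation is assumed from the timebase setup code, values illustrative):

/* factor ~= (USEC_PER_SEC << 64) / tb_ticks_per_sec, so that
 *   mulhdu(ticks, factor) == ticks * USEC_PER_SEC / tb_ticks_per_sec
 * without dividing at conversion time. E.g. with
 * tb_ticks_per_sec == 512000000, one second's worth of timebase
 * ticks converts to 1000000 microseconds.
 */
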
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 0893ab9343a..5fa6b20eba1 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Freescale Semicondutor, Inc.
+ * Copyright 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,18 +18,48 @@
#include <asm/ppc-opcode.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
-#define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28)
+#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x) ((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK 0x3fff
enum ppc_dbell {
PPC_DBELL = 0, /* doorbell */
PPC_DBELL_CRIT = 1, /* critical doorbell */
PPC_G_DBELL = 2, /* guest doorbell */
PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */
PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
+ PPC_DBELL_SERVER = 5, /* doorbell on server */
};
-extern void doorbell_message_pass(int target, int msg);
+#ifdef CONFIG_PPC_BOOK3S
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER
+#define SPRN_DOORBELL_CPUTAG SPRN_TIR
+#define PPC_DBELL_TAG_MASK 0x7f
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ else
+ __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg));
+}
+
+#else /* CONFIG_PPC_BOOK3S */
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL
+#define SPRN_DOORBELL_CPUTAG SPRN_PIR
+#define PPC_DBELL_TAG_MASK 0x3fff
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+#endif /* CONFIG_PPC_BOOK3S */
+
+extern void doorbell_cause_ipi(int cpu, unsigned long data);
extern void doorbell_exception(struct pt_regs *regs);
-extern void doorbell_check_self(void);
extern void doorbell_setup_this_cpu(void);
static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
@@ -37,7 +67,7 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
(tag & 0x07ffffff);
- __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ _ppc_msgsnd(msg);
}
#endif /* _ASM_POWERPC_DBELL_H */
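
A hypothetical usage sketch of the helpers above: send a directed doorbell to the hardware thread identified by a tag read from SPRN_DOORBELL_CPUTAG.

static inline void send_dbell(u32 tag)
{
	/* PPC_DBELL_MSGTYPE resolves to PPC_DBELL_SERVER on Book3S and
	 * PPC_DBELL on Book3E; the tag mask width differs accordingly. */
	ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag & PPC_DBELL_TAG_MASK);
}
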
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
index acd491dbd45..93a68b28e69 100644
--- a/arch/powerpc/include/asm/dcr-mmio.h
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -51,10 +51,6 @@ static inline void dcr_write_mmio(dcr_host_mmio_t host,
out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
}
-extern u64 of_translate_dcr_address(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int *stride);
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_MMIO_H */
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
new file mode 100644
index 00000000000..a954e497504
--- /dev/null
+++ b/arch/powerpc/include/asm/debug.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_DEBUG_H
+#define _ASM_POWERPC_DEBUG_H
+
+#include <asm/hw_breakpoint.h>
+
+struct pt_regs;
+
+extern struct dentry *powerpc_debugfs_root;
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_break_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+ if (unlikely(__ ## __NAME)) \
+ return __ ## __NAME(regs); \
+ return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_break_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+void set_breakpoint(struct arch_hw_breakpoint *brk);
+void __set_breakpoint(struct arch_hw_breakpoint *brk);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+extern void do_send_trap(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code, int signal_code, int brkpt);
+#else
+
+extern void do_break(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+#endif
+
+#endif /* _ASM_POWERPC_DEBUG_H */
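
For clarity, this is what one DEBUGGER_BOILERPLATE() expansion produces, shown for debugger_bpt (derived mechanically from the macro above, not additional code):

static inline int debugger_bpt(struct pt_regs *regs)
{
	if (unlikely(__debugger_bpt))
		return __debugger_bpt(regs);
	return 0;
}
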
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4fcbe..38faeded7d5 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
struct dma_map_ops;
struct device_node;
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
@@ -22,13 +28,24 @@ struct dev_archdata {
void *iommu_table_base;
} dma_data;
+#ifdef CONFIG_IOMMU_API
+ void *iommu_domain;
+#endif
#ifdef CONFIG_SWIOTLB
dma_addr_t max_direct_dma_addr;
#endif
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev;
+#endif
+#ifdef CONFIG_FAIL_IOMMU
+ int fail_iommu;
+#endif
};
struct pdev_archdata {
u64 dma_mask;
};
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2..6330a61b875 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,42 @@ static inline unsigned int get_d(u32 inst)
return inst & 0xffff;
}
+static inline unsigned int get_oc(u32 inst)
+{
+ return (inst >> 11) & 0x7fff;
+}
+
+#define IS_XFORM(inst) (get_op(inst) == 31)
+#define IS_DSFORM(inst) (get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+ unsigned dsisr;
+
+ /* bits 6:15 --> 22:31 */
+ dsisr = (instr & 0x03ff0000) >> 16;
+
+ if (IS_XFORM(instr)) {
+ /* bits 29:30 --> 15:16 */
+ dsisr |= (instr & 0x00000006) << 14;
+ /* bit 25 --> 17 */
+ dsisr |= (instr & 0x00000040) << 8;
+ /* bits 21:24 --> 18:21 */
+ dsisr |= (instr & 0x00000780) << 3;
+ } else {
+ /* bit 5 --> 17 */
+ dsisr |= (instr & 0x04000000) >> 12;
+ /* bits 1:4 --> 18:21 */
+ dsisr |= (instr & 0x78000000) >> 17;
+ /* bits 30:31 --> 12:13 */
+ if (IS_DSFORM(instr))
+ dsisr |= (instr & 0x00000003) << 18;
+ }
+
+ return dsisr;
+}
#endif /* __ASM_PPC_DISASSEMBLE_H__ */
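
A worked example of the bit shuffling in make_dsisr() above (instruction value illustrative): for stw r3,8(r4), instr == 0x90640008.

/* get_op(0x90640008) == 36, so neither X-form nor DS-form applies:
 *   bits 6:15 -> 22:31 : (0x90640008 & 0x03ff0000) >> 16 = 0x064
 *   bits 1:4  -> 18:21 : (0x90640008 & 0x78000000) >> 17 = 0x800
 *   bit 5     -> 17    : (0x90640008 & 0x04000000) >> 12 = 0
 * giving make_dsisr(0x90640008) == 0x864.
 */
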
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6d2416a8570..150866b2a3f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -22,10 +22,15 @@
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
+extern int dma_direct_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle,
+ size_t size, struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -42,6 +47,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
@@ -128,24 +134,31 @@ static inline int dma_supported(struct device *dev, u64 mask)
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!dma_ops);
- cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+ cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
@@ -153,13 +166,14 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+ dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
@@ -198,6 +212,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
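
A hypothetical driver-side sketch of the attrs-based API above; dma_alloc_coherent() now expands to dma_alloc_attrs(..., NULL).

static void *alloc_dma_ring(struct device *dev, size_t size, dma_addr_t *bus)
{
	/* Returns the CPU virtual address and fills *bus with the
	 * device-visible DMA address; GFP_KERNEL context assumed. */
	return dma_alloc_coherent(dev, size, bus, GFP_KERNEL);
}
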
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index a7e06e25c70..a5c6d83b5f6 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -16,15 +16,10 @@
*
* None of this really applies for Power Macintoshes. There is
* basically just enough here to get kernel/dma.c to compile.
- *
- * There may be some comments or restrictions made here which are
- * not valid for the PReP platform. Take what you read
- * with a grain of salt.
*/
#include <asm/io.h>
#include <linux/spinlock.h>
-#include <asm/system.h>
#ifndef MAX_DMA_CHANNELS
#define MAX_DMA_CHANNELS 8
@@ -34,8 +29,6 @@
/* Doesn't really apply... */
#define MAX_DMA_ADDRESS (~0UL)
-#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
-
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
@@ -60,7 +53,6 @@
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
- * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
* On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
@@ -354,7 +346,5 @@ extern int isa_dma_bridge_buggy;
#define isa_dma_bridge_buggy (0)
#endif
-#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 66ea9b8b95c..fab7743c264 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -1,6 +1,6 @@
/*
- * eeh.h
* Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
+ * Copyright 2001-2012 IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/time.h>
struct pci_dev;
struct pci_bus;
@@ -31,45 +32,246 @@ struct device_node;
#ifdef CONFIG_EEH
-extern int eeh_subsystem_enabled;
+/* EEH subsystem flags */
+#define EEH_ENABLED 0x1 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x2 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x4 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x8 /* From device tree */
-/* Values for eeh_mode bits in device_node */
-#define EEH_MODE_SUPPORTED (1<<0)
-#define EEH_MODE_NOCHECK (1<<1)
-#define EEH_MODE_ISOLATED (1<<2)
-#define EEH_MODE_RECOVERING (1<<3)
-#define EEH_MODE_IRQ_DISABLED (1<<4)
+/*
+ * Delay for PE reset, all in ms
+ *
+ * The PCI specification calls for a reset hold time of 100
+ * milliseconds; we use 250 milliseconds. The PCI bus settlement
+ * time is specified as 1.5 seconds; we use 1.8 seconds.
+ */
+#define EEH_PE_RST_HOLD_TIME 250
+#define EEH_PE_RST_SETTLE_TIME 1800
+
+/*
+ * The struct is used to track PE-related EEH functionality.
+ * In theory, one instance of the struct is created for each
+ * particular PE. In practice, PEs correlate with each other:
+ * the struct has to reflect that hierarchy so that all affected
+ * PEs can easily be picked up when one particular PE has EEH
+ * errors.
+ *
+ * Also, one particular PE might be composed of a PCI device, a
+ * PCI bus and its subordinate components. The struct needs to
+ * carry that information as well. Furthermore, one particular PE
+ * is only meaningful within its corresponding PHB. Therefore, the
+ * root PEs should be created against existing PHBs in one-to-one
+ * fashion.
+ */
+#define EEH_PE_INVALID (1 << 0) /* Invalid */
+#define EEH_PE_PHB (1 << 1) /* PHB PE */
+#define EEH_PE_DEVICE (1 << 2) /* Device PE */
+#define EEH_PE_BUS (1 << 3) /* Bus PE */
+
+#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
+#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
+#define EEH_PE_RESET (1 << 2) /* PE reset in progress */
+
+#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
+
+struct eeh_pe {
+ int type; /* PE type: PHB/Bus/Device */
+ int state; /* PE EEH dependent mode */
+ int config_addr; /* Traditional PCI address */
+ int addr; /* PE configuration address */
+ struct pci_controller *phb; /* Associated PHB */
+ struct pci_bus *bus; /* Top PCI bus for bus PE */
+ int check_count; /* Times of ignored error */
+ int freeze_count; /* Times of froze up */
+ struct timeval tstamp; /* Time on first-time freeze */
+ int false_positives; /* Times of reported #ff's */
+ struct eeh_pe *parent; /* Parent PE */
+ struct list_head child_list; /* Link PE to the child list */
+ struct list_head edevs; /* Link list of EEH devices */
+ struct list_head child; /* Child PEs */
+};
+
+#define eeh_pe_for_each_dev(pe, edev, tmp) \
+ list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
+
+/*
+ * The struct is used to track EEH state for the associated
+ * PCI device node or PCI device. In the future, it might
+ * represent a PE as well, so that EEH devices can form
+ * another tree alongside the existing tree of PCI buses
+ * and PCI devices.
+ */
+#define EEH_DEV_BRIDGE (1 << 0) /* PCI bridge */
+#define EEH_DEV_ROOT_PORT (1 << 1) /* PCIe root port */
+#define EEH_DEV_DS_PORT (1 << 2) /* Downstream port */
+#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
+#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
+
+#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */
+#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */
+#define EEH_DEV_REMOVED (1 << 10) /* Removed permanently */
+
+struct eeh_dev {
+ int mode; /* EEH mode */
+ int class_code; /* Class code of the device */
+ int config_addr; /* Config address */
+ int pe_config_addr; /* PE config address */
+ u32 config_space[16]; /* Saved PCI config space */
+ int pcix_cap; /* Saved PCIx capability */
+ int pcie_cap; /* Saved PCIe capability */
+ int aer_cap; /* Saved AER capability */
+ struct eeh_pe *pe; /* Associated PE */
+ struct list_head list; /* Form link list in the PE */
+ struct pci_controller *phb; /* Associated PHB */
+ struct device_node *dn; /* Associated device node */
+ struct pci_dev *pdev; /* Associated PCI device */
+ struct pci_bus *bus; /* PCI bus for partial hotplug */
+};
+
+static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
+{
+ return edev ? edev->dn : NULL;
+}
+
+static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
+{
+ return edev ? edev->pdev : NULL;
+}
+
+/* Return values from eeh_ops::next_error */
+enum {
+ EEH_NEXT_ERR_NONE = 0,
+ EEH_NEXT_ERR_INF,
+ EEH_NEXT_ERR_FROZEN_PE,
+ EEH_NEXT_ERR_FENCED_PHB,
+ EEH_NEXT_ERR_DEAD_PHB,
+ EEH_NEXT_ERR_DEAD_IOC
+};
+
+/*
+ * The struct is used to track the registered EEH operation
+ * callback functions. Those callbacks are heavily platform
+ * dependent, which means each platform should register its
+ * own EEH operation callbacks before performing any further
+ * EEH operations.
+ */
+#define EEH_OPT_DISABLE 0 /* EEH disable */
+#define EEH_OPT_ENABLE 1 /* EEH enable */
+#define EEH_OPT_THAW_MMIO 2 /* MMIO enable */
+#define EEH_OPT_THAW_DMA 3 /* DMA enable */
+#define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */
+#define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */
+#define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */
+#define EEH_STATE_MMIO_ACTIVE (1 << 3) /* Active MMIO */
+#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
+#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
+#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
+#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
+#define EEH_RESET_HOT 1 /* Hot reset */
+#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
+#define EEH_LOG_TEMP 1 /* EEH temporary error log */
+#define EEH_LOG_PERM 2 /* EEH permanent error log */
+
+struct eeh_ops {
+ char *name;
+ int (*init)(void);
+ int (*post_init)(void);
+ void* (*of_probe)(struct device_node *dn, void *flag);
+ int (*dev_probe)(struct pci_dev *dev, void *flag);
+ int (*set_option)(struct eeh_pe *pe, int option);
+ int (*get_pe_addr)(struct eeh_pe *pe);
+ int (*get_state)(struct eeh_pe *pe, int *state);
+ int (*reset)(struct eeh_pe *pe, int option);
+ int (*wait_state)(struct eeh_pe *pe, int max_wait);
+ int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
+ int (*configure_bridge)(struct eeh_pe *pe);
+ int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
+ int (*write_config)(struct device_node *dn, int where, int size, u32 val);
+ int (*next_error)(struct eeh_pe **pe);
+ int (*restore_config)(struct device_node *dn);
+};
+
+extern int eeh_subsystem_flags;
+extern struct eeh_ops *eeh_ops;
+extern raw_spinlock_t confirm_error_lock;
+
+static inline bool eeh_enabled(void)
+{
+ if ((eeh_subsystem_flags & EEH_FORCE_DISABLED) ||
+ !(eeh_subsystem_flags & EEH_ENABLED))
+ return false;
+
+ return true;
+}
+
+static inline void eeh_set_enable(bool mode)
+{
+ if (mode)
+ eeh_subsystem_flags |= EEH_ENABLED;
+ else
+ eeh_subsystem_flags &= ~EEH_ENABLED;
+}
+
+static inline void eeh_probe_mode_set(int flag)
+{
+ eeh_subsystem_flags |= flag;
+}
+
+static inline int eeh_probe_mode_devtree(void)
+{
+ return (eeh_subsystem_flags & EEH_PROBE_MODE_DEVTREE);
+}
+
+static inline int eeh_probe_mode_dev(void)
+{
+ return (eeh_subsystem_flags & EEH_PROBE_MODE_DEV);
+}
-/* Max number of EEH freezes allowed before we consider the device
- * to be permanently disabled. */
+static inline void eeh_serialize_lock(unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&confirm_error_lock, *flags);
+}
+
+static inline void eeh_serialize_unlock(unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
+}
+
+/*
+ * Max number of EEH freezes allowed before we consider the device
+ * to be permanently disabled.
+ */
#define EEH_MAX_ALLOWED_FREEZES 5
-void __init eeh_init(void);
+typedef void *(*eeh_traverse_func)(void *data, void *flag);
+int eeh_phb_pe_create(struct pci_controller *phb);
+struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
+struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
+int eeh_add_to_parent_pe(struct eeh_dev *edev);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
+void eeh_pe_update_time_stamp(struct eeh_pe *pe);
+void *eeh_pe_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag);
+void *eeh_pe_dev_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag);
+void eeh_pe_restore_bars(struct eeh_pe *pe);
+const char *eeh_pe_loc_get(struct eeh_pe *pe);
+struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
+
+void *eeh_dev_init(struct device_node *dn, void *data);
+void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
+int eeh_init(void);
+int __init eeh_ops_register(struct eeh_ops *ops);
+int __exit eeh_ops_unregister(const char *name);
unsigned long eeh_check_failure(const volatile void __iomem *token,
unsigned long val);
-int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
-void __init pci_addr_cache_build(void);
-
-/**
- * eeh_add_device_early
- * eeh_add_device_late
- *
- * Perform eeh initialization for devices added after boot.
- * Call eeh_add_device_early before doing any i/o to the
- * device (including config space i/o). Call eeh_add_device_late
- * to finish the eeh setup for this device.
- */
+int eeh_dev_check_failure(struct eeh_dev *edev);
+void eeh_addr_cache_build(void);
+void eeh_add_device_early(struct device_node *);
void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
-
-/**
- * eeh_remove_device_recursive - undo EEH for device & children.
- * @dev: pci device to be removed
- *
- * As above, this removes the device; it also removes child
- * pci devices as well.
- */
-void eeh_remove_bus_device(struct pci_dev *);
+void eeh_add_sysfs_files(struct pci_bus *);
+void eeh_remove_device(struct pci_dev *);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -77,7 +279,7 @@ void eeh_remove_bus_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@@ -87,25 +289,47 @@ void eeh_remove_bus_device(struct pci_dev *);
#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
#else /* !CONFIG_EEH */
-static inline void eeh_init(void) { }
-static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
+static inline bool eeh_enabled(void)
{
- return val;
+ return false;
}
-static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
+static inline void eeh_set_enable(bool mode) { }
+
+static inline int eeh_init(void)
{
return 0;
}
-static inline void pci_addr_cache_build(void) { }
+static inline void *eeh_dev_init(struct device_node *dn, void *data)
+{
+ return NULL;
+}
+
+static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
+
+static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
+{
+ return val;
+}
+
+#define eeh_dev_check_failure(x) (0)
+
+static inline void eeh_addr_cache_build(void) { }
+
+static inline void eeh_add_device_early(struct device_node *dn) { }
static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+static inline void eeh_add_device_late(struct pci_dev *dev) { }
+
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
-static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+
+static inline void eeh_remove_device(struct pci_dev *dev) { }
+
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
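
As a hedged illustration of the macros above: the EEH-aware MMIO read helpers in this header follow the pattern below, where all-ones data triggers the out-of-line check. The helper name is made up; in_le32() is the usual powerpc accessor from asm/io.h:

    static inline u32 example_eeh_readl(const volatile void __iomem *addr)
    {
            u32 val = in_le32(addr);

            /* All-ones data is the signature of an isolated device;
             * only then take the slow path out of line. */
            if (EEH_POSSIBLE_ERROR(val, u32))
                    return eeh_check_failure(addr, val);
            return val;
    }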
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index cc3cb04539a..1e551a2d6f8 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -1,6 +1,4 @@
/*
- * eeh_event.h
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -22,32 +20,21 @@
#define ASM_POWERPC_EEH_EVENT_H
#ifdef __KERNEL__
-/** EEH event -- structure holding pci controller data that describes
- * a change in the isolation status of a PCI slot. A pointer
- * to this struct is passed as the data pointer in a notify callback.
+/*
+ * Structure holding PCI controller data that describes a
+ * change in the isolation status of a PCI slot. A pointer
+ * to this struct is passed as the data pointer in a notify
+ * callback.
*/
struct eeh_event {
- struct list_head list;
- struct device_node *dn; /* struct device node */
- struct pci_dev *dev; /* affected device */
+ struct list_head list; /* to form event queue */
+ struct eeh_pe *pe; /* EEH PE */
};
-/**
- * eeh_send_failure_event - generate a PCI error event
- * @dev pci device
- *
- * This routine builds a PCI error event which will be delivered
- * to all listeners on the eeh_notifier_chain.
- *
- * This routine can be called within an interrupt context;
- * the actual event will be delivered in a normal context
- * (from a workqueue).
- */
-int eeh_send_failure_event (struct device_node *dn,
- struct pci_dev *dev);
-
-/* Main recovery function */
-struct pci_dn * handle_eeh_events (struct eeh_event *);
+int eeh_event_init(void);
+int eeh_send_failure_event(struct eeh_pe *pe);
+void eeh_remove_event(struct eeh_pe *pe, bool force);
+void eeh_handle_event(struct eeh_pe *pe);
#endif /* __KERNEL__ */
#endif /* ASM_POWERPC_EEH_EVENT_H */
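
A sketch of the intended flow with the PE-based API, assuming a hypothetical error interrupt whose handler data has already been mapped to the affected PE; eeh_send_failure_event() may be called from interrupt context, with the queued event processed later in normal context:

    #include <linux/interrupt.h>

    static irqreturn_t example_eeh_irq(int irq, void *data)
    {
            struct eeh_pe *pe = data;       /* PE lookup is illustrative */

            eeh_send_failure_event(pe);     /* safe in interrupt context */
            return IRQ_HANDLED;
    }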
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 00000000000..dc7d48e3ea9
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
+/*
+ * EHV_PIC private definitions and structure.
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __EHV_PIC_H__
+#define __EHV_PIC_H__
+
+#include <linux/irq.h>
+
+#define NR_EHV_PIC_INTS 1024
+
+#define EHV_PIC_INFO(name) EHV_PIC_##name
+
+#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
+#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
+#define EHV_PIC_VECPRI_SENSE_EDGE 0
+#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
+#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
+#define EHV_PIC_VECPRI_SENSE_MASK 0x2
+
+struct ehv_pic {
+ /* The remapper for this EHV_PIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* core int flag */
+ int coreint_flag;
+};
+
+void ehv_pic_init(void);
+unsigned int ehv_pic_get_irq(void);
+
+#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 2b917c69ed1..888d8f3f252 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -1,90 +1,3 @@
-#ifndef _ASM_POWERPC_ELF_H
-#define _ASM_POWERPC_ELF_H
-
-#ifdef __KERNEL__
-#include <linux/sched.h> /* for task_struct */
-#include <asm/page.h>
-#include <asm/string.h>
-#endif
-
-#include <linux/types.h>
-
-#include <asm/ptrace.h>
-#include <asm/cputable.h>
-#include <asm/auxvec.h>
-
-/* PowerPC relocations defined by the ABIs */
-#define R_PPC_NONE 0
-#define R_PPC_ADDR32 1 /* 32bit absolute address */
-#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
-#define R_PPC_ADDR16 3 /* 16bit absolute address */
-#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
-#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
-#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
-#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
-#define R_PPC_ADDR14_BRTAKEN 8
-#define R_PPC_ADDR14_BRNTAKEN 9
-#define R_PPC_REL24 10 /* PC relative 26 bit */
-#define R_PPC_REL14 11 /* PC relative 16 bit */
-#define R_PPC_REL14_BRTAKEN 12
-#define R_PPC_REL14_BRNTAKEN 13
-#define R_PPC_GOT16 14
-#define R_PPC_GOT16_LO 15
-#define R_PPC_GOT16_HI 16
-#define R_PPC_GOT16_HA 17
-#define R_PPC_PLTREL24 18
-#define R_PPC_COPY 19
-#define R_PPC_GLOB_DAT 20
-#define R_PPC_JMP_SLOT 21
-#define R_PPC_RELATIVE 22
-#define R_PPC_LOCAL24PC 23
-#define R_PPC_UADDR32 24
-#define R_PPC_UADDR16 25
-#define R_PPC_REL32 26
-#define R_PPC_PLT32 27
-#define R_PPC_PLTREL32 28
-#define R_PPC_PLT16_LO 29
-#define R_PPC_PLT16_HI 30
-#define R_PPC_PLT16_HA 31
-#define R_PPC_SDAREL16 32
-#define R_PPC_SECTOFF 33
-#define R_PPC_SECTOFF_LO 34
-#define R_PPC_SECTOFF_HI 35
-#define R_PPC_SECTOFF_HA 36
-
-/* PowerPC relocations defined for the TLS access ABI. */
-#define R_PPC_TLS 67 /* none (sym+add)@tls */
-#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
-#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
-#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
-#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
-#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
-#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
-#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
-#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
-#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
-#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
-#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
-#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
-#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
-#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
-#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
-#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
-#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
-#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
-#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
-#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
-#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
-#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
-#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
-#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
-#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
-#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
-#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
-
-/* keep this the last entry. */
-#define R_PPC_NUM 95
-
/*
* ELF register definitions..
*
@@ -93,77 +6,14 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
-#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
-#define ELF_NFPREG 33 /* includes fpscr */
-
-typedef unsigned long elf_greg_t64;
-typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
-
-typedef unsigned int elf_greg_t32;
-typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
-typedef elf_gregset_t32 compat_elf_gregset_t;
-
-/*
- * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
- */
-#ifdef __powerpc64__
-# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
-# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
-# define ELF_NVSRHALFREG 32 /* Half the vsx registers */
-# define ELF_GREG_TYPE elf_greg_t64
-#else
-# define ELF_NEVRREG 34 /* includes acc (as 2) */
-# define ELF_NVRREG 33 /* includes vscr */
-# define ELF_GREG_TYPE elf_greg_t32
-# define ELF_ARCH EM_PPC
-# define ELF_CLASS ELFCLASS32
-# define ELF_DATA ELFDATA2MSB
-#endif /* __powerpc64__ */
-
-#ifndef ELF_ARCH
-# define ELF_ARCH EM_PPC64
-# define ELF_CLASS ELFCLASS64
-# define ELF_DATA ELFDATA2MSB
- typedef elf_greg_t64 elf_greg_t;
- typedef elf_gregset_t64 elf_gregset_t;
-#else
- /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
- typedef elf_greg_t32 elf_greg_t;
- typedef elf_gregset_t32 elf_gregset_t;
-#endif /* ELF_ARCH */
-
-/* Floating point registers */
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/* Altivec registers */
-/*
- * The entries with indexes 0-31 contain the corresponding vector registers.
- * The entry with index 32 contains the vscr as the last word (offset 12)
- * within the quadword. This allows the vscr to be stored as either a
- * quadword (since it must be copied via a vector register to/from storage)
- * or as a word.
- *
- * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
- * word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the same
- * structures. This also simplifies the implementation of a bi-arch
- * (combined (32- and 64-bit) gdb.
- *
- * Note that it's _not_ compatible with 32 bits ucontext which stuffs the
- * vrsave along with vscr and so only uses 33 vectors for the register set
- */
-typedef __vector128 elf_vrreg_t;
-typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
-#ifdef __powerpc64__
-typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
-typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
-#endif
+#include <linux/sched.h> /* for task_struct */
+#include <asm/page.h>
+#include <asm/string.h>
+#include <uapi/asm/elf.h>
-#ifdef __KERNEL__
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -181,6 +31,8 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
extern unsigned long randomize_et_dyn(unsigned long base);
#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
+#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
/*
* Our registers are always unsigned longs, whether we're a 32 bit
* process or 64 bit, on either a 64 bit or 32 bit kernel.
@@ -211,6 +63,7 @@ typedef elf_vrregset_t elf_fpxregset_t;
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
@@ -235,6 +88,10 @@ typedef elf_vrregset_t elf_fpxregset_t;
#ifdef __powerpc64__
# define SET_PERSONALITY(ex) \
do { \
+ if (((ex).e_flags & 0x3) == 2) \
+ set_thread_flag(TIF_ELF2ABI); \
+ else \
+ clear_thread_flag(TIF_ELF2ABI); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_32BIT); \
else \
@@ -253,8 +110,6 @@ do { \
# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
(exec_stk == EXSTACK_DEFAULT) : 0)
#else
-# define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
#endif /* __powerpc64__ */
@@ -267,7 +122,7 @@ extern int ucache_bsize;
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (is_32bit_task() ? \
@@ -277,153 +132,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
-#endif /* __KERNEL__ */
-
-/*
- * The requirements here are:
- * - keep the final alignment of sp (sp & 0xf)
- * - make sure the 32-bit value at the first 16 byte aligned position of
- * AUXV is greater than 16 for glibc compatibility.
- * AT_IGNOREPPC is used for that.
- * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
- * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
- * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
- */
-#define ARCH_DLINFO \
-do { \
- /* Handle glibc compatibility. */ \
- NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
- NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
- /* Cache size items */ \
- NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
- NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
- NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
-} while (0)
-
-/* PowerPC64 relocations defined by the ABIs */
-#define R_PPC64_NONE R_PPC_NONE
-#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address. */
-#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned. */
-#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address. */
-#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of abs. address. */
-#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of abs. address. */
-#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
-#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned. */
-#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
-#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
-#define R_PPC64_REL24 R_PPC_REL24 /* PC relative 26 bit, word aligned. */
-#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit. */
-#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
-#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
-#define R_PPC64_GOT16 R_PPC_GOT16
-#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
-#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
-#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
-
-#define R_PPC64_COPY R_PPC_COPY
-#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
-#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
-#define R_PPC64_RELATIVE R_PPC_RELATIVE
-
-#define R_PPC64_UADDR32 R_PPC_UADDR32
-#define R_PPC64_UADDR16 R_PPC_UADDR16
-#define R_PPC64_REL32 R_PPC_REL32
-#define R_PPC64_PLT32 R_PPC_PLT32
-#define R_PPC64_PLTREL32 R_PPC_PLTREL32
-#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
-#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
-#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
-
-#define R_PPC64_SECTOFF R_PPC_SECTOFF
-#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
-#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
-#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
-#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2. */
-#define R_PPC64_ADDR64 38 /* doubleword64 S + A. */
-#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A). */
-#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A). */
-#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A). */
-#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A). */
-#define R_PPC64_UADDR64 43 /* doubleword64 S + A. */
-#define R_PPC64_REL64 44 /* doubleword64 S + A - P. */
-#define R_PPC64_PLT64 45 /* doubleword64 L + A. */
-#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P. */
-#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */
-#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.). */
-#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.). */
-#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.). */
-#define R_PPC64_TOC 51 /* doubleword64 .TOC. */
-#define R_PPC64_PLTGOT16 52 /* half16* M + A. */
-#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A). */
-#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A). */
-#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A). */
-
-#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2. */
-#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2. */
-#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2. */
-#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2. */
-#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2. */
-#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2. */
-#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2. */
-#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2. */
-#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */
-#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */
-#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */
-
-/* PowerPC64 relocations defined for the TLS access ABI. */
-#define R_PPC64_TLS 67 /* none (sym+add)@tls */
-#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
-#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
-#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
-#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
-#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
-#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
-#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
-#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
-#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
-#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
-#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
-#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
-#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
-#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
-#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
-#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
-#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
-#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
-#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
-#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
-#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
-#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
-#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
-#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
-#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
-#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
-#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
-#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
-#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
-#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
-#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
-#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
-#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
-#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
-#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
-#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
-#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
-#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
-#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
-
-/* Keep this the last entry. */
-#define R_PPC64_NUM 107
-
-/* There's actually a third entry here, but it's unused */
-struct ppc64_opd_entry
-{
- unsigned long funcaddr;
- unsigned long r2;
-};
-
-#ifdef __KERNEL__
#ifdef CONFIG_SPU_BASE
/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
@@ -433,6 +141,4 @@ struct ppc64_opd_entry
#endif /* CONFIG_SPU_BASE */
-#endif /* __KERNEL */
-
#endif /* _ASM_POWERPC_ELF_H */
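
The SET_PERSONALITY() hunk above keys TIF_ELF2ABI off the low bits of e_flags. A standalone sketch of that test, assuming (as the macro does) that an ABI-version field of 2 in e_flags denotes the ELFv2 ABI; the helper name is hypothetical:

    #include <elf.h>
    #include <stdbool.h>

    /* The low two bits of e_flags carry the PowerPC64 ABI version;
     * 2 means the ELFv2 ABI. */
    static bool is_elfv2_image(const Elf64_Ehdr *ehdr)
    {
            return (ehdr->e_flags & 0x3) == 2;
    }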
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index f0fb4fc1f6e..f00e10e2a33 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -18,7 +18,7 @@
#ifndef _ASM_POWERPC_EMULATED_OPS_H
#define _ASM_POWERPC_EMULATED_OPS_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/perf_event.h>
@@ -43,15 +43,19 @@ extern struct ppc_emulated {
struct ppc_emulated_entry popcntb;
struct ppc_emulated_entry spe;
struct ppc_emulated_entry string;
+ struct ppc_emulated_entry sync;
struct ppc_emulated_entry unaligned;
#ifdef CONFIG_MATH_EMULATION
struct ppc_emulated_entry math;
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
- struct ppc_emulated_entry 8xx;
#endif
#ifdef CONFIG_VSX
struct ppc_emulated_entry vsx;
#endif
+#ifdef CONFIG_PPC64
+ struct ppc_emulated_entry mfdscr;
+ struct ppc_emulated_entry mtdscr;
+ struct ppc_emulated_entry lq_stq;
+#endif
} ppc_emulated;
extern u32 ppc_warn_emulated;
@@ -74,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
#define PPC_WARN_EMULATED(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
- 1, 0, regs, 0); \
+ 1, regs, 0); \
__PPC_WARN_EMULATED(type); \
} while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
- 1, 0, regs, regs->dar); \
+ 1, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \
} while (0)
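
An illustrative call site for the updated macro, using the existing popcntb counter; the function name is hypothetical, but the pattern matches how emulation fixup paths account each emulated instruction and feed PERF_COUNT_SW_EMULATION_FAULTS through the new perf_sw_event() signature (the old nmi argument is gone):

    static void example_emulate_popcntb(struct pt_regs *regs)
    {
            PPC_WARN_EMULATED(popcntb, regs);
            /* ... perform the actual emulation ... */
    }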
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 00000000000..334459ad145
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,575 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* A "hypercall" is an "sc 1" instruction. This header file file provides C
+ * wrapper functions for the ePAPR hypervisor interface. It is inteded
+ * for use by Linux device drivers and other operating systems.
+ *
+ * The hypercalls are implemented as inline assembly, rather than assembly
+ * language functions in a .S file, for optimization. It allows
+ * the caller to issue the hypercall instruction directly, improving both
+ * performance and memory footprint.
+ */
+
+#ifndef _EPAPR_HCALLS_H
+#define _EPAPR_HCALLS_H
+
+#include <uapi/asm/epapr_hcalls.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+/*
+ * Hypercall register clobber list
+ *
+ * These macros are used to define the list of clobbered registers during a
+ * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
+ * but the gcc inline assembly syntax does not allow us to specify registers
+ * on the clobber list that are also on the input/output list. Therefore,
+ * the list of clobbered registers depends on the number of register
+ * parameters ("+r" and "=r") passed to the hypercall.
+ *
+ * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
+ * general rule, 'x' is the number of parameters passed to the assembly
+ * block *except* for r11.
+ *
+ * If you're not sure, just use the smallest value of 'x' that does not
+ * generate a compilation error. Because these are static inline functions,
+ * the compiler will only check the clobber list for a function if you
+ * compile code that calls that function.
+ *
+ * r3 and r11 are not included in any clobbers list because they are always
+ * listed as output registers.
+ *
+ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
+ * whether they will be clobbered.
+ *
+ * Note that r11 can be used as an output parameter.
+ *
+ * The "memory" clobber is only necessary for hcalls where the Hypervisor
+ * will read or write guest memory. However, we add it to all hcalls because
+ * the impact is minimal, and we want to ensure that it's present for the
+ * hcalls that need it.
+*/
+
+/* List of common clobbered registers. Do not use this macro. */
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
+
+#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
+#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
+#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
+#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
+#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
+#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
+#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
+#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
+
+extern bool epapr_paravirt_enabled;
+extern u32 epapr_hypercall_start[];
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * ev_int_set_config - configure the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: configuration for this interrupt
+ * @priority: interrupt priority
+ * @destination: destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_config(unsigned int interrupt,
+ uint32_t config, unsigned int priority, uint32_t destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
+ r3 = interrupt;
+ r4 = config;
+ r5 = priority;
+ r6 = destination;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_config - return the config of the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: returned configuration for this interrupt
+ * @priority: returned interrupt priority
+ * @destination: returned destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_config(unsigned int interrupt,
+ uint32_t *config, unsigned int *priority, uint32_t *destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ *config = r4;
+ *priority = r5;
+ *destination = r6;
+
+ return r3;
+}
+
+/**
+ * ev_int_set_mask - sets the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: 0=enable interrupts, 1=disable interrupts
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_mask(unsigned int interrupt,
+ unsigned int mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
+ r3 = interrupt;
+ r4 = mask;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_mask - returns the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_mask(unsigned int interrupt,
+ unsigned int *mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *mask = r4;
+
+ return r3;
+}
+
+/**
+ * ev_int_eoi - signal the end of interrupt processing
+ * @interrupt: the interrupt number
+ *
+ * This function signals the end of processing for the specified
+ * interrupt, which must be the interrupt currently in service. By
+ * definition, this is also the highest-priority interrupt.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_eoi(unsigned int interrupt)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_EOI);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_send - send characters to a byte stream
+ * @handle: byte stream handle
+ * @count: (input) num of chars to send, (output) num chars sent
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * @buffer must be at least 16 bytes long, because all 16 bytes will be
+ * read from memory into registers, even if count < 16.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_send(unsigned int handle,
+ unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ const uint32_t *p = (const uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
+ r3 = handle;
+ r4 = *count;
+ r5 = be32_to_cpu(p[0]);
+ r6 = be32_to_cpu(p[1]);
+ r7 = be32_to_cpu(p[2]);
+ r8 = be32_to_cpu(p[3]);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3),
+ "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_receive - fetch characters from a byte channel
+ * @handle: byte channel handle
+ * @count: (input) max num of chars to receive, (output) num chars received
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * The size of @buffer must be at least 16 bytes, even if you request fewer
+ * than 16 characters, because we always write 16 bytes to @buffer. This is
+ * for performance reasons.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_receive(unsigned int handle,
+ unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ uint32_t *p = (uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
+ r3 = handle;
+ r4 = *count;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4),
+ "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+ p[0] = cpu_to_be32(r5);
+ p[1] = cpu_to_be32(r6);
+ p[2] = cpu_to_be32(r7);
+ p[3] = cpu_to_be32(r8);
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_poll - returns the status of the byte channel buffers
+ * @handle: byte channel handle
+ * @rx_count: returned count of bytes in receive queue
+ * @tx_count: returned count of free space in transmit queue
+ *
+ * This function reports the amount of data in the receive queue (i.e. the
+ * number of bytes you can read), and the amount of free space in the transmit
+ * queue (i.e. the number of bytes you can write).
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_poll(unsigned int handle,
+ unsigned int *rx_count, unsigned int *tx_count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ *rx_count = r4;
+ *tx_count = r5;
+
+ return r3;
+}
+
+/**
+ * ev_int_iack - acknowledge an interrupt
+ * @handle: handle to the target interrupt controller
+ * @vector: returned interrupt vector
+ *
+ * If handle is zero, the function returns the next interrupt source
+ * number to be handled irrespective of the hierarchy or cascading
+ * of interrupt controllers. If non-zero, it specifies a handle to
+ * the interrupt controller that is the target of the acknowledge.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_iack(unsigned int handle,
+ unsigned int *vector)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_IACK);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *vector = r4;
+
+ return r3;
+}
+
+/**
+ * ev_doorbell_send - send a doorbell to another partition
+ * @handle: doorbell send handle
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_doorbell_send(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_idle -- wait for next interrupt on this core
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_idle(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_IDLE);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ unsigned long register r0 asm("r0");
+ unsigned long register r3 asm("r3") = in[0];
+ unsigned long register r4 asm("r4") = in[1];
+ unsigned long register r5 asm("r5") = in[2];
+ unsigned long register r6 asm("r6") = in[3];
+ unsigned long register r7 asm("r7") = in[4];
+ unsigned long register r8 asm("r8") = in[5];
+ unsigned long register r9 asm("r9") = in[6];
+ unsigned long register r10 asm("r10") = in[7];
+ unsigned long register r11 asm("r11") = nr;
+ unsigned long register r12 asm("r12");
+
+ asm volatile("bl epapr_hypercall_start"
+ : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+ "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+ "=r"(r12)
+ : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+ "r"(r9), "r"(r10), "r"(r11)
+ : "memory", "cc", "xer", "ctr", "lr");
+
+ out[0] = r4;
+ out[1] = r5;
+ out[2] = r6;
+ out[3] = r7;
+ out[4] = r8;
+ out[5] = r9;
+ out[6] = r10;
+ out[7] = r11;
+
+ return r3;
+}
+#else
+static inline unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+ unsigned long r;
+
+ r = epapr_hypercall(in, out, nr);
+ *r2 = out[0];
+
+ return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ in[3] = p4;
+ return epapr_hypercall(in, out, nr);
+}
+#endif /* !__ASSEMBLY__ */
+#endif /* _EPAPR_HCALLS_H */
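
As a usage sketch for the byte-channel wrappers, a hypothetical write helper; per the kernel-doc above, the buffer handed to ev_byte_channel_send() must be EV_BYTE_CHANNEL_MAX_BYTES (16) long even when fewer bytes are sent, since all four words are loaded into registers:

    #include <linux/kernel.h>
    #include <linux/string.h>

    static unsigned int example_bc_write(unsigned int handle,
                                         const char *s, unsigned int len)
    {
            char buf[EV_BYTE_CHANNEL_MAX_BYTES] = { 0 };
            unsigned int count = min_t(unsigned int, len,
                                       EV_BYTE_CHANNEL_MAX_BYTES);

            memcpy(buf, s, count);
            /* On return, count holds the number of bytes accepted. */
            return ev_byte_channel_send(handle, &count, buf);
    }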
diff --git a/arch/powerpc/include/asm/errno.h b/arch/powerpc/include/asm/errno.h
deleted file mode 100644
index 8c145fd17d8..00000000000
--- a/arch/powerpc/include/asm/errno.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_POWERPC_ERRNO_H
-#define _ASM_POWERPC_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#undef EDEADLOCK
-#define EDEADLOCK 58 /* File locking deadlock error */
-
-#define _LAST_ERRNO 516
-
-#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 6d53f311d94..a8b52b61043 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -37,6 +37,7 @@
* critical data
*/
+#define PACA_EXGDBELL PACA_EXGEN
/* We are out of SPRGs so we save some things in the PACA. The normal
* exception frame is smaller than the CRIT or MC one though
@@ -48,30 +49,33 @@
#define EX_R14 (4 * 8)
#define EX_R15 (5 * 8)
-/* The TLB miss exception uses different slots */
+/*
+ * The TLB miss exception uses different slots.
+ *
+ * The bolted variant uses only the first six fields
+ * (48 bytes), which together with the 8-byte pgd and
+ * kernel_pgd pointers fits in one 64-byte cache line.
+ */
#define EX_TLB_R10 ( 0 * 8)
#define EX_TLB_R11 ( 1 * 8)
-#define EX_TLB_R12 ( 2 * 8)
-#define EX_TLB_R13 ( 3 * 8)
-#define EX_TLB_R14 ( 4 * 8)
-#define EX_TLB_R15 ( 5 * 8)
-#define EX_TLB_R16 ( 6 * 8)
-#define EX_TLB_CR ( 7 * 8)
+#define EX_TLB_R14 ( 2 * 8)
+#define EX_TLB_R15 ( 3 * 8)
+#define EX_TLB_R16 ( 4 * 8)
+#define EX_TLB_CR ( 5 * 8)
+#define EX_TLB_R12 ( 6 * 8)
+#define EX_TLB_R13 ( 7 * 8)
#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
#define EX_TLB_SRR0 (10 * 8)
#define EX_TLB_SRR1 (11 * 8)
-#define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS1 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS2 (13 * 8) /* Level 0 */
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define EX_TLB_R8 (14 * 8)
-#define EX_TLB_R9 (15 * 8)
-#define EX_TLB_LR (16 * 8)
-#define EX_TLB_SIZE (17 * 8)
+#define EX_TLB_R8 (12 * 8)
+#define EX_TLB_R9 (13 * 8)
+#define EX_TLB_LR (14 * 8)
+#define EX_TLB_SIZE (15 * 8)
#else
-#define EX_TLB_SIZE (14 * 8)
+#define EX_TLB_SIZE (12 * 8)
#endif
#define START_EXCEPTION(label) \
@@ -170,10 +174,10 @@ exc_##label##_book3e:
mtlr r16;
#define TLB_MISS_STATS_D(name) \
addi r9,r13,MMSTAT_DSTATS+name; \
- bl .tlb_stat_inc;
+ bl tlb_stat_inc;
#define TLB_MISS_STATS_I(name) \
addi r9,r13,MMSTAT_ISTATS+name; \
- bl .tlb_stat_inc;
+ bl tlb_stat_inc;
#define TLB_MISS_STATS_X(name) \
ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \
cmpdi cr2,r8,-1; \
@@ -181,19 +185,22 @@ exc_##label##_book3e:
addi r9,r13,MMSTAT_DSTATS+name; \
b 62f; \
61: addi r9,r13,MMSTAT_ISTATS+name; \
-62: bl .tlb_stat_inc;
+62: bl tlb_stat_inc;
#define TLB_MISS_STATS_SAVE_INFO \
- std r14,EX_TLB_ESR(r12); /* save ESR */ \
-
-
+ std r14,EX_TLB_ESR(r12); /* save ESR */
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED \
+ std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */
#else
#define TLB_MISS_PROLOG_STATS
#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_PROLOG_STATS_BOLTED
+#define TLB_MISS_RESTORE_STATS_BOLTED
#define TLB_MISS_STATS_D(name)
#define TLB_MISS_STATS_I(name)
#define TLB_MISS_STATS_X(name)
#define TLB_MISS_STATS_Y(name)
#define TLB_MISS_STATS_SAVE_INFO
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED
#endif
#define SET_IVOR(vector_number, vector_offset) \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 7778d6f0c87..8f35cd7d59c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -46,6 +46,41 @@
#define EX_CCR 60
#define EX_R3 64
#define EX_LR 72
+#define EX_CFAR 80
+#define EX_PPR 88 /* SMT thread status register (priority) */
+#define EX_CTR 96
+
+#ifdef CONFIG_RELOCATABLE
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ ld r12,PACAKBASE(r13); /* get high part of &label */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label); \
+ mtctr r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ li r10,MSR_RI; \
+ mtmsrd r10,1; /* Set RI (EE=0) */ \
+ bctr;
+#else
+/* If not relocatable, we can jump directly -- and save messing with CTR */
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ li r10,MSR_RI; \
+ mtmsrd r10,1; /* Set RI (EE=0) */ \
+ b label;
+#endif
+#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+
+/*
+ * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
+ * so no need to rfid. Save ctr in case we're CONFIG_RELOCATABLE, in
+ * which case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using ctr.
+ */
+#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
/*
* We're short on space and time in the exception prolog, so we can't
@@ -54,32 +89,192 @@
* word.
*/
#define LOAD_HANDLER(reg, label) \
- addi reg,reg,(label)-_stext; /* virt addr of handler ... */
+ /* Handlers must be within 64K of kbase, which must be 64k aligned */ \
+ ori reg,reg,(label)-_stext; /* virt addr of handler ... */
+
+/* Exception register prefixes */
+#define EXC_HV H
+#define EXC_STD
+
+#if defined(CONFIG_RELOCATABLE)
+/*
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler. So, save/restore it
+ * when required.
+ */
+#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
+#else
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area) mfctr reg
+#define RESTORE_CTR(reg, area)
+#endif
+
+/*
+ * PPR save/restore macros used in exceptions_64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra, rb) \
+BEGIN_FTR_SECTION_NESTED(940) \
+ ld ra,PACACURRENT(r13); \
+ ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \
+ std rb,TASKTHREADPPR(ra); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra) \
+BEGIN_FTR_SECTION_NESTED(941) \
+ ld ra,area+EX_PPR(r13); \
+ mtspr SPRN_PPR,ra; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Increase the priority on systems where PPR save/restore is not
+ * implemented/supported.
+ */
+#define HMT_MEDIUM_PPR_DISCARD \
+BEGIN_FTR_SECTION_NESTED(942) \
+ HMT_MEDIUM; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mfspr ra,spr; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Set an SPR from a register if the CPU has the given feature
+ */
+#define OPT_SET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mtspr spr,ra; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ std ra,offset(r13); \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+#define EXCEPTION_PROLOG_0(area) \
+ GET_PACA(r13); \
+ std r9,area+EX_R9(r13); /* save r9 */ \
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
+ HMT_MEDIUM; \
+ std r10,area+EX_R10(r13); /* save r10 - r12 */ \
+ OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
-#define EXCEPTION_PROLOG_1(area) \
- mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
- std r9,area+EX_R9(r13); /* save r9 - r12 */ \
- std r10,area+EX_R10(r13); \
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
+ SAVE_CTR(r10, area); \
+ mfcr r9; \
+ extra(vec); \
std r11,area+EX_R11(r13); \
std r12,area+EX_R12(r13); \
- mfspr r9,SPRN_SPRG_SCRATCH0; \
- std r9,area+EX_R13(r13); \
- mfcr r9
+ GET_SCRATCH0(r10); \
+ std r10,area+EX_R13(r13)
+#define EXCEPTION_PROLOG_1(area, extra, vec) \
+ __EXCEPTION_PROLOG_1(area, extra, vec)
-#define EXCEPTION_PROLOG_PSERIES_1(label) \
+#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
- mtspr SPRN_SRR0,r12; \
- mfspr r12,SPRN_SRR1; /* and SRR1 */ \
- mtspr SPRN_SRR1,r10; \
- rfid; \
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+ h##rfid; \
b . /* prevent speculative execution */
+#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_PROLOG_PSERIES_1(label, h)
+
+#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label, h);
+
+#define __KVMTEST(n) \
+ lbz r10,HSTATE_IN_GUEST(r13); \
+ cmpwi r10,0; \
+ bne do_kvm_##n
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
+#define __KVM_HANDLER(area, h, n) \
+do_kvm_##n: \
+ BEGIN_FTR_SECTION_NESTED(947) \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,HSTATE_CFAR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
+ BEGIN_FTR_SECTION_NESTED(948) \
+ ld r10,area+EX_PPR(r13); \
+ std r10,HSTATE_PPR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
+ ld r10,area+EX_R10(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt
+
+#define __KVM_HANDLER_SKIP(area, h, n) \
+do_kvm_##n: \
+ cmpwi r10,KVM_GUEST_MODE_SKIP; \
+ ld r10,area+EX_R10(r13); \
+ beq 89f; \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ BEGIN_FTR_SECTION_NESTED(948) \
+ ld r9,area+EX_PPR(r13); \
+ std r9,HSTATE_PPR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt; \
+89: mtocrf 0x80,r9; \
+ ld r9,area+EX_R9(r13); \
+ b kvmppc_skip_##h##interrupt
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define KVMTEST(n) __KVMTEST(n)
+#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST(n)
+#define KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+#define KVMTEST_PR(n) __KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
-#define EXCEPTION_PROLOG_PSERIES(area, label) \
- EXCEPTION_PROLOG_1(area); \
- EXCEPTION_PROLOG_PSERIES_1(label);
+#define NOTEST(n)
/*
* The common exception prolog is used for all except a few exceptions
@@ -97,11 +292,13 @@
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
- bge- cr1,2f; /* abort if it is */ \
- b 3f; \
-2: li r1,(n); /* will be reloaded later */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
+ blt+ cr1,3f; /* abort if it is */ \
+ li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
+ std r3,area+EX_R3(r13); \
+ addi r3,r13,area; /* r3 -> where regs are saved*/ \
+ RESTORE_CTR(r1, area); \
b bad_stack; \
3: std r9,_CCR(r1); /* save CR in stackframe */ \
std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -109,10 +306,15 @@
std r10,0(r1); /* make stack chain pointer */ \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
+ beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r9, r10); \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+ SAVE_PPR(area, r9, r10); \
+4: EXCEPTION_PROLOG_COMMON_2(area) \
+ EXCEPTION_PROLOG_COMMON_3(n) \
+ ACCOUNT_STOLEN_TIME
+
+/* Save original register values from the save area to the stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area) \
ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
ld r10,area+EX_R10(r13); \
std r9,GPR9(r1); \
@@ -123,11 +325,20 @@
std r9,GPR11(r1); \
std r10,GPR12(r1); \
std r11,GPR13(r1); \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,ORIG_GPR3(r1); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_CTR(r10, area); \
+ std r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(n) \
+ std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
+ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+ mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
- mflr r9; /* save LR in stackframe */ \
std r9,_LINK(r1); \
- mfctr r10; /* save CTR in stackframe */ \
- std r10,_CTR(r1); \
lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r10,SOFTE(r1); \
@@ -137,131 +348,207 @@
li r10,0; \
ld r11,exception_marker@toc(r2); \
std r10,RESULT(r1); /* clear regs->result */ \
- std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
- ACCOUNT_STOLEN_TIME
+ std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
/*
* Exception vectors.
*/
-#define STD_EXCEPTION_PSERIES(n, label) \
- . = n; \
+#define STD_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- HMT_MEDIUM; \
- DO_KVM n; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+ HMT_MEDIUM_PPR_DISCARD; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, KVMTEST_PR, vec)
-#define HSTD_EXCEPTION_PSERIES(n, label) \
- . = n; \
- .globl label##_pSeries; \
-label##_pSeries: \
- HMT_MEDIUM; \
- mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \
- mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
- mtspr SPRN_SRR0,r20; \
- mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
- mtspr SPRN_SRR1,r20; \
- mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
-
-
-#define MASKABLE_EXCEPTION_PSERIES(n, label) \
- . = n; \
- .globl label##_pSeries; \
-label##_pSeries: \
- HMT_MEDIUM; \
- DO_KVM n; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
- std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
- std r10,PACA_EXGEN+EX_R10(r13); \
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
+#define STD_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_hv; \
+label##_hv: \
+ HMT_MEDIUM_PPR_DISCARD; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, KVMTEST, vec)
+
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
+#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ HMT_MEDIUM_PPR_DISCARD; \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, NOTEST, vec)
+
+#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
+#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ HMT_MEDIUM_PPR_DISCARD; \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, NOTEST, vec)
+
+#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
+
+#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
- mfcr r9; \
cmpwi r10,0; \
- beq masked_interrupt; \
- mfspr r10,SPRN_SPRG_SCRATCH0; \
- std r10,PACA_EXGEN+EX_R13(r13); \
- std r11,PACA_EXGEN+EX_R11(r13); \
- std r12,PACA_EXGEN+EX_R12(r13); \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label##_common) \
- mtspr SPRN_SRR0,r12; \
- mfspr r12,SPRN_SRR1; /* and SRR1 */ \
- mtspr SPRN_SRR1,r10; \
- rfid; \
- b . /* prevent speculative execution */
+ li r10,SOFTEN_VALUE_##vec; \
+ beq masked_##h##interrupt
+#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
-#ifdef CONFIG_PPC_ISERIES
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
-BEGIN_FW_FTR_SECTION; \
- stb r11,PACAHARDIRQEN(r13); \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
- TRACE_DISABLE_INTS; \
-BEGIN_FW_FTR_SECTION; \
- mfmsr r10; \
- ori r10,r10,MSR_EE; \
- mtmsrd r10,1; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#else
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
- stb r11,PACAHARDIRQEN(r13); \
- TRACE_DISABLE_INTS
-#endif /* CONFIG_PPC_ISERIES */
-
-#define ENABLE_INTS \
- ld r12,_MSR(r1); \
- mfmsr r11; \
- rlwimi r11,r12,0,MSR_EE; \
- mtmsrd r11,1
-
-#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
+#define SOFTEN_TEST_PR(vec) \
+ KVMTEST_PR(vec); \
+ _SOFTEN_TEST(EXC_STD, vec)
+
+#define SOFTEN_TEST_HV(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_HV, vec)
+
+#define SOFTEN_TEST_HV_201(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_STD, vec)
+
+#define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec)
+#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
+
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
+
+#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM_PPR_DISCARD; \
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_TEST_PR)
+
+#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_hv; \
+label##_hv: \
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_TEST_HV)
+
+#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
+#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM_PPR_DISCARD; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
+#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)
+
+#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_NOTEST_PR)
+
+#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_NOTEST_HV)
+
+#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
+/*
+ * Our exception common code can be passed various "additions"
+ * that specify the behaviour of interrupts, whether to kick the
+ * runlatch, and so on.
+ */
+
+/* Exception addition: Hard disable interrupts */
+#define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11)
+
+#define ADD_NVGPRS \
+ bl save_nvgprs
+
+#define RUNLATCH_ON \
+BEGIN_FTR_SECTION \
+ CURRENT_THREAD_INFO(r3, r1); \
+ ld r4,TI_LOCAL_FLAGS(r3); \
+ andi. r0,r4,_TLF_RUNLATCH; \
+ beql ppc64_runlatch_on_trampoline; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ additions; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b ret
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
+ ADD_NVGPRS;DISABLE_INTS)
/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
- * in the idle task and therefore need the special idle handling.
+ * in the idle task and therefore need the special idle handling
+ * (finish nap and runlatch)
*/
-#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
-
-#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
-BEGIN_FTR_SECTION \
- bl .ppc64_runlatch_on; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except_lite
+#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
+ FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
@@ -273,7 +560,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
- clrrdi r11,r1,THREAD_SHIFT; \
+ CURRENT_THREAD_INFO(r11, r1); \
ld r9,TI_LOCAL_FLAGS(r11); \
andi. r10,r9,_TLF_NAPPING; \
bnel power4_fixup_nap; \
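
(A minimal usage sketch of how the vector-side and common-side macros above
pair up in exceptions-64s.S; the vector number and handler names below are
illustrative assumptions, not taken from this diff.)

	/* fixed-location vector: saves state, then branches to the common code */
	STD_EXCEPTION_PSERIES(0x300, 0x300, data_access)

	/* common part: builds the stack frame and calls the C handler */
	STD_EXCEPTION_COMMON(0x300, data_access, unknown_exception)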
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
new file mode 100644
index 00000000000..8196e9c7d7e
--- /dev/null
+++ b/arch/powerpc/include/asm/exec.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_EXEC_H
+#define _ASM_POWERPC_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_POWERPC_EXEC_H */
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
new file mode 100644
index 00000000000..a6774560afe
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump.h
@@ -0,0 +1,217 @@
+/*
+ * Firmware Assisted dump header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2011 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __PPC64_FA_DUMP_H__
+#define __PPC64_FA_DUMP_H__
+
+#ifdef CONFIG_FA_DUMP
+
+/*
+ * The RMA region will be saved for later dumping when the kernel crashes.
+ * The RMA (Real Mode Area) is the first block of logical memory addresses
+ * owned by the logical partition, containing the storage that may be
+ * accessed with translation off.
+ */
+#define RMA_START 0x0
+#define RMA_END (ppc64_rma_size)
+
+/*
+ * On some Power systems where the RMO is 128MB, the kernel still requires a
+ * minimum of 256MB to boot successfully. When the kdump infrastructure is
+ * configured to save the vmcore over the network, we run into OOM issues
+ * while loading modules related to network setup. Hence we need an
+ * additional 64MB of memory to avoid OOM issues.
+ */
+#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
+ + (0x1UL << 26))
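
(Worked example of the MIN_BOOT_MEM arithmetic above, for the 128MB RMO case
just described: RMA_END = 128MB is below 0x1UL << 28 = 256MB, so the first
term picks 256MB, and MIN_BOOT_MEM = 256MB + (0x1UL << 26) = 256MB + 64MB
= 320MB. With RMA_END = 512MB the first term is RMA_END itself, giving
512MB + 64MB = 576MB.)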
+
+#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
+
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS 0
+#endif
+
+/* Firmware provided dump sections */
+#define FADUMP_CPU_STATE_DATA 0x0001
+#define FADUMP_HPTE_REGION 0x0002
+#define FADUMP_REAL_MODE_REGION 0x0011
+
+/* Dump request flag */
+#define FADUMP_REQUEST_FLAG 0x00000001
+
+/* FAD commands */
+#define FADUMP_REGISTER 1
+#define FADUMP_UNREGISTER 2
+#define FADUMP_INVALIDATE 3
+
+/* Dump status flag */
+#define FADUMP_ERROR_FLAG 0x2000
+
+#define FADUMP_CPU_ID_MASK ((1UL << 32) - 1)
+
+#define CPU_UNKNOWN (~((u32)0))
+
+/* Utility macros */
+#define SKIP_TO_NEXT_CPU(reg_entry) \
+({ \
+ while (reg_entry->reg_id != REG_ID("CPUEND")) \
+ reg_entry++; \
+ reg_entry++; \
+})
+
+/* Kernel Dump section info */
+struct fadump_section {
+ u32 request_flag;
+ u16 source_data_type;
+ u16 error_flags;
+ u64 source_address;
+ u64 source_len;
+ u64 bytes_dumped;
+ u64 destination_address;
+};
+
+/* ibm,configure-kernel-dump header. */
+struct fadump_section_header {
+ u32 dump_format_version;
+ u16 dump_num_sections;
+ u16 dump_status_flag;
+ u32 offset_first_dump_section;
+
+ /* Fields for disk dump option. */
+ u32 dd_block_size;
+ u64 dd_block_offset;
+ u64 dd_num_blocks;
+ u32 dd_offset_disk_path;
+
+ /* Maximum time allowed to prevent an automatic dump-reboot. */
+ u32 max_time_auto;
+};
+
+/*
+ * Firmware Assisted dump memory structure. This structure is required for
+ * registering future kernel dump with power firmware through rtas call.
+ *
+ * No disk dump option. Hence disk dump path string section is not included.
+ */
+struct fadump_mem_struct {
+ struct fadump_section_header header;
+
+ /* Kernel dump sections */
+ struct fadump_section cpu_state_data;
+ struct fadump_section hpte_region;
+ struct fadump_section rmr_region;
+};
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ unsigned long cpu_state_data_size;
+ unsigned long hpte_region_size;
+ unsigned long boot_memory_size;
+ unsigned long reserve_dump_area_start;
+ unsigned long reserve_dump_area_size;
+ /* cmd line option during boot */
+ unsigned long reserve_bootvar;
+
+ unsigned long fadumphdr_addr;
+ unsigned long cpu_notes_buf;
+ unsigned long cpu_notes_buf_size;
+
+ int ibm_configure_kernel_dump;
+
+ unsigned long fadump_enabled:1;
+ unsigned long fadump_supported:1;
+ unsigned long dump_active:1;
+ unsigned long dump_registered:1;
+};
+
+/*
+ * Copy the ASCII values of the first 8 characters of a string into a u64
+ * variable, one byte per character.
+ * e.g.
+ * The string "FADMPINF" will be converted into 0x4641444d50494e46
+ */
+static inline u64 str_to_u64(const char *str)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < sizeof(val); i++)
+ val = (*str) ? (val << 8) | *str++ : val << 8;
+ return val;
+}
+#define STR_TO_HEX(x) str_to_u64(x)
+#define REG_ID(x) str_to_u64(x)
+
+#define FADUMP_CRASH_INFO_MAGIC STR_TO_HEX("FADMPINF")
+#define REGSAVE_AREA_MAGIC STR_TO_HEX("REGSAVE")
+
+/* The firmware-assisted dump format.
+ *
+ * The register save area is an area in the partition's memory used to preserve
+ * the register contents (CPU state data) for the active CPUs during a firmware
+ * assisted dump. The dump format contains a register save area header followed
+ * by register entries. Each list of registers for a CPU starts with
+ * "CPUSTRT" and ends with "CPUEND".
+ */
+
+/* Register save area header. */
+struct fadump_reg_save_area_header {
+ u64 magic_number;
+ u32 version;
+ u32 num_cpu_offset;
+};
+
+/* Register entry. */
+struct fadump_reg_entry {
+ u64 reg_id;
+ u64 reg_value;
+};
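
(A hedged sketch of how SKIP_TO_NEXT_CPU() and struct fadump_reg_entry combine
when walking the register save area; "regs_buf" and "wanted_cpu" are
illustrative assumptions, not names from this header.)

	/* regs_buf points at the first "CPUSTRT" entry of the save area */
	struct fadump_reg_entry *reg_entry = regs_buf;

	/* each "CPUSTRT" entry is assumed to carry the CPU id in reg_value */
	while ((reg_entry->reg_value & FADUMP_CPU_ID_MASK) != wanted_cpu)
		SKIP_TO_NEXT_CPU(reg_entry);	/* advance past this CPU's "CPUEND" */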
+
+/* fadump crash info structure */
+struct fadump_crash_info_header {
+ u64 magic_number;
+ u64 elfcorehdr_addr;
+ u32 crashing_cpu;
+ struct pt_regs regs;
+ struct cpumask cpu_online_mask;
+};
+
+/* Crash memory ranges */
+#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
+
+struct fad_crash_memory_ranges {
+ unsigned long long base;
+ unsigned long long size;
+};
+
+extern int early_init_dt_scan_fw_dump(unsigned long node,
+ const char *uname, int depth, void *data);
+extern int fadump_reserve_mem(void);
+extern int setup_fadump(void);
+extern int is_fadump_active(void);
+extern void crash_fadump(struct pt_regs *, const char *);
+extern void fadump_cleanup(void);
+
+#else /* CONFIG_FA_DUMP */
+static inline int is_fadump_active(void) { return 0; }
+static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/fcntl.h b/arch/powerpc/include/asm/fcntl.h
deleted file mode 100644
index ce5c4516d40..00000000000
--- a/arch/powerpc/include/asm/fcntl.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_FCNTL_H
-#define _ASM_FCNTL_H
-
-#define O_DIRECTORY 040000 /* must be a directory */
-#define O_NOFOLLOW 0100000 /* don't follow links */
-#define O_LARGEFILE 0200000
-#define O_DIRECT 0400000 /* direct disk access hint */
-
-#include <asm-generic/fcntl.h>
-
-#endif /* _ASM_FCNTL_H */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 96a7d067fbb..9a67a38bf7b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -37,18 +37,21 @@ label##2: \
.align 2; \
label##3:
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
-label##4: \
- .popsection; \
- .pushsection sect,"a"; \
- .align 3; \
-label##5: \
- FTR_ENTRY_LONG msk; \
- FTR_ENTRY_LONG val; \
- FTR_ENTRY_OFFSET label##1b-label##5b; \
- FTR_ENTRY_OFFSET label##2b-label##5b; \
- FTR_ENTRY_OFFSET label##3b-label##5b; \
- FTR_ENTRY_OFFSET label##4b-label##5b; \
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
+label##4: \
+ .popsection; \
+ .pushsection sect,"a"; \
+ .align 3; \
+label##5: \
+ FTR_ENTRY_LONG msk; \
+ FTR_ENTRY_LONG val; \
+ FTR_ENTRY_OFFSET label##1b-label##5b; \
+ FTR_ENTRY_OFFSET label##2b-label##5b; \
+ FTR_ENTRY_OFFSET label##3b-label##5b; \
+ FTR_ENTRY_OFFSET label##4b-label##5b; \
+ .ifgt (label##4b- label##3b)-(label##2b- label##1b); \
+ .error "Feature section else case larger than body"; \
+ .endif; \
.popsection;
@@ -143,6 +146,19 @@ label##5: \
#ifndef __ASSEMBLY__
+#define ASM_FTR_IF(section_if, section_else, msk, val) \
+ stringify_in_c(BEGIN_FTR_SECTION) \
+ section_if "; " \
+ stringify_in_c(FTR_SECTION_ELSE) \
+ section_else "; " \
+ stringify_in_c(ALT_FTR_SECTION_END((msk), (val)))
+
+#define ASM_FTR_IFSET(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_FTR_IFCLR(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), 0)
+
#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \
stringify_in_c(BEGIN_MMU_FTR_SECTION) \
section_if "; " \
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a405d7..681bc0314b6 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -18,7 +18,6 @@
#include <asm/feature-fixups.h>
/* firmware feature bitmask values */
-#define FIRMWARE_MAX_FEATURES 63
#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
@@ -41,11 +40,19 @@
#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
-#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000)
#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
+#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
+#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
+#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
+#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
+#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
#ifndef __ASSEMBLY__
@@ -59,10 +66,13 @@ enum {
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
- FW_FEATURE_CMO,
+ FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
+ FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
- FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+ FW_FEATURE_OPALv3,
+ FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
@@ -73,8 +83,8 @@ enum {
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_ISERIES
- FW_FEATURE_ISERIES_POSSIBLE |
+#ifdef CONFIG_PPC_POWERNV
+ FW_FEATURE_POWERNV_POSSIBLE |
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
@@ -90,8 +100,8 @@ enum {
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_ALWAYS &
#endif
-#ifdef CONFIG_PPC_ISERIES
- FW_FEATURE_ISERIES_ALWAYS &
+#ifdef CONFIG_PPC_POWERNV
+ FW_FEATURE_POWERNV_ALWAYS &
#endif
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 5c2c0233175..90f604bbcd1 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -58,52 +58,12 @@ enum fixed_addresses {
extern void __set_fixmap (enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NCG)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+#include <asm-generic/fixmap.h>
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 24bd34c57e9..936a904ae78 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -108,10 +108,10 @@ static int fd_request_irq(void)
{
if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- IRQF_DISABLED, "floppy", NULL);
+ 0, "floppy", NULL);
}
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h
index 8e8c9b5032d..3b05808f9ca 100644
--- a/arch/powerpc/include/asm/fsl_gtm.h
+++ b/arch/powerpc/include/asm/fsl_gtm.h
@@ -1,7 +1,7 @@
/*
* Freescale General-purpose Timers Module
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright 2006 Freescale Semiconductor, Inc.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) MontaVista Software, Inc. 2008.
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index bebd12463ec..77ced0b3d81 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -4,7 +4,7 @@
* Authors: Jeff Brown
* Timur Tabi <timur@freescale.com>
*
- * Copyright 2004,2007 Freescale Semiconductor, Inc
+ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -16,15 +16,6 @@
#define __ASM_POWERPC_FSL_GUTS_H__
#ifdef __KERNEL__
-/*
- * These #ifdefs are safe because it's not possible to build a kernel that
- * runs on e500 and e600 cores.
- */
-
-#if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx)
-#error Only 85xx and 86xx SOCs are supported
-#endif
-
/**
* Global Utility Registers.
*
@@ -36,11 +27,7 @@
* different names. In these cases, one name is chosen to avoid extraneous
* #ifdefs.
*/
-#ifdef CONFIG_PPC_85xx
-struct ccsr_guts_85xx {
-#else
-struct ccsr_guts_86xx {
-#endif
+struct ccsr_guts {
__be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
__be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
__be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
@@ -61,6 +48,8 @@ struct ccsr_guts_86xx {
__be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
u8 res06c[0x70 - 0x6c];
__be32 devdisr; /* 0x.0070 - Device Disable Control */
+#define CCSR_GUTS_DEVDISR_TB1 0x00001000
+#define CCSR_GUTS_DEVDISR_TB0 0x00004000
__be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
u8 res078[0x7c - 0x78];
__be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
@@ -77,15 +66,14 @@ struct ccsr_guts_86xx {
u8 res0a8[0xb0 - 0xa8];
__be32 rstcr; /* 0x.00b0 - Reset Control Register */
u8 res0b4[0xc0 - 0xb4];
-#ifdef CONFIG_PPC_85xx
- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register */
-#else
- __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */
-#endif
+ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
+ Called 'elbcvselcr' on 86xx SOCs */
u8 res0c4[0x224 - 0xc4];
__be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
__be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
- u8 res22c[0x800 - 0x22c];
+ u8 res22c[0x604 - 0x22c];
+ __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u8 res608[0x800 - 0x608];
__be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
u8 res804[0x900 - 0x804];
__be32 ircr; /* 0x.0900 - Infrared Control Register */
@@ -114,6 +102,10 @@ struct ccsr_guts_86xx {
__be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
} __attribute__ ((packed));
+
+/* Alternate function signal multiplex control */
+#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
+
#ifdef CONFIG_PPC_86xx
#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
@@ -132,7 +124,7 @@ struct ccsr_guts_86xx {
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
*/
-static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
unsigned int co, unsigned int ch, unsigned int device)
{
unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
@@ -168,7 +160,7 @@ static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts,
* ch: The channel on the DMA controller (0, 1, 2, or 3)
* value: the new value for the bit (0 or 1)
*/
-static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts,
+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
unsigned int co, unsigned int ch, unsigned int value)
{
if ((ch == 0) || (ch == 3)) {
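
(Usage sketch for the accessors above, assuming "guts" was obtained by
ioremap()ing the global utilities block; the controller/channel numbers are
illustrative.)

	struct ccsr_guts __iomem *guts = ioremap(guts_phys, sizeof(*guts));

	/* route DMA controller 0, channel 2 to the SSI (86xx device encoding) */
	guts_set_dmacr(guts, 0, 2, CCSR_GUTS_DMACR_DEV_SSI);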
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 00000000000..3abb58394da
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
+/*
+ * Freescale hypervisor call interface
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_HCALLS_H
+#define _FSL_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+#include <asm/epapr_hcalls.h>
+
+#define FH_API_VERSION 1
+
+#define FH_ERR_GET_INFO 1
+#define FH_PARTITION_GET_DTPROP 2
+#define FH_PARTITION_SET_DTPROP 3
+#define FH_PARTITION_RESTART 4
+#define FH_PARTITION_GET_STATUS 5
+#define FH_PARTITION_START 6
+#define FH_PARTITION_STOP 7
+#define FH_PARTITION_MEMCPY 8
+#define FH_DMA_ENABLE 9
+#define FH_DMA_DISABLE 10
+#define FH_SEND_NMI 11
+#define FH_VMPIC_GET_MSIR 12
+#define FH_SYSTEM_RESET 13
+#define FH_GET_CORE_STATE 14
+#define FH_ENTER_NAP 15
+#define FH_EXIT_NAP 16
+#define FH_CLAIM_DEVICE 17
+#define FH_PARTITION_STOP_DMA 18
+
+/* vendor ID: Freescale Semiconductor */
+#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * fh_send_nmi - send NMI to virtual cpu(s).
+ * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
+ *
+ * Returns 0 for success, or EINVAL for invalid vcpu_mask.
+ */
+static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
+ r3 = vcpu_mask;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
+#define FH_DTPROP_MAX_PATHLEN 4096
+#define FH_DTPROP_MAX_PROPLEN 32768
+
+/**
+ * fh_partition_get_dtprop - get a property from a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value buffer
+ * @propvalue_len: length of buffer on entry, length of property on return
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_get_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t *propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = *propvalue_len;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ *propvalue_len = r4;
+ return r3;
+}
+
+/**
+ * fh_partition_set_dtprop - set a property in a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value
+ * @propvalue_len: length of property
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_set_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = propvalue_len;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_restart - reboot the current partition
+ * @partition: partition ID
+ *
+ * Returns an error code if reboot failed. Does not return if it succeeds.
+ */
+static inline unsigned int fh_partition_restart(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#define FH_PARTITION_STOPPED 0
+#define FH_PARTITION_RUNNING 1
+#define FH_PARTITION_STARTING 2
+#define FH_PARTITION_STOPPING 3
+#define FH_PARTITION_PAUSING 4
+#define FH_PARTITION_PAUSED 5
+#define FH_PARTITION_RESUMING 6
+
+/**
+ * fh_partition_get_status - gets the status of a partition
+ * @partition: partition ID
+ * @status: returned status code
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_get_status(unsigned int partition,
+ unsigned int *status)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *status = r4;
+
+ return r3;
+}
+
+/**
+ * fh_partition_start - boots and starts execution of the specified partition
+ * @partition: partition ID
+ * @entry_point: guest physical address to start execution
+ *
+ * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
+ * time, guest physical addresses are the same as guest virtual addresses.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_start(unsigned int partition,
+ uint32_t entry_point, int load)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
+ r3 = partition;
+ r4 = entry_point;
+ r5 = load;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop - stops another partition
+ * @partition: partition ID
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
+ r3 = partition;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
+ * @source: guest physical address to copy from
+ * @target: guest physical address to copy to
+ * @size: number of bytes to copy
+ * @reserved: reserved, must be zero
+ *
+ * The scatter/gather list for fh_partition_memcpy() is an array of these
+ * structures. The array must be guest physically contiguous.
+ *
+ * This structure must be aligned on a 32-byte boundary, so that no single
+ * structure can span two pages.
+ */
+struct fh_sg_list {
+ uint64_t source; /**< guest physical address to copy from */
+ uint64_t target; /**< guest physical address to copy to */
+ uint64_t size; /**< number of bytes to copy */
+ uint64_t reserved; /**< reserved, must be zero */
+} __attribute__ ((aligned(32)));
+
+/**
+ * fh_partition_memcpy - copies data from one guest to another
+ * @source: the ID of the partition to copy from
+ * @target: the ID of the partition to copy to
+ * @sg_list: guest physical address of an array of &fh_sg_list structures
+ * @count: the number of entries in @sg_list
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_memcpy(unsigned int source,
+ unsigned int target, phys_addr_t sg_list, unsigned int count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
+ r3 = source;
+ r4 = target;
+ r5 = (uint32_t) sg_list;
+
+#ifdef CONFIG_PHYS_64BIT
+ r6 = sg_list >> 32;
+#else
+ r6 = 0;
+#endif
+ r7 = count;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ return r3;
+}
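
(A hedged usage sketch combining struct fh_sg_list with fh_partition_memcpy();
the partition handles and guest physical addresses are illustrative
assumptions.)

	static struct fh_sg_list sg[1];	/* 32-byte alignment comes from the type */

	sg[0].source = src_gpa;		/* guest physical address in @source */
	sg[0].target = dst_gpa;		/* guest physical address in @target */
	sg[0].size = len;
	sg[0].reserved = 0;

	ret = fh_partition_memcpy(src_handle, dst_handle, virt_to_phys(sg), 1);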
+
+/**
+ * fh_dma_enable - enable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to enable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_enable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
+ r3 = liodn;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_dma_disable - disable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to disable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_disable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
+ r3 = liodn;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_vmpic_get_msir - returns the MPIC-MSI register value
+ * @interrupt: the interrupt number
+ * @msir_val: returned MPIC-MSI register value
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
+ unsigned int *msir_val)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
+ r3 = interrupt;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *msir_val = r4;
+
+ return r3;
+}
+
+/**
+ * fh_system_reset - reset the system
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_system_reset(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_err_get_info - get platform error information
+ * @queue: 0 for the guest error event queue, 1 for the global error event queue
+ * @bufsize: in: size of the error data buffer; out: size of the returned data
+ * @addr_hi: high 32 bits of the guest physical address of the error data buffer
+ * @addr_lo: low 32 bits of the guest physical address of the error data buffer
+ * @peek: non-zero to read the event without removing it from the queue
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
+ uint32_t addr_hi, uint32_t addr_lo, int peek)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
+ r3 = queue;
+ r4 = *bufsize;
+ r5 = addr_hi;
+ r6 = addr_lo;
+ r7 = peek;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
+ "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ *bufsize = r4;
+
+ return r3;
+}
+
+
+#define FH_VCPU_RUN 0
+#define FH_VCPU_IDLE 1
+#define FH_VCPU_NAP 2
+
+/**
+ * fh_get_core_state - get the state of a vcpu
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ * @state: the current state of the vcpu, see FH_VCPU_*
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_get_core_state(unsigned int handle,
+ unsigned int vcpu, unsigned int *state)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *state = r4;
+ return r3;
+}
+
+/**
+ * fh_enter_nap - enter nap on a vcpu
+ *
+ * Note that though the API supports entering nap on a vcpu other
+ * than the caller, this may not be implemented and may return EINVAL.
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_exit_nap - exit nap on a vcpu
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+/**
+ * fh_claim_device - claim a "claimable" shared device
+ * @handle: fsl,hv-device-handle of node to claim
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_claim_device(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop_dma - run deferred DMA disabling on a partition's private devices
+ *
+ * This applies to devices which a partition owns either privately,
+ * or which are claimable and still actively owned by that partition,
+ * and which do not have the no-dma-disable property.
+ *
+ * @handle: partition (must be stopped) whose DMA is to be disabled
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop_dma(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
+ r3 = handle;
+
+ asm volatile("bl epapr_hypercall_start"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+#endif
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 5c1bf346674..067fb0dca54 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -157,6 +157,8 @@ struct fsl_lbc_regs {
#define LBCR_EPAR_SHIFT 16
#define LBCR_BMT 0x0000FF00
#define LBCR_BMT_SHIFT 8
+#define LBCR_BMTPS 0x0000000F
+#define LBCR_BMTPS_SHIFT 0
#define LBCR_INIT 0x00040000
__be32 lcrr; /**< Clock Ratio Register */
#define LCRR_DBYP 0x80000000
@@ -236,8 +238,6 @@ struct fsl_lbc_regs {
#define FPAR_LP_CI_SHIFT 0
__be32 fbcr; /**< Flash Byte Count Register */
#define FBCR_BC 0x00000FFF
- u8 res11[0x8];
- u8 res8[0xF00];
};
/*
@@ -285,13 +285,18 @@ struct fsl_lbc_ctrl {
/* device info */
struct device *dev;
struct fsl_lbc_regs __iomem *regs;
- int irq;
+ int irq[2];
wait_queue_head_t irq_wait;
spinlock_t lock;
void *nand;
/* status read from LTESR by irq handler */
unsigned int irq_status;
+
+#ifdef CONFIG_SUSPEND
+	/* save the regs when the system goes into deep sleep */
+ struct fsl_lbc_regs *saved_regs;
+#endif
};
extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644
index 00000000000..caa1b21c25c
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_STASH_H
+#define __FSL_PAMU_STASH_H
+
+/* cache stash targets */
+enum pamu_stash_target {
+ PAMU_ATTR_CACHE_L1 = 1,
+ PAMU_ATTR_CACHE_L2,
+ PAMU_ATTR_CACHE_L3,
+};
+
+/*
+ * This attribute allows configuring stashing-specific parameters
+ * in the PAMU hardware.
+ */
+
+struct pamu_stash_attribute {
+ u32 cpu; /* cpu number */
+	u32	cache;	/* cache to stash to: L1, L2, L3 */
+};
+
+#endif /* __FSL_PAMU_STASH_H */
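
(Illustrative use of the structure above, with hypothetical field values:
stash a device's DMAed data into CPU 1's L1 cache.)

	struct pamu_stash_attribute attr = {
		.cpu	= 1,
		.cache	= PAMU_ATTR_CACHE_L1,
	};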
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index dde1296b8b4..e3661872fbe 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -60,4 +60,20 @@ struct dyn_arch_ftrace {
#endif
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+ /*
+ * Compare the symbol name with the system call name. Skip the .sys or .SyS
+ * prefix from the symbol name and the sys prefix from the system call name,
+ * and match only the rest. This is only needed on ppc64, since symbol names
+ * on 32-bit do not start with a period, so the generic function will work.
+ */
+ return !strcmp(sym + 4, name + 3);
+}
+#endif
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_FTRACE */
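
A worked example of the matching rule above, with illustrative symbol names: skipping four characters of the symbol (".SyS") and three of the name ("sys") leaves identical "_open" tails, so the comparison succeeds.

static bool example_match(void)
{
	/* ".SyS_open" + 4 is "_open"; "sys_open" + 3 is "_open" -> equal */
	return arch_syscall_match_sym_name(".SyS_open", "sys_open"); /* true */
}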
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 7c589ef81fb..2a9cf845473 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
- PPC_RELEASE_BARRIER \
+ PPC_ATOMIC_ENTRY_BARRIER \
"1: lwarx %0,0,%2\n" \
insn \
PPC405_ERR77(0, %2) \
"2: stwcx. %1,0,%2\n" \
"bne- 1b\n" \
+ PPC_ATOMIC_EXIT_BARRIER \
"li %1,0\n" \
"3: .section .fixup,\"ax\"\n" \
"4: li %1,%3\n" \
@@ -30,7 +31,7 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +41,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
pagefault_disable();
@@ -82,35 +83,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
{
- int prev;
+ int ret = 0;
+ u32 prev;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\
- cmpw 0,%0,%3\n\
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
+ cmpw 0,%1,%4\n\
bne- 3f\n"
- PPC405_ERR77(0,%2)
-"2: stwcx. %4,0,%2\n\
+ PPC405_ERR77(0,%3)
+"2: stwcx. %5,0,%3\n\
bne- 1b\n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"3: .section .fixup,\"ax\"\n\
-4: li %0,%5\n\
+4: li %0,%6\n\
b 3b\n\
.previous\n\
.section __ex_table,\"a\"\n\
.align 3\n\
" PPC_LONG "1b,4b,2b,4b\n\
.previous" \
- : "=&r" (prev), "+m" (*uaddr)
+ : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
- return prev;
+ *uval = prev;
+ return ret;
}
#endif /* __KERNEL__ */
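
A hedged sketch of the new calling convention (the caller name is illustrative): the return value now reports only faults, while the previous futex word value comes back through the first argument.

static int example_try_lock(u32 __user *uaddr)
{
	u32 old;
	int ret = futex_atomic_cmpxchg_inatomic(&old, uaddr, 0, 1);

	if (ret)			/* -EFAULT: user page inaccessible */
		return ret;
	return old == 0 ? 0 : -EAGAIN;	/* nonzero old: lock already held */
}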
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
index 38762edb5e5..b3799d88ffc 100644
--- a/arch/powerpc/include/asm/gpio.h
+++ b/arch/powerpc/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for PowerPC.
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_POWERPC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3147a297012..418fb654370 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,10 +6,14 @@
typedef struct {
unsigned int __softirq_pending;
- unsigned int timer_irqs;
+ unsigned int timer_irqs_event;
+ unsigned int timer_irqs_others;
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
+#ifdef CONFIG_PPC_DOORBELL
+ unsigned int doorbell_irqs;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc264010d0..caaf6e00630 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 5856a66ab40..623f2971ce0 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -1,15 +1,91 @@
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H
+#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+extern struct kmem_cache *hugepte_cache;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64bit book3s
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ /*
+ * We have only four bits to encode, MMU page size
+ */
+ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+ return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+ return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+
+#else
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return hpd.pd & HUGEPD_SHIFT_MASK;
+}
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+ unsigned pdshift)
+{
+ /*
+ * On FSL BookE, we have multiple higher-level table entries that
+ * point to the same hugepte. Just use the first one since they're all
+ * identical. So for that case, idx=0.
+ */
+ unsigned long idx = 0;
+
+ pte_t *dir = hugepd_page(*hpdp);
+#ifndef CONFIG_PPC_FSL_BOOK3E
+ idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
+#endif
+
+ return dir + idx;
+}
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
unsigned long addr, unsigned *shift);
void flush_dcache_icache_hugepage(struct page *page);
+#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
+#else
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ return 0;
+}
+#endif
+
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+ pte_t pte);
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
@@ -50,8 +126,11 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
- return __pte(old);
+#ifdef CONFIG_PPC64
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
+#else
+ return __pte(pte_update(ptep, ~0UL, 0));
+#endif
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -76,7 +155,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
+#ifdef HUGETLB_NEED_PRELOAD
+ /*
+ * The "return 1" forces a call of update_mmu_cache, which will write a
+ * TLB entry. Without this, platforms that don't do a write of the TLB
+ * entry in the TLB miss handler asm will fault ad infinitum.
+ */
+ ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ return 1;
+#else
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
}
static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -93,4 +182,35 @@ static inline void arch_release_hugepage(struct page *page)
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#else /* ! CONFIG_HUGETLB_PAGE */
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+#define hugepd_shift(x) 0
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
+ unsigned pdshift)
+{
+ return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
+#endif
+
#endif /* _ASM_POWERPC_HUGETLB_H */
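
Illustrative arithmetic for the hugepte_offset() index computation above, assuming a directory covering 1 GB (pdshift 30) filled with 16 MB huge pages (page shift 24); the numbers are examples only.

static unsigned long example_hugepte_idx(void)
{
	unsigned long addr = 0x05000000;	/* 80 MB into the 1 GB region */

	/* (80 MB masked to the 1 GB span) / 16 MB per page == slot 5 */
	return (addr & ((1UL << 30) - 1)) >> 24;
}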
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca58db5..5dbbb29f5c3 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
is a good time to retry */
#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD 9999
+
#define H_HARDWARE -1 /* Hardware error */
#define H_FUNCTION -2 /* Function not supported */
#define H_PRIVILEGE -3 /* Caller not privileged */
@@ -73,8 +77,27 @@
#define H_MR_CONDITION -43
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
-#define H_RESCINDEND -46
-#define H_MULTI_THREADS_ACTIVE -9005
+#define H_RESCINDED -46
+#define H_P2 -55
+#define H_P3 -56
+#define H_P4 -57
+#define H_P5 -58
+#define H_P6 -59
+#define H_P7 -60
+#define H_P8 -61
+#define H_P9 -62
+#define H_TOO_BIG -64
+#define H_OVERLAP -68
+#define H_INTERRUPT -69
+#define H_BAD_DATA -70
+#define H_NOT_ACTIVE -71
+#define H_SG_LIST -72
+#define H_OP_MODE -73
+#define H_COP_HW -74
+#define H_UNSUPPORTED_FLAG_START -256
+#define H_UNSUPPORTED_FLAG_END -511
+#define H_MULTI_THREADS_ACTIVE -9005
+#define H_OUTSTANDING_COP_OPS -9006
/* Long Busy is a condition that can be returned by the firmware
@@ -100,14 +123,26 @@
#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
#define H_ANDCOND (1UL<<(63-33))
+#define H_LOCAL (1UL<<(63-35))
#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
+#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
#define H_COPY_PAGE (1UL<<(63-49))
#define H_N (1UL<<(63-61))
#define H_PP1 (1UL<<(63-62))
#define H_PP2 (1UL<<(63-63))
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT (63-18) /* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK 7UL
+#define H_VPA_REG_VPA 1UL /* Register Virtual Processor Area */
+#define H_VPA_REG_DTL 2UL /* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB 3UL /* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA 5UL /* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL 6UL /* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB 7UL /* Deregister SLB shadow buffer */
+
/* VASI States */
#define H_VASI_INVALID 0
#define H_VASI_ENABLED 1
@@ -117,12 +152,7 @@
#define H_VASI_RESUMED 5
#define H_VASI_COMPLETED 6
-/* DABRX flags */
-#define H_DABRX_HYPERVISOR (1UL<<(63-61))
-#define H_DABRX_KERNEL (1UL<<(63-62))
-#define H_DABRX_USER (1UL<<(63-63))
-
-/* Each control block has to be on a 4K bondary */
+/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
/* pSeries hypervisor opcodes */
@@ -232,7 +262,22 @@
#define H_GET_EM_PARMS 0x2B8
#define H_SET_MPP 0x2D0
#define H_GET_MPP 0x2D4
-#define MAX_HCALL_OPCODE H_GET_MPP
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_BEST_ENERGY 0x2F4
+#define H_XIRR_X 0x2FC
+#define H_RANDOM 0x300
+#define H_COP 0x304
+#define H_GET_MPP_X 0x314
+#define H_SET_MODE 0x31C
+#define MAX_HCALL_OPCODE H_SET_MODE
+
+/* Platform specific hcalls, used by KVM */
+#define H_RTAS 0xf000
+
+/* "Platform specific hcalls", provided by PHYP */
+#define H_GET_24X7_CATALOG_PAGE 0xF078
+#define H_GET_24X7_DATA 0xF07C
+#define H_GET_PERF_COUNTER_INFO 0xF080
#ifndef __ASSEMBLY__
@@ -310,6 +355,36 @@ struct hvcall_mpp_data {
int h_get_mpp(struct hvcall_mpp_data *);
+struct hvcall_mpp_x_data {
+ unsigned long coalesced_bytes;
+ unsigned long pool_coalesced_bytes;
+ unsigned long pool_purr_cycles;
+ unsigned long pool_spurr_cycles;
+ unsigned long reserved[3];
+};
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
+
+static inline unsigned int get_longbusy_msecs(int longbusy_rc)
+{
+ switch (longbusy_rc) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
+
#ifdef CONFIG_PPC_PSERIES
extern int CMO_PrPSP;
extern int CMO_SecPSP;
@@ -329,6 +404,17 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
+
+extern long pSeries_enable_reloc_on_exc(void);
+extern long pSeries_disable_reloc_on_exc(void);
+
+extern long pseries_big_endian_exceptions(void);
+
+#else
+
+#define pSeries_enable_reloc_on_exc() do {} while (0)
+#define pSeries_disable_reloc_on_exc() do {} while (0)
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
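
A hedged usage sketch of get_longbusy_msecs() in the standard long-busy retry idiom. plpar_hcall_norets(), H_IS_LONG_BUSY() and msleep() are assumed from elsewhere in the tree; the opcode and arguments are illustrative.

static long example_set_mpp(unsigned long entitled, unsigned long weight)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_SET_MPP, entitled, weight);
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));
	} while (H_IS_LONG_BUSY(rc));

	return rc;
}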
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 00000000000..d4a5315718c
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,94 @@
+#ifndef _HVSI_H
+#define _HVSI_H
+
+#define VS_DATA_PACKET_HEADER 0xff
+#define VS_CONTROL_PACKET_HEADER 0xfe
+#define VS_QUERY_PACKET_HEADER 0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL 1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL 3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD 0x20
+
+#define HVSI_MAX_OUTGOING_DATA 12
+#define HVSI_VERSION 1
+
+struct hvsi_header {
+ uint8_t type;
+ uint8_t len;
+ __be16 seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+ struct hvsi_header hdr;
+ uint8_t data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+ struct hvsi_header hdr;
+ __be16 verb;
+ /* optional depending on verb: */
+ __be32 word;
+ __be32 mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+ struct hvsi_header hdr;
+ __be16 verb;
+} __attribute__((packed));
+
+struct hvsi_query_response {
+ struct hvsi_header hdr;
+ __be16 verb;
+ __be16 query_seqno;
+ union {
+ uint8_t version;
+ __be32 mctrl_word;
+ } u;
+} __attribute__((packed));
+
+/* hvsi lib struct definitions */
+#define HVSI_INBUF_SIZE 255
+struct tty_struct;
+struct hvsi_priv {
+ unsigned int inbuf_len; /* data in input buffer */
+ unsigned char inbuf[HVSI_INBUF_SIZE];
+ unsigned int inbuf_cur; /* Cursor in input buffer */
+ unsigned int inbuf_pktlen; /* packet length from cursor */
+ atomic_t seqno; /* packet sequence number */
+ unsigned int opened:1; /* driver opened */
+ unsigned int established:1; /* protocol established */
+ unsigned int is_console:1; /* used as a kernel console device */
+ unsigned int mctrl_update:1; /* modem control updated */
+ unsigned short mctrl; /* modem control */
+ struct tty_struct *tty; /* tty structure */
+ int (*get_chars)(uint32_t termno, char *buf, int count);
+ int (*put_chars)(uint32_t termno, const char *buf, int count);
+ uint32_t termno;
+};
+
+/* hvsi lib functions */
+struct hvc_struct;
+extern void hvsilib_init(struct hvsi_priv *pv,
+ int (*get_chars)(uint32_t termno, char *buf, int count),
+ int (*put_chars)(uint32_t termno, const char *buf,
+ int count),
+ int termno, int is_console);
+extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
+extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
+extern void hvsilib_establish(struct hvsi_priv *pv);
+extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
+extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+
+#endif /* _HVSI_H */
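
A hedged sketch of how the structures above compose into a wire packet — here, the version query sent during protocol establishment. The sequence numbering and transport call mirror what an HVSI client would plausibly do; the details are illustrative.

static void example_send_version_query(struct hvsi_priv *pv)
{
	struct hvsi_query q;

	q.hdr.type  = VS_QUERY_PACKET_HEADER;
	q.hdr.len   = sizeof(q);
	q.hdr.seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
	q.verb      = cpu_to_be16(VSV_SEND_VERSION_NUMBER);

	pv->put_chars(pv->termno, (char *)&q, sizeof(q));
}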
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 1c33ec17ca3..ac6432d9be4 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -24,26 +24,39 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-
struct arch_hw_breakpoint {
- bool extraneous_interrupt;
- u8 len; /* length of the target data symbol */
- int type;
unsigned long address;
+ u16 type;
+ u16 len; /* length of the target data symbol */
};
+/* Note: Don't change the first 6 bits below as they are in the same order
+ * as the dabr and dabrx.
+ */
+#define HW_BRK_TYPE_READ 0x01
+#define HW_BRK_TYPE_WRITE 0x02
+#define HW_BRK_TYPE_TRANSLATE 0x04
+#define HW_BRK_TYPE_USER 0x08
+#define HW_BRK_TYPE_KERNEL 0x10
+#define HW_BRK_TYPE_HYP 0x20
+#define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80
+
+/* bits that overlap with the bottom 3 bits of the dabr */
+#define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
+#define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
+#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
+ HW_BRK_TYPE_HYP)
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
-#include <asm/system.h>
+#include <asm/debug.h>
struct perf_event;
struct pmu;
struct perf_sample_data;
#define HW_BREAKPOINT_ALIGN 0x7
-/* Maximum permissible length of any HW Breakpoint */
-#define HW_BREAKPOINT_LEN 0x8
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
@@ -57,11 +70,16 @@ void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
extern struct pmu perf_ops_bp;
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- set_dabr(0);
+ struct arch_hw_breakpoint brk;
+
+ brk.address = 0;
+ brk.type = 0;
+ brk.len = 0;
+ __set_breakpoint(&brk);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
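
An illustrative composition of the new type bits: a write-only watchpoint on a translated user-space address. Because the low bits mirror the DABR/DABRX layout (per the note above), the value maps almost directly onto the registers. The function name is hypothetical.

static void example_set_user_watchpoint(unsigned long addr)
{
	struct arch_hw_breakpoint brk = {
		.address = addr,
		.type    = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_TRANSLATE |
			   HW_BRK_TYPE_USER,
		.len     = 8,
	};

	__set_breakpoint(&brk);
}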
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ff08b70b36d..10be1dd01c6 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -11,7 +11,31 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * This bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
+ * is set whenever we manually hard disable.
+ */
+#define PACA_IRQ_HARD_DIS 0x01
+#define PACA_IRQ_DBELL 0x02
+#define PACA_IRQ_EE 0x04
+#define PACA_IRQ_DEC 0x08 /* Or FIT */
+#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
+
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+
+extern void __replay_interrupt(unsigned int vector);
+
extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
+extern void WatchdogException(struct pt_regs *regs);
+extern void unknown_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -42,7 +66,6 @@ static inline unsigned long arch_local_irq_disable(void)
}
extern void arch_local_irq_restore(unsigned long);
-extern void iseries_handle_interrupts(void);
static inline void arch_local_irq_enable(void)
{
@@ -65,19 +88,46 @@ static inline bool arch_irqs_disabled(void)
}
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
-#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
-#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
+#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif
-#define hard_irq_disable() \
- do { \
- __hard_irq_disable(); \
- get_paca()->soft_enabled = 0; \
- get_paca()->hard_enabled = 0; \
- } while(0)
+#define hard_irq_disable() do { \
+ u8 _was_enabled; \
+ __hard_irq_disable(); \
+ _was_enabled = local_paca->soft_enabled; \
+ local_paca->soft_enabled = 0; \
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
+ if (_was_enabled) \
+ trace_hardirqs_off(); \
+} while(0)
+
+static inline bool lazy_irq_pending(void)
+{
+ return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * This is called by asynchronous interrupts to conditionally
+ * re-enable hard interrupts when soft-disabled after having
+ * cleared the source of the interrupt.
+ */
+static inline void may_hard_irq_enable(void)
+{
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+ __hard_irq_enable();
+}
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
+
+extern bool prep_irq_for_idle(void);
#else /* CONFIG_PPC64 */
@@ -139,13 +189,23 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() arch_local_irq_disable()
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !(regs->msr & MSR_EE);
+}
+
+static inline void may_hard_irq_enable(void) { }
+
#endif /* CONFIG_PPC64 */
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
+
/*
* interrupt-retrigger: should we handle this via lost interrupts and IPIs
* or should we not care like we do now ? --BenH.
*/
struct irq_chip;
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
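
A hedged sketch of the call pattern may_hard_irq_enable() is designed for: an asynchronous interrupt handler that has already silenced its source can let hard interrupts back in while the kernel remains soft-disabled. The handler shape is illustrative.

static void example_async_handler(struct pt_regs *regs)
{
	/* ... first clear the interrupt source in the device or SPR ... */

	may_hard_irq_enable();	/* re-enables MSR_EE unless an EE is pending */

	/* ... remaining work runs soft-disabled but hard-enabled ... */
}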
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index 105ade297aa..c3fdfbd5a67 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -6,7 +6,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
-extern struct irq_host *i8259_get_host(void);
+extern struct irq_domain *i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
index 1a9d9aea21f..088f95b2e14 100644
--- a/arch/powerpc/include/asm/ibmebus.h
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -48,8 +48,8 @@
extern struct bus_type ibmebus_bus_type;
-int ibmebus_register_driver(struct of_platform_driver *drv);
-void ibmebus_unregister_driver(struct of_platform_driver *drv);
+int ibmebus_register_driver(struct platform_driver *drv);
+void ibmebus_unregister_driver(struct platform_driver *drv);
int ibmebus_request_irq(u32 ist, irq_handler_t handler,
unsigned long irq_flags, const char *devname,
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 4e10f508570..bedbff89142 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -3,7 +3,7 @@
* The Internal Memory Map for devices with QE on them. This
* is the superset of all QE devices (8360, etc.).
- * Copyright (C) 2006. Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -26,7 +26,9 @@
struct qe_iram {
__be32 iadd; /* I-RAM Address Register */
__be32 idata; /* I-RAM Data Register */
- u8 res0[0x78];
+ u8 res0[0x04];
+ __be32 iready; /* I-RAM Ready Register */
+ u8 res1[0x70];
} __attribute__ ((packed));
/* QE Interrupt Controller */
@@ -467,13 +469,22 @@ struct qe_immap {
extern struct qe_immap __iomem *qe_immr;
extern phys_addr_t get_qe_base(void);
-static inline unsigned long immrbar_virt_to_phys(void *address)
+/*
+ * Returns the offset within the QE address space of the given pointer.
+ *
+ * Note that the QE does not support 36-bit physical addresses, so if
+ * get_qe_base() returns a number above 4GB, the caller will probably fail.
+ */
+static inline phys_addr_t immrbar_virt_to_phys(void *address)
{
- if ( ((u32)address >= (u32)qe_immr) &&
- ((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
- return (unsigned long)(address - (u32)qe_immr +
- (u32)get_qe_base());
- return (unsigned long)virt_to_phys(address);
+ void *q = (void *)qe_immr;
+
+ /* Is it a MURAM address? */
+ if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
+ return get_qe_base() + (address - q);
+
+ /* It's an address returned by kmalloc */
+ return virt_to_phys(address);
}
#endif /* __KERNEL__ */
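
A hedged usage sketch for the rewritten helper: whether a buffer lives in MURAM (inside the qe_immr window) or in ordinary kmalloc'd memory, the same call yields an address the QE can be handed. The 32-bit truncation echoes the caveat in the comment above; the function name is illustrative.

static u32 example_qe_buf_addr(void *buf)
{
	phys_addr_t pa = immrbar_virt_to_phys(buf);

	return (u32)pa;		/* the QE only takes 32-bit addresses */
}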
diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
new file mode 100644
index 00000000000..f96dd096ff4
--- /dev/null
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -0,0 +1,48 @@
+/*
+ * Support PCI IO workaround
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _IO_WORKAROUNDS_H
+#define _IO_WORKAROUNDS_H
+
+#include <linux/io.h>
+#include <asm/pci-bridge.h>
+
+/* Bus info */
+struct iowa_bus {
+ struct pci_controller *phb;
+ struct ppc_pci_io *ops;
+ void *private;
+};
+
+void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
+ int (*)(struct iowa_bus *, void *), void *);
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
+struct iowa_bus *iowa_pio_find_bus(unsigned long);
+
+extern struct ppc_pci_io spiderpci_ops;
+extern int spiderpci_iowa_init(struct iowa_bus *, void *);
+
+#define SPIDER_PCI_REG_BASE 0xd000
+#define SPIDER_PCI_REG_SIZE 0x1000
+#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
+#define SPIDER_PCI_DUMMY_READ 0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
+
+#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 001f2f11c19..97d3869991c 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -2,6 +2,8 @@
#define _ASM_POWERPC_IO_H
#ifdef __KERNEL__
+#define ARCH_HAS_IOREMAP_WC
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -13,10 +15,14 @@
extern int check_legacy_ioport(unsigned long base_port);
#define I8042_DATA_REG 0x60
#define FDC_BASE 0x3f0
-/* only relevant for PReP */
-#define _PIDXR 0x279
-#define _PNPWRP 0xa79
-#define PNPBIOS_BASE 0xf000
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
+extern struct pci_dev *isa_bridge_pcidev;
+/*
+ * Do we have legacy ISA devices?
+ */
+#define arch_has_dev_port() (isa_bridge_pcidev != NULL || isa_io_special)
+#endif
#include <linux/device.h>
#include <linux/io.h>
@@ -63,8 +69,18 @@ extern unsigned long pci_dram_offset;
extern resource_size_t isa_mem_base;
-#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
-#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+/* Boolean set by platform if PIO accesses are supported while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
#endif
/*
@@ -97,7 +113,7 @@ extern resource_size_t isa_mem_base;
/* gcc 4.0 and older doesn't have 'Z' constraint */
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -106,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,0,%2" \
@@ -114,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
IO_SET_SYNC_FLAG(); \
}
#else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn) \
+#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -123,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_LE(name, size, insn) \
+#define DEF_MMIO_OUT_X(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,%y0" \
@@ -132,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
}
#endif
-#define DEF_MMIO_IN_BE(name, size, insn) \
+#define DEF_MMIO_IN_D(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
@@ -141,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
return ret; \
}
-#define DEF_MMIO_OUT_BE(name, size, insn) \
+#define DEF_MMIO_OUT_D(name, size, insn) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
@@ -149,22 +165,53 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
IO_SET_SYNC_FLAG(); \
}
+DEF_MMIO_IN_D(in_8, 8, lbz);
+DEF_MMIO_OUT_D(out_8, 8, stb);
-DEF_MMIO_IN_BE(in_8, 8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
-DEF_MMIO_OUT_BE(out_8, 8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
+
+/*
+ * Cache inhibited accessors for use in real mode; you don't want to use these
+ * unless you know what you're doing.
+ *
+ * NB. These use the cpu byte ordering.
+ */
+DEF_MMIO_OUT_X(out_rm8, 8, stbcix);
+DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
+DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
+DEF_MMIO_IN_X(in_rm8, 8, lbzcix);
+DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
+DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
#ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
+DEF_MMIO_IN_X(in_rm64, 64, ldcix);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
/* There is no asm instructions for 64 bits reverse loads and stores */
static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -176,6 +223,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
{
out_be64(addr, swab64(val));
}
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There is no asm instructions for 64 bits reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+ return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+ out_le64(addr, swab64(val));
+}
+
+#endif
#endif /* __powerpc64__ */
/*
@@ -216,9 +279,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
* for PowerPC is as close as possible to the x86 version of these, and thus
* provides fairly heavy weight barriers for the non-raw versions
*
- * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
- * allowing the platform to provide its own implementation of some or all
- * of the accessors.
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
*/
/*
@@ -234,8 +297,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
/* Indirect IO address tokens:
*
- * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
- * on all IOs. (Note that this is all 64 bits only for now)
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
*
* To help platforms that may need to differentiate MMIO addresses in
* their hooks, a bitfield is reserved for use by the platform near the
@@ -257,11 +320,14 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
*
* The direct IO mapping operations will then mask off those bits
* before doing the actual access, though that only happens when
- * CONFIG_PPC_INDIRECT_IO is set, thus be careful when you use that
+ * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
* mechanism
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
*/
-#ifdef CONFIG_PPC_INDIRECT_IO
+#ifdef CONFIG_PPC_INDIRECT_MMIO
#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
#define PCI_IO_IND_TOKEN_SHIFT 48
#define PCI_FIX_ADDR(addr) \
@@ -392,7 +458,7 @@ __do_out_asm(_rec_outl, "stwbrx")
#endif /* CONFIG_PPC32 */
/* The "__do_*" operations below provide the actual "base" implementation
- * for each of the defined acccessor. Some of them use the out_* functions
+ * for each of the defined accessors. Some of them use the out_* functions
* directly, some of them still use EEH, though we might change that in the
* future. Those macros below provide the necessary argument swapping and
* handling of the IO base for PIO.
@@ -481,10 +547,16 @@ __do_out_asm(_rec_outl, "stwbrx")
_memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
#endif /* !CONFIG_EEH */
-#ifdef CONFIG_PPC_INDIRECT_IO
-#define DEF_PCI_HOOK(x) x
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#define DEF_PCI_HOOK_pio(x) x
#else
-#define DEF_PCI_HOOK(x) NULL
+#define DEF_PCI_HOOK_pio(x) NULL
+#endif
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define DEF_PCI_HOOK_mem(x) x
+#else
+#define DEF_PCI_HOOK_mem(x) NULL
#endif
/* Structure containing all the hooks */
@@ -504,7 +576,7 @@ extern struct ppc_pci_io {
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
static inline ret name at \
{ \
- if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
return ppc_pci_io.name al; \
return __do_##name al; \
}
@@ -512,7 +584,7 @@ static inline ret name at \
#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
static inline void name at \
{ \
- if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
ppc_pci_io.name al; \
else \
__do_##name al; \
@@ -616,12 +688,13 @@ static inline void iosync(void)
* * ioremap is the standard one and provides non-cacheable guarded mappings
* and can be hooked by the platform via ppc_md
*
- * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md. ioremap_prot is the exact
- * same thing as ioremap_flags.
+ * * ioremap_prot allows specifying the page flags as an argument and can
+ * also be hooked by the platform via ppc_md.
*
* * ioremap_nocache is identical to ioremap
*
+ * * ioremap_wc enables write combining
+ *
* * iounmap undoes such a mapping and can be hooked
*
* * __ioremap_at (and the pending __iounmap_at) are low level functions to
@@ -629,7 +702,7 @@ static inline void iosync(void)
* currently be hooked. Must be page aligned.
*
* * __ioremap is the low level implementation used by ioremap and
- * ioremap_flags and cannot be hooked (but can be used by a hook on one
+ * ioremap_prot and cannot be hooked (but can be used by a hook on one
* of the previous ones)
*
* * __ioremap_caller is the same as above but takes an explicit caller
@@ -640,10 +713,10 @@ static inline void iosync(void)
*
*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
- unsigned long flags);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+ unsigned long flags);
+extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
extern void iounmap(volatile void __iomem *addr);
@@ -659,7 +732,7 @@ extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
extern void __iounmap_at(void *ea, unsigned long size);
/*
- * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
* which needs some additional definitions here. They basically allow PIO
* space overall to be 1GB. This will work as long as we never try to use
* iomap to map MMIO below 1GB which should be fine on ppc64
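
A hedged usage sketch for the accessors generated above: whatever the kernel's endianness, in_be32()/out_be32() now pick the plain or byte-reversed load/store as appropriate, with the same heavyweight ordering as before. The register offset and bit are purely illustrative.

static u32 example_toggle_enable(void __iomem *regs)
{
	u32 status = in_be32(regs + 0x10);	/* hypothetical status reg */

	out_be32(regs + 0x10, status | 0x1);	/* set a hypothetical enable bit */
	return status;
}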
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h
new file mode 100644
index 00000000000..b1a9a1be3c2
--- /dev/null
+++ b/arch/powerpc/include/asm/io_event_irq.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H
+#define _ASM_POWERPC_IO_EVENT_IRQ_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+
+#define PSERIES_IOEI_RPC_MAX_LEN 216
+
+#define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01
+#define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02
+#define PSERIES_IOEI_TYPE_EVENT 0x03
+#define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04
+
+#define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00
+#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01
+#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03
+#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04
+#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05
+#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06
+#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07
+
+#define PSERIES_IOEI_SCOPE_NOT_APP 0x00
+#define PSERIES_IOEI_SCOPE_RIO_HUB 0x36
+#define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37
+#define PSERIES_IOEI_SCOPE_PHB 0x38
+#define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39
+#define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A
+#define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B
+#define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51
+
+/* Platform Event Log Format, Version 6, data portion of IO event section */
+struct pseries_io_event {
+ uint8_t event_type; /* 0x00 IO-Event Type */
+ uint8_t rpc_data_len; /* 0x01 RPC data length */
+ uint8_t scope; /* 0x02 Error/Event Scope */
+ uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */
+ uint32_t drc_index; /* 0x04 DRC Index */
+ uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN];
+ /* 0x08 RPC Data (0-216 bytes, */
+ /* padded to 4 bytes alignment) */
+};
+
+extern struct atomic_notifier_head pseries_ioei_notifier_list;
+
+#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */
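
A hedged sketch of a consumer of the exported notifier head. The handler and its policy are illustrative, with the notifier data assumed to point at a struct pseries_io_event as provided by the RTAS event parsing code.

static int example_ioei_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pseries_io_event *ev = data;

	if (ev->event_type == PSERIES_IOEI_TYPE_ERR_DETECTED)
		pr_warn("I/O error event, scope 0x%x\n", ev->scope);
	return NOTIFY_OK;
}

static struct notifier_block example_ioei_nb = {
	.notifier_call = example_ioei_notify,
};

/* registration (illustrative):
 * atomic_notifier_chain_register(&pseries_ioei_notifier_list,
 *				   &example_ioei_nb);
 */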
diff --git a/arch/powerpc/include/asm/ioctl.h b/arch/powerpc/include/asm/ioctl.h
deleted file mode 100644
index 57d68304218..00000000000
--- a/arch/powerpc/include/asm/ioctl.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _ASM_POWERPC_IOCTL_H
-#define _ASM_POWERPC_IOCTL_H
-
-#define _IOC_SIZEBITS 13
-#define _IOC_DIRBITS 3
-
-#define _IOC_NONE 1U
-#define _IOC_READ 2U
-#define _IOC_WRITE 4U
-
-#include <asm-generic/ioctl.h>
-
-#endif /* _ASM_POWERPC_IOCTL_H */
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
deleted file mode 100644
index 851920052e0..00000000000
--- a/arch/powerpc/include/asm/ioctls.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _ASM_POWERPC_IOCTLS_H
-#define _ASM_POWERPC_IOCTLS_H
-
-#include <asm/ioctl.h>
-
-#define FIOCLEX _IO('f', 1)
-#define FIONCLEX _IO('f', 2)
-#define FIOASYNC _IOW('f', 125, int)
-#define FIONBIO _IOW('f', 126, int)
-#define FIONREAD _IOR('f', 127, int)
-#define TIOCINQ FIONREAD
-#define FIOQSIZE _IOR('f', 128, loff_t)
-
-#define TIOCGETP _IOR('t', 8, struct sgttyb)
-#define TIOCSETP _IOW('t', 9, struct sgttyb)
-#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
-
-#define TIOCSETC _IOW('t', 17, struct tchars)
-#define TIOCGETC _IOR('t', 18, struct tchars)
-#define TCGETS _IOR('t', 19, struct termios)
-#define TCSETS _IOW('t', 20, struct termios)
-#define TCSETSW _IOW('t', 21, struct termios)
-#define TCSETSF _IOW('t', 22, struct termios)
-
-#define TCGETA _IOR('t', 23, struct termio)
-#define TCSETA _IOW('t', 24, struct termio)
-#define TCSETAW _IOW('t', 25, struct termio)
-#define TCSETAF _IOW('t', 28, struct termio)
-
-#define TCSBRK _IO('t', 29)
-#define TCXONC _IO('t', 30)
-#define TCFLSH _IO('t', 31)
-
-#define TIOCSWINSZ _IOW('t', 103, struct winsize)
-#define TIOCGWINSZ _IOR('t', 104, struct winsize)
-#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
-#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
-#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
-
-#define TIOCGLTC _IOR('t', 116, struct ltchars)
-#define TIOCSLTC _IOW('t', 117, struct ltchars)
-#define TIOCSPGRP _IOW('t', 118, int)
-#define TIOCGPGRP _IOR('t', 119, int)
-
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-
-#define TIOCSTI 0x5412
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-# define TIOCM_LE 0x001
-# define TIOCM_DTR 0x002
-# define TIOCM_RTS 0x004
-# define TIOCM_ST 0x008
-# define TIOCM_SR 0x010
-# define TIOCM_CTS 0x020
-# define TIOCM_CAR 0x040
-# define TIOCM_RNG 0x080
-# define TIOCM_DSR 0x100
-# define TIOCM_CD TIOCM_CAR
-# define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-# define TIOCPKT_DATA 0
-# define TIOCPKT_FLUSHREAD 1
-# define TIOCPKT_FLUSHWRITE 2
-# define TIOCPKT_STOP 4
-# define TIOCPKT_START 8
-# define TIOCPKT_NOSTOP 16
-# define TIOCPKT_DOSTOP 32
-# define TIOCPKT_IOCTL 64
-
-
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TIOCGRS485 0x542e
-#define TIOCSRS485 0x542f
-#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
-#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
-
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
- /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-
-#endif /* _ASM_POWERPC_IOCTLS_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index edfc9803ec9..42632c7a2a4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,22 +30,19 @@
#include <asm/machdep.h>
#include <asm/types.h>
-#define IOMMU_PAGE_SHIFT 12
-#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
- return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
-}
-
-
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -53,6 +50,16 @@ static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
*/
#define IOMAP_MAX_ORDER 13
+#define IOMMU_POOL_HASHBITS 2
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */
unsigned long it_size; /* Size of iommu table in entries */
@@ -61,13 +68,26 @@ struct iommu_table {
unsigned long it_index; /* which iommu table this is */
unsigned long it_type; /* type: PCI or Virtual Bus */
unsigned long it_blocksize; /* Entries in each block (cacheline) */
- unsigned long it_hint; /* Hint for next alloc */
- unsigned long it_largehint; /* Hint for large allocs */
- unsigned long it_halfpoint; /* Breaking point for small/large allocs */
- spinlock_t it_lock; /* Protects it_map */
+ unsigned long poolsize;
+ unsigned long nr_pools;
+ struct iommu_pool large_pool;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
+ unsigned long it_page_shift;/* table iommu page size */
+#ifdef CONFIG_IOMMU_API
+ struct iommu_group *it_group;
+#endif
+ void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+ return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
struct scatterlist;
static inline void set_iommu_table_base(struct device *dev, void *base)
@@ -88,6 +108,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
+#ifdef CONFIG_IOMMU_API
+extern void iommu_register_group(struct iommu_table *tbl,
+ int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct device *dev);
+extern void iommu_del_device(struct device *dev);
+#else
+static inline void iommu_register_group(struct iommu_table *tbl,
+ int pci_domain_number,
+ unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+#endif /* !CONFIG_IOMMU_API */
+
+static inline void set_iommu_table_base_and_group(struct device *dev,
+ void *base)
+{
+ set_iommu_table_base(dev, base);
+ iommu_add_device(dev);
+}
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
@@ -112,17 +160,9 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
struct dma_attrs *attrs);
extern void iommu_init_early_pSeries(void);
-extern void iommu_init_early_iSeries(void);
extern void iommu_init_early_dart(void);
extern void iommu_init_early_pasemi(void);
-#ifdef CONFIG_PCI
-extern void pci_iommu_init(void);
-extern void pci_direct_iommu_init(void);
-#else
-static inline void pci_iommu_init(void) { }
-#endif
-
extern void alloc_dart_table(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)
@@ -138,5 +178,26 @@ static inline void iommu_restore(void)
}
#endif
+/* The API to support IOMMU operations for VFIO */
+extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
+ unsigned long ioba, unsigned long tce_value,
+ unsigned long npages);
+extern int iommu_tce_put_param_check(struct iommu_table *tbl,
+ unsigned long ioba, unsigned long tce);
+extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
+ unsigned long hwaddr, enum dma_data_direction direction);
+extern unsigned long iommu_clear_tce(struct iommu_table *tbl,
+ unsigned long entry);
+extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
+ unsigned long entry, unsigned long pages);
+extern int iommu_put_tce_user_mode(struct iommu_table *tbl,
+ unsigned long entry, unsigned long tce);
+
+extern void iommu_flush_tce(struct iommu_table *tbl);
+extern int iommu_take_ownership(struct iommu_table *tbl);
+extern void iommu_release_ownership(struct iommu_table *tbl);
+
+extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
+
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
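
Illustrative arithmetic for the per-table page-size change: assuming a table with it_page_shift == 16 (64 KB IOMMU pages), mapping 1 MB needs 2^4 such pages, so the reworked get_iommu_order() returns 4.

static int example_order_for_1M(struct iommu_table *tbl)
{
	/* ((1M - 1) >> 16) == 15; __ilog2(15) + 1 == 4 */
	return get_iommu_order(1024 * 1024, tbl);
}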
diff --git a/arch/powerpc/include/asm/ipcbuf.h b/arch/powerpc/include/asm/ipcbuf.h
deleted file mode 100644
index 2c3e1d94db1..00000000000
--- a/arch/powerpc/include/asm/ipcbuf.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef _ASM_POWERPC_IPCBUF_H
-#define _ASM_POWERPC_IPCBUF_H
-
-/*
- * The ipc64_perm structure for the powerpc is identical to
- * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
- * kernel. Note extra padding because this structure is passed back
- * and forth between kernel and user space. Pad space is left for:
- * - 1 32-bit value to fill up for 8-byte alignment
- * - 2 miscellaneous 64-bit values
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/types.h>
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned int seq;
- unsigned int __pad1;
- unsigned long long __unused1;
- unsigned long long __unused2;
-};
-
-#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 67ab5fb7d15..41f13cec8a8 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -9,298 +9,29 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <asm/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
- for ((i) = 0; (i) < NR_IRQS; ++(i))
-
extern atomic_t ppc_n_lost_interrupts;
/* This number is used when no interrupt has been assigned */
#define NO_IRQ (0)
-/* This is a special irq number to return from get_irq() to tell that
- * no interrupt happened _and_ ignore it (don't count it as bad). Some
- * platforms like iSeries rely on that.
- */
-#define NO_IRQ_IGNORE ((unsigned int)-1)
-
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Number of irqs reserved for the legacy controller */
-#define NUM_ISA_INTERRUPTS 16
-
/* Same thing, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
-/* Interrupt controller "host" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The host
- * structure is generally created by the PIC code for a given PIC instance
- * (though a host can cover more than one PIC if they have a flat number
- * model). It's the host callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures are fairly agnostic to the fact that
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart host->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
- */
-struct irq_host;
-struct radix_tree_root;
-
-/* Functions below are provided by the host and called whenever a new mapping
- * is created or an old mapping is disposed. The host can then proceed to
- * whatever internal data structures management is required. It also needs
- * to setup the irq_desc when returning from map().
- */
-struct irq_host_ops {
- /* Match an interrupt controller device node to a host, returns
- * 1 on a match
- */
- int (*match)(struct irq_host *h, struct device_node *node);
-
- /* Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- */
- int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
- /* Dispose of such a mapping */
- void (*unmap)(struct irq_host *h, unsigned int virq);
-
- /* Update of such a mapping */
- void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
- /* Translate a device-tree interrupt specifier from the raw format coming
- * from the firmware to an irq_hw_number_t (interrupt line number) and
- * a type (sense) that can be passed to set_irq_type(). In the absence
- * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
- * will return the hw number in the first cell and IRQ_TYPE_NONE for
- * the type (which amounts to keeping whatever default value the
- * interrupt controller has for that line)
- */
- int (*xlate)(struct irq_host *h, struct device_node *ctrler,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-};
-
-struct irq_host {
- struct list_head link;
-
- /* type of reverse mapping technique */
- unsigned int revmap_type;
-#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
-#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_HOST_MAP_TREE 3 /* radix tree */
- union {
- struct {
- unsigned int size;
- unsigned int *revmap;
- } linear;
- struct radix_tree_root tree;
- } revmap_data;
- struct irq_host_ops *ops;
- void *host_data;
- irq_hw_number_t inval_irq;
-
- /* Optional device node pointer */
- struct device_node *of_node;
-};
-
-/* The main irq map itself is an array of NR_IRQS entries containing the
- * associated host and irq number. An entry with a NULL host is free.
- * A free entry can be allocated; the allocator first sets hwirq to the
- * host's invalid irq number and only then fills in the host pointer.
- */
-struct irq_map_entry {
- irq_hw_number_t hwirq;
- struct irq_host *host;
-};
-
-extern struct irq_map_entry irq_map[NR_IRQS];
-
extern irq_hw_number_t virq_to_hw(unsigned int virq);
/**
- * irq_alloc_host - Allocate a new irq_host data structure
- * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
- * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the map
- * @ops: map/unmap host callbacks
- * @inval_irq: provide a hw number in that host space that is always invalid
- *
- * Allocates and initializes an irq_host structure. Note that in the case of
- * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
- * for all legacy interrupts except 0 (which is always the invalid irq for
- * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
- * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
- * later during boot automatically (the reverse mapping will use the slow path
- * until that happens).
- */
-extern struct irq_host *irq_alloc_host(struct device_node *of_node,
- unsigned int revmap_type,
- unsigned int revmap_arg,
- struct irq_host_ops *ops,
- irq_hw_number_t inval_irq);
-
-
-/**
- * irq_find_host - Locates a host for a given device node
- * @node: device-tree node of the interrupt controller
- */
-extern struct irq_host *irq_find_host(struct device_node *node);
-
-
-/**
- * irq_set_default_host - Set a "default" host
- * @host: default host pointer
- *
- * For convenience, it's possible to set a "default" host that will be used
- * whenever NULL is passed to irq_create_mapping(). It makes life easier for
- * platforms that want to manipulate a few hard coded interrupt numbers that
- * aren't properly represented in the device-tree.
- */
-extern void irq_set_default_host(struct irq_host *host);
-
-
-/**
- * irq_set_virq_count - Set the maximum number of virt irqs
- * @count: number of linux virtual irqs, capped at NR_IRQS
- *
- * This is mainly for use by platforms like iSeries that want to program
- * the virtual irq number into the controller to avoid the reverse mapping
- */
-extern void irq_set_virq_count(unsigned int count);
-
-
-/**
- * irq_create_mapping - Map a hardware interrupt into linux virq space
- * @host: host owning this hardware interrupt or NULL for default host
- * @hwirq: hardware irq number in that host space
- *
- * Only one mapping per hardware interrupt is permitted. Returns a linux
- * virq number.
- * If the sense/trigger is to be specified, set_irq_type() should be called
- * on the number returned from that call.
- */
-extern unsigned int irq_create_mapping(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- */
-extern void irq_dispose_mapping(unsigned int virq);
-
-/**
- * irq_find_mapping - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
- */
-extern unsigned int irq_find_mapping(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-/**
- * irq_create_direct_mapping - Allocate a virq for direct mapping
- * @host: host to allocate the virq for or NULL for default host
- *
- * This routine is used for irq controllers which can choose the hardware
- * interrupt numbers they generate. In such a case it's simplest to use
- * the linux virq as the hardware interrupt number.
- */
-extern unsigned int irq_create_direct_mapping(struct irq_host *host);
-
-/**
- * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
- * @host: host owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that host space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
- irq_hw_number_t hwirq);
-
-/**
- * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-/**
- * irq_linear_revmap - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It falls back to the slow path if the revmap doesn't exist
- * yet, and will create the revmap entry with appropriate locking.
- */
-
-extern unsigned int irq_linear_revmap(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-
-
-/**
- * irq_alloc_virt - Allocate virtual irq numbers
- * @host: host owning these new virtual irqs
- * @count: number of consecutive numbers to allocate
- * @hint: pass a hint number; the allocator will try to use a 1:1 mapping
- *
- * This is a low level function that is used internally by irq_create_mapping()
- * and that can be used by some irq controller implementations for things
- * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
- */
-extern unsigned int irq_alloc_virt(struct irq_host *host,
- unsigned int count,
- unsigned int hint);
-
-/**
- * irq_free_virt - Free virtual irq numbers
- * @virq: virtual irq number of the first interrupt to free
- * @count: number of interrupts to free
- *
- * This function is the opposite of irq_alloc_virt. It will not clear reverse
- * maps; that should be done beforehand by unmapping the interrupts. In fact,
- * all interrupts covered by the range being freed should have been unmapped
- * prior to calling this.
- */
-extern void irq_free_virt(unsigned int virq, unsigned int count);
-
-/**
* irq_early_init - Init irq remapping subsystem
*/
extern void irq_early_init(void);
@@ -338,9 +69,11 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
- struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
+
+int irq_choose_cpu(const struct cpumask *mask);
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
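
The hunk above drops the powerpc-private irq_host layer in favour of the generic irqdomain code now pulled in through <linux/irqdomain.h>. As a rough orientation only — this sketch is not part of the patch, and all "mypic" names are hypothetical — a PIC driver that used to call irq_alloc_host() with IRQ_HOST_MAP_LINEAR would express the same linear reverse map with the generic API roughly like this:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip mypic_irq_chip;	/* mask/unmask ops elided */

static int mypic_map(struct irq_domain *d, unsigned int virq,
		     irq_hw_number_t hw)
{
	/* Equivalent of the old irq_host_ops->map(): attach chip + handler */
	irq_set_chip_and_handler(virq, &mypic_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops mypic_domain_ops = {
	.map	= mypic_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

static unsigned int mypic_init(struct device_node *node,
			       irq_hw_number_t hwirq)
{
	/* Old: irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 128, &ops, 0) */
	struct irq_domain *d = irq_domain_add_linear(node, 128,
						     &mypic_domain_ops, NULL);

	/* irq_create_mapping()/irq_find_mapping() keep their names but
	 * now come from the generic irqdomain code. */
	return d ? irq_create_mapping(d, hwirq) : NO_IRQ;
}
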
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b85d8ddbb66..e20eb95429a 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -12,31 +12,59 @@
#else
#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ mflr r0; \
+ stdu r1, -STACK_FRAME_OVERHEAD(r1); \
+ std r0, 16(r1); \
+ stdu r1, -STACK_FRAME_OVERHEAD(r1); \
+ bl func; \
+ ld r1, 0(r1); \
+ ld r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func) \
+ bl func;
+#endif
+
/*
* Most of the CPU's IRQ-state tracing is done from assembly code; we
* have to call a C function, so we call a wrapper that saves all the
* C-clobbered registers.
*/
-#define TRACE_ENABLE_INTS bl .trace_hardirqs_on
-#define TRACE_DISABLE_INTS bl .trace_hardirqs_off
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
- cmpdi en,0; \
- bne 95f; \
- stb en,PACASOFTIRQEN(r13); \
- bl .trace_hardirqs_off; \
- b skip; \
-95: bl .trace_hardirqs_on; \
- li en,1;
-#define TRACE_AND_RESTORE_IRQ(en) \
- TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
- stb en,PACASOFTIRQEN(r13); \
-96:
+#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
+
+/*
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
+ */
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+ lbz __rA,PACASOFTIRQEN(r13); \
+ lbz __rB,PACAIRQHAPPENED(r13); \
+ cmpwi cr0,__rA,0; \
+ li __rA,0; \
+ ori __rB,__rB,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACAIRQHAPPENED(r13); \
+ beq 44f; \
+ stb __rA,PACASOFTIRQEN(r13); \
+ TRACE_DISABLE_INTS; \
+44:
+
#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
-#define TRACE_AND_RESTORE_IRQ(en) \
- stb en,PACASOFTIRQEN(r13)
+
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+ lbz __rA,PACAIRQHAPPENED(r13); \
+ li __rB,0; \
+ ori __rA,__rA,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACASOFTIRQEN(r13); \
+ stb __rA,PACAIRQHAPPENED(r13)
#endif
#endif
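
The new RECONCILE_IRQ_STATE sequence above is easier to follow as C pseudocode. This is a sketch for readability only — the real thing must run in assembly before any C-clobbered register is live — and it assumes the usual paca fields behind the PACASOFTIRQEN and PACAIRQHAPPENED offsets:

static inline void reconcile_irq_state_sketch(void)
{
	u8 soft_enabled = local_paca->soft_enabled;	/* PACASOFTIRQEN */
	u8 happened = local_paca->irq_happened;		/* PACAIRQHAPPENED */

	/* Record that hard interrupts are now disabled. */
	local_paca->irq_happened = happened | PACA_IRQ_HARD_DIS;

	/* If we were soft-enabled, soft-disable and trace the transition. */
	if (soft_enabled) {
		local_paca->soft_enabled = 0;
		trace_hardirqs_off();		/* TRACE_DISABLE_INTS */
	}
}
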
diff --git a/arch/powerpc/include/asm/iseries/alpaca.h b/arch/powerpc/include/asm/iseries/alpaca.h
deleted file mode 100644
index c0cce6727a6..00000000000
--- a/arch/powerpc/include/asm/iseries/alpaca.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2008 Stephen Rothwell IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_ALPACA_H
-#define _ASM_POWERPC_ISERIES_ALPACA_H
-
-/*
- * This is the part of the paca that the iSeries hypervisor
- * needs to be statically initialised. Immediately after boot
- * we switch to the normal Linux paca.
- */
-struct alpaca {
- struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
- const void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
-};
-
-#endif /* _ASM_POWERPC_ISERIES_ALPACA_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call.h b/arch/powerpc/include/asm/iseries/hv_call.h
deleted file mode 100644
index 162d653ad51..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_call.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_H
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/paca.h>
-
-/* Type of yield for HvCallBaseYieldProcessor */
-#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
-#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
-#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
-
-/* interrupt masks for setEnabledInterrupts */
-#define HvCall_MaskIPI 0x00000001
-#define HvCall_MaskLpEvent 0x00000002
-#define HvCall_MaskLpProd 0x00000004
-#define HvCall_MaskTimeout 0x00000008
-
-/* Log buffer formats */
-#define HvCall_LogBuffer_ASCII 0
-#define HvCall_LogBuffer_EBCDIC 1
-
-#define HvCallBaseAckDeferredInts HvCallBase + 0
-#define HvCallBaseCpmPowerOff HvCallBase + 1
-#define HvCallBaseGetHwPatch HvCallBase + 2
-#define HvCallBaseReIplSpAttn HvCallBase + 3
-#define HvCallBaseSetASR HvCallBase + 4
-#define HvCallBaseSetASRAndRfi HvCallBase + 5
-#define HvCallBaseSetIMR HvCallBase + 6
-#define HvCallBaseSendIPI HvCallBase + 7
-#define HvCallBaseTerminateMachine HvCallBase + 8
-#define HvCallBaseTerminateMachineSrc HvCallBase + 9
-#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
-#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
-#define HvCallBaseSetVirtualSIT HvCallBase + 12
-#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
-#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
-#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
-#define HvCallBaseSendLpProd HvCallBase + 16
-#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
-#define HvCallBaseYieldProcessor HvCallBase + 18
-#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
-#define HvCallBaseSetVirtualDecr HvCallBase + 20
-#define HvCallBaseClearLogBuffer HvCallBase + 21
-#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
-#define HvCallBaseGetLogBufferFormat HvCallBase + 23
-#define HvCallBaseGetLogBufferLength HvCallBase + 24
-#define HvCallBaseReadLogBuffer HvCallBase + 25
-#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
-#define HvCallBaseWriteLogBuffer HvCallBase + 27
-#define HvCallBaseRouter28 HvCallBase + 28
-#define HvCallBaseRouter29 HvCallBase + 29
-#define HvCallBaseRouter30 HvCallBase + 30
-#define HvCallBaseSetDebugBus HvCallBase + 31
-
-#define HvCallCcSetDABR HvCallCc + 7
-
-static inline void HvCall_setVirtualDecr(void)
-{
- /*
- * Ignore any error return codes - most likely means that the
- * target value for the LP has been increased and this vary off
- * would bring us below the new target.
- */
- HvCall0(HvCallBaseSetVirtualDecr);
-}
-
-static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
-{
- HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
-}
-
-static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
-{
- HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
-}
-
-static inline void HvCall_setLogBufferFormatAndCodepage(int format,
- u32 codePage)
-{
- HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
-}
-
-extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
-
-static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
-{
- HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
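
The HvCallN wrappers removed above encode the argument count in their name and take a call token built from a component base (HvCallBase, HvCallCc, ...) plus a function index. Purely as an illustration of the removed interface, enabling two event classes looked like this (the function name is hypothetical):

static void enable_lp_event_sources_sketch(void)
{
	/* HvCall1(HvCallBaseSetEnabledInterrupts, mask) under the hood */
	HvCall_setEnabledInterrupts(HvCall_MaskLpEvent | HvCall_MaskLpProd);
}
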
diff --git a/arch/powerpc/include/asm/iseries/hv_call_event.h b/arch/powerpc/include/asm/iseries/hv_call_event.h
deleted file mode 100644
index cc029d388e1..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_event.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/abs_addr.h>
-
-struct HvLpEvent;
-
-typedef u8 HvLpEvent_Type;
-typedef u8 HvLpEvent_AckInd;
-typedef u8 HvLpEvent_AckType;
-
-typedef u8 HvLpDma_Direction;
-typedef u8 HvLpDma_AddressType;
-
-typedef u64 HvLpEvent_Rc;
-typedef u64 HvLpDma_Rc;
-
-#define HvCallEventAckLpEvent HvCallEvent + 0
-#define HvCallEventCancelLpEvent HvCallEvent + 1
-#define HvCallEventCloseLpEventPath HvCallEvent + 2
-#define HvCallEventDmaBufList HvCallEvent + 3
-#define HvCallEventDmaSingle HvCallEvent + 4
-#define HvCallEventDmaToSp HvCallEvent + 5
-#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
-#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
-#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
-#define HvCallEventOpenLpEventPath HvCallEvent + 9
-#define HvCallEventSetLpEventStack HvCallEvent + 10
-#define HvCallEventSignalLpEvent HvCallEvent + 11
-#define HvCallEventSignalLpEventParms HvCallEvent + 12
-#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
-#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
-#define HvCallEventRouter15 HvCallEvent + 15
-
-static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
-{
- HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
-}
-
-static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
-{
- HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
-}
-
-static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
- char *eventStackAddr, u32 eventStackSize)
-{
- HvCall3(HvCallEventSetLpEventStack, queueIndex,
- virt_to_abs(eventStackAddr), eventStackSize);
-}
-
-static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
- u16 lpLogicalProcIndex)
-{
- HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
- lpLogicalProcIndex);
-}
-
-static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
- HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
- HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
- HvLpInstanceId targetInstanceId, u64 correlationToken,
- u64 eventData1, u64 eventData2, u64 eventData3,
- u64 eventData4, u64 eventData5)
-{
- /* Pack the misc bits into a single Dword to pass to PLIC */
- union {
- struct {
- u8 ack_and_target;
- u8 type;
- u16 subtype;
- HvLpInstanceId src_inst;
- HvLpInstanceId target_inst;
- } parms;
- u64 dword;
- } packed;
-
- packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
- packed.parms.type = type;
- packed.parms.subtype = subtype;
- packed.parms.src_inst = sourceInstanceId;
- packed.parms.target_inst = targetInstanceId;
-
- return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
- correlationToken, eventData1, eventData2,
- eventData3, eventData4, eventData5);
-}
-
-extern void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag);
-extern void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t iseries_hv_map(void *vaddr, size_t size,
- enum dma_data_direction direction);
-extern void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
-
-static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
- HvLpIndex targetLp, HvLpEvent_Type type)
-{
- return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
-}
-
-static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
- HvLpIndex targetLp, HvLpEvent_Type type)
-{
- return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
-}
-
-static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
- HvLpEvent_Type type)
-{
- HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
-}
-
-static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
- HvLpEvent_Type type)
-{
- HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
-}
-
-static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
- HvLpIndex remoteLp, HvLpDma_Direction direction,
- HvLpInstanceId localInstanceId,
- HvLpInstanceId remoteInstanceId,
- HvLpDma_AddressType localAddressType,
- HvLpDma_AddressType remoteAddressType,
- /* Do these need to be converted to absolute addresses? */
- u64 localBufList, u64 remoteBufList, u32 transferLength)
-{
- /* Pack the misc bits into a single Dword to pass to PLIC */
- union {
- struct {
- u8 flags;
- HvLpIndex remote;
- u8 type;
- u8 reserved;
- HvLpInstanceId local_inst;
- HvLpInstanceId remote_inst;
- } parms;
- u64 dword;
- } packed;
-
- packed.parms.flags = (direction << 7) |
- (localAddressType << 6) | (remoteAddressType << 5);
- packed.parms.remote = remoteLp;
- packed.parms.type = type;
- packed.parms.reserved = 0;
- packed.parms.local_inst = localInstanceId;
- packed.parms.remote_inst = remoteInstanceId;
-
- return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
- remoteBufList, transferLength);
-}
-
-static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
- u32 length, HvLpDma_Direction dir)
-{
- return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
- length, dir);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_sc.h b/arch/powerpc/include/asm/iseries/hv_call_sc.h
deleted file mode 100644
index f5d21095925..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_sc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
-
-#include <linux/types.h>
-
-#define HvCallBase 0x8000000000000000ul
-#define HvCallCc 0x8001000000000000ul
-#define HvCallCfg 0x8002000000000000ul
-#define HvCallEvent 0x8003000000000000ul
-#define HvCallHpt 0x8004000000000000ul
-#define HvCallPci 0x8005000000000000ul
-#define HvCallSm 0x8007000000000000ul
-#define HvCallXm 0x8009000000000000ul
-
-extern u64 HvCall0(u64);
-extern u64 HvCall1(u64, u64);
-extern u64 HvCall2(u64, u64, u64);
-extern u64 HvCall3(u64, u64, u64, u64);
-extern u64 HvCall4(u64, u64, u64, u64, u64);
-extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
-extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
-extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
-
-extern u64 HvCall0Ret16(u64, void *);
-extern u64 HvCall1Ret16(u64, void *, u64);
-extern u64 HvCall2Ret16(u64, void *, u64, u64);
-extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
-extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
-extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
-extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
-extern u64 HvCall7Ret16(u64, void *, u64, u64, u64, u64, u64, u64, u64);
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_xm.h b/arch/powerpc/include/asm/iseries/hv_call_xm.h
deleted file mode 100644
index 392ac3f54df..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_xm.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from SLIC.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-#define HvCallXmGetTceTableParms HvCallXm + 0
-#define HvCallXmTestBus HvCallXm + 1
-#define HvCallXmConnectBusUnit HvCallXm + 2
-#define HvCallXmLoadTod HvCallXm + 8
-#define HvCallXmTestBusUnit HvCallXm + 9
-#define HvCallXmSetTce HvCallXm + 11
-#define HvCallXmSetTces HvCallXm + 13
-
-static inline void HvCallXm_getTceTableParms(u64 cb)
-{
- HvCall1(HvCallXmGetTceTableParms, cb);
-}
-
-static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
-{
- return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
-}
-
-static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
- u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
-{
- return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
- tce1, tce2, tce3, tce4);
-}
-
-static inline u64 HvCallXm_testBus(u16 busNumber)
-{
- return HvCall1(HvCallXmTestBus, busNumber);
-}
-
-static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
- u8 deviceId)
-{
- return HvCall2(HvCallXmTestBusUnit, busNumber,
- (subBusNumber << 8) | deviceId);
-}
-
-static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
- u8 deviceId, u64 interruptToken)
-{
- return HvCall5(HvCallXmConnectBusUnit, busNumber,
- (subBusNumber << 8) | deviceId, interruptToken, 0,
- 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
-}
-
-static inline u64 HvCallXm_loadTod(void)
-{
- return HvCall0(HvCallXmLoadTod);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_config.h b/arch/powerpc/include/asm/iseries/hv_lp_config.h
deleted file mode 100644
index a006fd1e4a2..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_config.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
-#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
-
-/*
- * This file contains the interface to the LPAR configuration data
- * to determine which resources should be allocated to each partition.
- */
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-enum {
- HvCallCfg_Cur = 0,
- HvCallCfg_Init = 1,
- HvCallCfg_Max = 2,
- HvCallCfg_Min = 3
-};
-
-#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
-#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
-#define HvCallCfgGetMsChunks HvCallCfg + 9
-#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
-#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
-#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
-#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
-#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
-
-extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
-extern HvLpIndex HvLpConfig_getLpIndex(void);
-extern HvLpIndex HvLpConfig_getPrimaryLpIndex(void);
-
-static inline u64 HvLpConfig_getMsChunks(void)
-{
- return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
-{
- return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
-}
-
-static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
-{
- return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
-}
-
-static inline u64 HvLpConfig_getPhysicalProcessors(void)
-{
- return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
-{
- return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
-}
-
-static inline u64 HvLpConfig_getSharedProcUnits(void)
-{
- return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
-{
- return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
- HvCallCfg_Max);
-}
-
-static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
-{
- return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
- HvCallCfg_Max);
-}
-
-static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
- HvLpIndex lp)
-{
- /*
- * This is a new function in V5R1 so calls to this on older
- * hypervisors will return -1
- */
- u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
- if (retVal == -1)
- retVal = 0;
- return retVal;
-}
-
-static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
-{
- return HvLpConfig_getVirtualLanIndexMapForLp(
- HvLpConfig_getLpIndex_outline());
-}
-
-static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
- HvLpIndex lp2)
-{
- HvLpVirtualLanIndexMap virtualLanIndexMap1 =
- HvLpConfig_getVirtualLanIndexMapForLp(lp1);
- HvLpVirtualLanIndexMap virtualLanIndexMap2 =
- HvLpConfig_getVirtualLanIndexMapForLp(lp2);
- return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
-}
-
-static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
-{
- return HvCall1(HvCallCfgGetHostingLpIndex, lp);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_event.h b/arch/powerpc/include/asm/iseries/hv_lp_event.h
deleted file mode 100644
index 8f5da7d7720..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_event.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* This file contains the class for HV events in the system. */
-
-#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
-#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
-
-#include <asm/types.h>
-#include <asm/ptrace.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_call_event.h>
-
-/*
- * HvLpEvent is the structure for Lp Event messages passed between
- * partitions through PLIC.
- */
-
-struct HvLpEvent {
- u8 flags; /* Event flags x00-x00 */
- u8 xType; /* Type of message x01-x01 */
- u16 xSubtype; /* Subtype for event x02-x03 */
- u8 xSourceLp; /* Source LP x04-x04 */
- u8 xTargetLp; /* Target LP x05-x05 */
- u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
- u8 xRc; /* RC for Ack flows x07-x07 */
- u16 xSourceInstanceId; /* Source side's instance id x08-x09 */
- u16 xTargetInstanceId; /* Target side's instance id x0A-x0B */
- union {
- u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
- u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
- u8 xSubtypeDataChar[4]; /* Data as 4 chars */
- } x;
-
- u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
-};
-
-typedef void (*LpEventHandler)(struct HvLpEvent *);
-
-/* Register a handler for an event type - returns 0 on success */
-extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
- LpEventHandler hdlr);
-
-/*
- * Unregister a handler for an event type
- *
- * This call will sleep until the handler being removed is guaranteed to
- * be no longer executing on any CPU. Do not call with locks held.
- *
- * returns 0 on success
- * Unregister will fail if there are any paths open for the type
- */
-extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
-
-/*
- * Open an Lp Event Path for an event type
- * returns 0 on success
- * openPath will fail if there is no handler registered for the event type.
- * The lpIndex specified is the partition index of the target partition
- * (for VirtualIo, VirtualLan and SessionMgr); other types specify zero
- */
-extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
-
-/*
- * Close an Lp Event Path for a type and partition
- * returns 0 on success
- */
-extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
-
-#define HvLpEvent_Type_Hypervisor 0
-#define HvLpEvent_Type_MachineFac 1
-#define HvLpEvent_Type_SessionMgr 2
-#define HvLpEvent_Type_SpdIo 3
-#define HvLpEvent_Type_VirtualBus 4
-#define HvLpEvent_Type_PciIo 5
-#define HvLpEvent_Type_RioIo 6
-#define HvLpEvent_Type_VirtualLan 7
-#define HvLpEvent_Type_VirtualIo 8
-#define HvLpEvent_Type_NumTypes 9
-
-#define HvLpEvent_Rc_Good 0
-#define HvLpEvent_Rc_BufferNotAvailable 1
-#define HvLpEvent_Rc_Cancelled 2
-#define HvLpEvent_Rc_GenericError 3
-#define HvLpEvent_Rc_InvalidAddress 4
-#define HvLpEvent_Rc_InvalidPartition 5
-#define HvLpEvent_Rc_InvalidSize 6
-#define HvLpEvent_Rc_InvalidSubtype 7
-#define HvLpEvent_Rc_InvalidSubtypeData 8
-#define HvLpEvent_Rc_InvalidType 9
-#define HvLpEvent_Rc_PartitionDead 10
-#define HvLpEvent_Rc_PathClosed 11
-#define HvLpEvent_Rc_SubtypeError 12
-
-#define HvLpEvent_Function_Ack 0
-#define HvLpEvent_Function_Int 1
-
-#define HvLpEvent_AckInd_NoAck 0
-#define HvLpEvent_AckInd_DoAck 1
-
-#define HvLpEvent_AckType_ImmediateAck 0
-#define HvLpEvent_AckType_DeferredAck 1
-
-#define HV_LP_EVENT_INT 0x01
-#define HV_LP_EVENT_DO_ACK 0x02
-#define HV_LP_EVENT_DEFERRED_ACK 0x04
-#define HV_LP_EVENT_VALID 0x80
-
-#define HvLpDma_Direction_LocalToRemote 0
-#define HvLpDma_Direction_RemoteToLocal 1
-
-#define HvLpDma_AddressType_TceIndex 0
-#define HvLpDma_AddressType_RealAddress 1
-
-#define HvLpDma_Rc_Good 0
-#define HvLpDma_Rc_Error 1
-#define HvLpDma_Rc_PartitionDead 2
-#define HvLpDma_Rc_PathClosed 3
-#define HvLpDma_Rc_InvalidAddress 4
-#define HvLpDma_Rc_InvalidLength 5
-
-static inline int hvlpevent_is_valid(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_VALID;
-}
-
-static inline void hvlpevent_invalidate(struct HvLpEvent *h)
-{
- h->flags &= ~ HV_LP_EVENT_VALID;
-}
-
-static inline int hvlpevent_is_int(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_INT;
-}
-
-static inline int hvlpevent_is_ack(struct HvLpEvent *h)
-{
- return !hvlpevent_is_int(h);
-}
-
-static inline int hvlpevent_need_ack(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_DO_ACK;
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
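
The header deleted above documented a strict lifecycle: register a handler, then open a path (openPath fails without a registered handler). A hedged sketch of how a now-removed iSeries driver used it; my_handler, my_init, and the choice of HvLpEvent_Type_VirtualIo are illustrative:

static void my_handler(struct HvLpEvent *event)
{
	if (!event)
		return;
	/* Handle int flows; ack when the sender asked for one. */
	if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
		event->xRc = HvLpEvent_Rc_Good;
		HvCallEvent_ackLpEvent(event);
	}
}

static int my_init(HvLpIndex target_lp)
{
	int rc = HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					   my_handler);
	if (rc)
		return rc;
	/* openPath fails unless a handler is registered first. */
	return HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, target_lp);
}
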
diff --git a/arch/powerpc/include/asm/iseries/hv_types.h b/arch/powerpc/include/asm/iseries/hv_types.h
deleted file mode 100644
index c3e6d2a1d1c..00000000000
--- a/arch/powerpc/include/asm/iseries/hv_types.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
-#define _ASM_POWERPC_ISERIES_HV_TYPES_H
-
-/*
- * General typedefs for the hypervisor.
- */
-
-#include <asm/types.h>
-
-typedef u8 HvLpIndex;
-typedef u16 HvLpInstanceId;
-typedef u64 HvLpTOD;
-typedef u64 HvLpSystemSerialNum;
-typedef u8 HvLpDeviceSerialNum[12];
-typedef u16 HvLpSanHwSet;
-typedef u16 HvLpBus;
-typedef u16 HvLpBoard;
-typedef u16 HvLpCard;
-typedef u8 HvLpDeviceType[4];
-typedef u8 HvLpDeviceModel[3];
-typedef u64 HvIoToken;
-typedef u8 HvLpName[8];
-typedef u32 HvIoId;
-typedef u64 HvRealMemoryIndex;
-typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
-typedef u16 HvLpVrmIndex;
-typedef u32 HvXmGenerationId;
-typedef u8 HvLpBusPool;
-typedef u8 HvLpSharedPoolIndex;
-typedef u16 HvLpSharedProcUnitsX100;
-typedef u8 HvLpVirtualLanIndex;
-typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
-typedef u16 HvBusNumber; /* Hypervisor Bus Number */
-typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
-typedef u8 HvAgentId; /* Hypervisor DevFn */
-
-
-#define HVMAXARCHITECTEDLPS 32
-#define HVMAXARCHITECTEDVIRTUALLANS 16
-#define HVMAXARCHITECTEDVIRTUALDISKS 32
-#define HVMAXARCHITECTEDVIRTUALCDROMS 8
-#define HVMAXARCHITECTEDVIRTUALTAPES 8
-#define HVCHUNKSIZE (256 * 1024)
-#define HVPAGESIZE (4 * 1024)
-#define HVLPMINMEGSPRIMARY 256
-#define HVLPMINMEGSSECONDARY 64
-#define HVCHUNKSPERMEG 4
-#define HVPAGESPERMEG 256
-#define HVPAGESPERCHUNK 64
-
-#define HvLpIndexInvalid ((HvLpIndex)0xff)
-
-/*
- * Enums for the sub-components under PLIC
- * Used in HvCall and HvPrimaryCall
- */
-enum {
- HvCallCompId = 0,
- HvCallCpuCtlsCompId = 1,
- HvCallCfgCompId = 2,
- HvCallEventCompId = 3,
- HvCallHptCompId = 4,
- HvCallPciCompId = 5,
- HvCallSlmCompId = 6,
- HvCallSmCompId = 7,
- HvCallSpdCompId = 8,
- HvCallXmCompId = 9,
- HvCallRioCompId = 10,
- HvCallRsvd3CompId = 11,
- HvCallRsvd2CompId = 12,
- HvCallRsvd1CompId = 13,
- HvCallMaxCompId = 14,
- HvPrimaryCallCompId = 0,
- HvPrimaryCallCfgCompId = 1,
- HvPrimaryCallPciCompId = 2,
- HvPrimaryCallSmCompId = 3,
- HvPrimaryCallSpdCompId = 4,
- HvPrimaryCallXmCompId = 5,
- HvPrimaryCallRioCompId = 6,
- HvPrimaryCallRsvd7CompId = 7,
- HvPrimaryCallRsvd6CompId = 8,
- HvPrimaryCallRsvd5CompId = 9,
- HvPrimaryCallRsvd4CompId = 10,
- HvPrimaryCallRsvd3CompId = 11,
- HvPrimaryCallRsvd2CompId = 12,
- HvPrimaryCallRsvd1CompId = 13,
- HvPrimaryCallMaxCompId = HvCallMaxCompId
-};
-
-struct HvLpBufferList {
- u64 addr;
- u64 len;
-};
-
-#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/arch/powerpc/include/asm/iseries/iommu.h b/arch/powerpc/include/asm/iseries/iommu.h
deleted file mode 100644
index 1b9692c6089..00000000000
--- a/arch/powerpc/include/asm/iseries/iommu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _ASM_POWERPC_ISERIES_IOMMU_H
-#define _ASM_POWERPC_ISERIES_IOMMU_H
-
-/*
- * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- */
-
-struct pci_dev;
-struct vio_dev;
-struct device_node;
-struct iommu_table;
-
-/* Get table parameters from HV */
-extern void iommu_table_getparms_iSeries(unsigned long busno,
- unsigned char slotno, unsigned char virtbus,
- struct iommu_table *tbl);
-
-extern struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev);
-extern void iommu_vio_init(void);
-
-#endif /* _ASM_POWERPC_ISERIES_IOMMU_H */
diff --git a/arch/powerpc/include/asm/iseries/it_lp_queue.h b/arch/powerpc/include/asm/iseries/it_lp_queue.h
deleted file mode 100644
index 42827883882..00000000000
--- a/arch/powerpc/include/asm/iseries/it_lp_queue.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
-#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
-
-/*
- * This control block defines the simple LP queue structure that is
- * shared between the hypervisor (PLIC) and the OS in order to send
- * events to an LP.
- */
-
-#include <asm/types.h>
-#include <asm/ptrace.h>
-
-#define IT_LP_MAX_QUEUES 8
-
-#define IT_LP_NOT_USED 0 /* Queue will not be used by PLIC */
-#define IT_LP_DEDICATED_IO 1 /* Queue dedicated to IO processor specified */
-#define IT_LP_DEDICATED_LP 2 /* Queue dedicated to LP specified */
-#define IT_LP_SHARED 3 /* Queue shared for both IO and LP */
-
-#define IT_LP_EVENT_STACK_SIZE 4096
-#define IT_LP_EVENT_MAX_SIZE 256
-#define IT_LP_EVENT_ALIGN 64
-
-struct hvlpevent_queue {
-/*
- * The hq_current_event is the pointer to the next event stack entry
- * that will become valid. The OS must peek at this entry to determine
- * if it is valid. PLIC will set the valid indicator as the very last
- * store into that entry.
- *
- * When the OS has completed processing of the event then it will mark
- * the event as invalid so that PLIC knows it can store into that event
- * location again.
- *
- * If the event stack fills and there are overflow events, then PLIC
- * will set the hq_overflow_pending flag in which case the OS will
- * have to fetch the additional LP events once it has drained the
- * event stack.
- *
- * The first 16 bytes are known by both the OS and PLIC. The remainder
- * of the cache line is for use by the OS.
- */
- u8 hq_overflow_pending; /* 0x00 Overflow events are pending */
- u8 hq_status; /* 0x01 DedicatedIo or DedicatedLp or NotUsed */
- u16 hq_proc_index; /* 0x02 Logical Proc Index for correlation */
- u8 hq_reserved1[12]; /* 0x04 */
- char *hq_current_event; /* 0x10 */
- char *hq_last_event; /* 0x18 */
- char *hq_event_stack; /* 0x20 */
- u8 hq_index; /* 0x28 unique sequential index. */
- u8 hq_reserved2[3]; /* 0x29-2b */
- spinlock_t hq_lock;
-};
-
-extern struct hvlpevent_queue hvlpevent_queue;
-
-extern int hvlpevent_is_pending(void);
-extern void process_hvlpevents(void);
-extern void setup_hvlpevent_queue(void);
-
-#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
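
The hq_current_event protocol described in the comment block above amounts to a simple peek/consume loop. A sketch only, with handle_event() hypothetical and the pointer-advance arithmetic elided:

extern void handle_event(struct HvLpEvent *ev);	/* hypothetical dispatcher */

static void drain_events_sketch(void)
{
	struct HvLpEvent *ev;

	for (;;) {
		ev = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
		if (!hvlpevent_is_valid(ev))
			break;			/* PLIC hasn't filled it yet */
		handle_event(ev);		/* dispatch on ev->xType */
		hvlpevent_invalidate(ev);	/* hand the slot back to PLIC */
		/* advance hq_current_event, wrapping at hq_last_event */
	}
	/* If the stack overflowed, pull the extras from PLIC afterwards. */
	if (hvlpevent_queue.hq_overflow_pending)
		HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
}
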
diff --git a/arch/powerpc/include/asm/iseries/lpar_map.h b/arch/powerpc/include/asm/iseries/lpar_map.h
deleted file mode 100644
index 5e9f3e128ee..00000000000
--- a/arch/powerpc/include/asm/iseries/lpar_map.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
-#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-
-#endif
-
-/*
- * The iSeries hypervisor will set up mapping for one or more
- * ESID/VSID pairs (in SLB/segment registers) and will set up
- * mappings of one or more ranges of pages to VAs.
- * We will have the hypervisor set up the ESID->VSID mapping
- * for the four kernel segments (C-F). With shared processors,
- * the hypervisor will clear all segment registers and reload
- * these four whenever the processor is switched from one
- * partition to another.
- */
-
-/* The Vsid and Esid identified below will be used by the hypervisor
- * to set up a memory mapping for part of the load area before giving
- * control to the Linux kernel. The load area is 64 MB, but this must
- * not attempt to map the whole load area. The Hashed Page Table may
- * need to be located within the load area (if the total partition size
- * is 64 MB), but cannot be mapped. Typically, this should specify
- * mapping half (32 MB) of the load area.
- *
- * The hypervisor will set up page table entries for the number of
- * pages specified.
- *
- * In 32-bit mode, the hypervisor will load all four of the
- * segment registers (identified by the low-order four bits of the
- * Esid field). In 64-bit mode, the hypervisor will load one SLB
- * entry to map the Esid to the Vsid.
-*/
-
-#define HvEsidsToMap 2
-#define HvRangesToMap 1
-
-/* Hypervisor initially maps 32MB of the load area */
-#define HvPagesToMap 8192
-
-#ifndef __ASSEMBLY__
-struct LparMap {
- u64 xNumberEsids; // Number of ESID/VSID pairs
- u64 xNumberRanges; // Number of VA ranges to map
- u64 xSegmentTableOffs; // Page number within load area of seg table
- u64 xRsvd[5];
- struct {
- u64 xKernelEsid; // Esid used to map kernel load
- u64 xKernelVsid; // Vsid used to map kernel load
- } xEsids[HvEsidsToMap];
- struct {
- u64 xPages; // Number of pages to be mapped
- u64 xOffset; // Offset from start of load area
- u64 xVPN; // Virtual Page Number
- } xRanges[HvRangesToMap];
-};
-
-extern const struct LparMap xLparMap;
-
-#endif /* __ASSEMBLY__ */
-
-/* the fixed address where the LparMap exists */
-#define LPARMAP_PHYS 0x7000
-
-#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/arch/powerpc/include/asm/iseries/mf.h b/arch/powerpc/include/asm/iseries/mf.h
deleted file mode 100644
index eb851a9c9e5..00000000000
--- a/arch/powerpc/include/asm/iseries/mf.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
- * Copyright (C) 2004 Stephen Rothwell IBM Corporation
- *
- * This module exists as an interface between a Linux secondary partition
- * running on an iSeries and the primary partition's Virtual Service
- * Processor (VSP) object. The VSP has final authority over powering on/off
- * all partitions in the iSeries. It also provides miscellaneous low-level
- * machine facility type operations.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_MF_H
-#define _ASM_POWERPC_ISERIES_MF_H
-
-#include <linux/types.h>
-
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_call_event.h>
-
-struct rtc_time;
-
-typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
-
-extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
- unsigned size, unsigned amount, MFCompleteHandler hdlr,
- void *userToken);
-extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
- unsigned count, MFCompleteHandler hdlr, void *userToken);
-
-extern void mf_power_off(void);
-extern void mf_reboot(char *cmd);
-
-extern void mf_display_src(u32 word);
-extern void mf_display_progress(u16 value);
-
-extern void mf_init(void);
-
-#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/arch/powerpc/include/asm/iseries/vio.h b/arch/powerpc/include/asm/iseries/vio.h
deleted file mode 100644
index f9ac0d00b95..00000000000
--- a/arch/powerpc/include/asm/iseries/vio.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* -*- linux-c -*-
- *
- * iSeries Virtual I/O Message Path header
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- *
- * (C) Copyright 2000 IBM Corporation
- *
- * This header file is used by the iSeries virtual I/O device
- * drivers. It defines the interfaces to the common functions
- * (implemented in drivers/char/viopath.h) as well as defining
- * common functions and structures. Currently (at the time I
- * wrote this comment) the iSeries virtual I/O device drivers
- * that use this are
- * drivers/block/viodasd.c
- * drivers/char/viocons.c
- * drivers/char/viotape.c
- * drivers/cdrom/viocd.c
- *
- * The iSeries virtual ethernet support (veth.c) uses a whole
- * different set of functions.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef _ASM_POWERPC_ISERIES_VIO_H
-#define _ASM_POWERPC_ISERIES_VIO_H
-
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-
-/*
- * iSeries virtual I/O events use the subtype field in
- * HvLpEvent to figure out what kind of vio event is coming
- * in. We use a table to route these, and this defines
- * the maximum number of distinct subtypes
- */
-#define VIO_MAX_SUBTYPES 8
-
-#define VIOMAXBLOCKDMA 12
-
-struct open_data {
- u64 disk_size;
- u16 max_disk;
- u16 cylinders;
- u16 tracks;
- u16 sectors;
- u16 bytes_per_sector;
-};
-
-struct rw_data {
- u64 offset;
- struct {
- u32 token;
- u32 reserved;
- u64 len;
- } dma_info[VIOMAXBLOCKDMA];
-};
-
-struct vioblocklpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_result;
- u16 disk;
- u16 flags;
- union {
- struct open_data open_data;
- struct rw_data rw_data;
- u64 changed;
- } u;
-};
-
-#define vioblockflags_ro 0x0001
-
-enum vioblocksubtype {
- vioblockopen = 0x0001,
- vioblockclose = 0x0002,
- vioblockread = 0x0003,
- vioblockwrite = 0x0004,
- vioblockflush = 0x0005,
- vioblockcheck = 0x0007
-};
-
-struct viocdlpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_result;
- u16 disk;
- u16 flags;
- u32 token;
- u64 offset; /* On open, max number of disks */
- u64 len; /* On open, size of the disk */
- u32 block_size; /* Only set on open */
- u32 media_size; /* Only set on open */
-};
-
-enum viocdsubtype {
- viocdopen = 0x0001,
- viocdclose = 0x0002,
- viocdread = 0x0003,
- viocdwrite = 0x0004,
- viocdlockdoor = 0x0005,
- viocdgetinfo = 0x0006,
- viocdcheck = 0x0007
-};
-
-struct viotapelpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_type_result;
- u16 tape;
- u16 flags;
- u32 token;
- u64 len;
- union {
- struct {
- u32 tape_op;
- u32 count;
- } op;
- struct {
- u32 type;
- u32 resid;
- u32 dsreg;
- u32 gstat;
- u32 erreg;
- u32 file_no;
- u32 block_no;
- } get_status;
- struct {
- u32 block_no;
- } get_pos;
- } u;
-};
-
-enum viotapesubtype {
- viotapeopen = 0x0001,
- viotapeclose = 0x0002,
- viotaperead = 0x0003,
- viotapewrite = 0x0004,
- viotapegetinfo = 0x0005,
- viotapeop = 0x0006,
- viotapegetpos = 0x0007,
- viotapesetpos = 0x0008,
- viotapegetstatus = 0x0009
-};
-
-/*
- * Each subtype can register a handler to process their events.
- * The handler must have this interface.
- */
-typedef void (vio_event_handler_t) (struct HvLpEvent * event);
-
-extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
-extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
-extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
-extern int vio_clearHandler(int subtype);
-extern int viopath_isactive(HvLpIndex lp);
-extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
-extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
-extern void vio_set_hostlp(void);
-extern void *vio_get_event_buffer(int subtype);
-extern void vio_free_event_buffer(int subtype, void *buffer);
-
-extern struct vio_dev *vio_create_viodasd(u32 unit);
-
-extern HvLpIndex viopath_hostLp;
-extern HvLpIndex viopath_ourLp;
-
-#define VIOCHAR_MAX_DATA 200
-
-#define VIOMAJOR_SUBTYPE_MASK 0xff00
-#define VIOMINOR_SUBTYPE_MASK 0x00ff
-#define VIOMAJOR_SUBTYPE_SHIFT 8
-
-#define VIOVERSION 0x0101
-
-/*
- * This is the general structure for VIO errors; each module should have
- * a table of them, and each table should be terminated by an entry of
- * { 0, 0, NULL }. Then, to find a specific error message, a module
- * should pass its local table and the return code.
- */
-struct vio_error_entry {
- u16 rc;
- int errno;
- const char *msg;
-};
-extern const struct vio_error_entry *vio_lookup_rc(
- const struct vio_error_entry *local_table, u16 rc);
-
-enum viosubtypes {
- viomajorsubtype_monitor = 0x0100,
- viomajorsubtype_blockio = 0x0200,
- viomajorsubtype_chario = 0x0300,
- viomajorsubtype_config = 0x0400,
- viomajorsubtype_cdio = 0x0500,
- viomajorsubtype_tape = 0x0600,
- viomajorsubtype_scsi = 0x0700
-};
-
-enum vioconfigsubtype {
- vioconfigget = 0x0001,
-};
-
-enum viorc {
- viorc_good = 0x0000,
- viorc_noConnection = 0x0001,
- viorc_noReceiver = 0x0002,
- viorc_noBufferAvailable = 0x0003,
- viorc_invalidMessageType = 0x0004,
- viorc_invalidRange = 0x0201,
- viorc_invalidToken = 0x0202,
- viorc_DMAError = 0x0203,
- viorc_useError = 0x0204,
- viorc_releaseError = 0x0205,
- viorc_invalidDisk = 0x0206,
- viorc_openRejected = 0x0301
-};
-
-/*
- * The structure of the events that flow between us and OS/400 for chario
- * events. You can't mess with this unless the OS/400 side changes too.
- */
-struct viocharlpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 subtype_result_code;
- u8 virtual_device;
- u8 len;
- u8 data[VIOCHAR_MAX_DATA];
-};
-
-#define VIOCHAR_WINDOW 10
-
-enum viocharsubtype {
- viocharopen = 0x0001,
- viocharclose = 0x0002,
- viochardata = 0x0003,
- viocharack = 0x0004,
- viocharconfig = 0x0005
-};
-
-enum viochar_rc {
- viochar_rc_ebusy = 1
-};
-
-#endif /* _ASM_POWERPC_ISERIES_VIO_H */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 00000000000..f016bb699b5
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_POWERPC_JUMP_LABEL_H
+#define _ASM_POWERPC_JUMP_LABEL_H
+
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+#include <asm/feature-fixups.h>
+
+#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (key) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
+
+#ifdef CONFIG_PPC64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* _ASM_POWERPC_JUMP_LABEL_H */
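What this header enables, as a minimal usage sketch (an assumed caller, not part of the patch; trace_key and trace_hit() are hypothetical names): the nop emitted by arch_static_branch() is patched into a branch to the l_yes label when the key is enabled, so the disabled case costs a single nop on the hot path.

#include <linux/jump_label.h>

static struct static_key trace_key = STATIC_KEY_INIT_FALSE;	/* starts disabled */

static void trace_hit(void) { }		/* hypothetical slow path */

void hot_path(void)
{
	/* compiles to one nop while trace_key is off; flipped to a
	 * branch at runtime by static_key_slow_inc(&trace_key) */
	if (static_key_false(&trace_key))
		trace_hit();
}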
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index 6857af58b02..c9776202d7e 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -3,17 +3,7 @@
#include <asm/page.h>
-/*
- * If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere.
- * To keep enough space in the RMO for the first stage kernel on 64bit, we
- * place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place
- * the second stage at 32MB.
- */
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64)
-#define KDUMP_KERNELBASE 0x4000000
-#else
#define KDUMP_KERNELBASE 0x2000000
-#endif
/* How many bytes to reserve at zero for kdump. The reserve limit should
* be greater or equal to the trampoline's end address.
@@ -42,11 +32,11 @@
#ifndef __ASSEMBLY__
-#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
extern void reserve_kdump_trampoline(void);
extern void setup_kdump_trampoline(void);
#else
-/* !CRASH_DUMP || RELOCATABLE */
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
static inline void reserve_kdump_trampoline(void) { ; }
static inline void setup_kdump_trampoline(void) { ; }
#endif
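Because the header keeps empty inline stubs for the non-kdump case, call sites need no ifdefs. A minimal sketch of an assumed caller (illustrative, not code from this patch):

static void __init kdump_setup_sketch(void)
{
	/* both calls are no-ops unless CRASH_DUMP is enabled and the
	 * kernel is static (!NONSTATIC_KERNEL after this change) */
	reserve_kdump_trampoline();	/* early, while reserving memory */
	setup_kdump_trampoline();	/* later, once the copy target is ready */
}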
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f54408d995b..16d7e33d35e 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -2,7 +2,7 @@
#define _ASM_POWERPC_KEXEC_H
#ifdef __KERNEL__
-#ifdef CONFIG_FSL_BOOKE
+#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x)
/*
* On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
@@ -49,7 +49,6 @@
#define KEXEC_STATE_REAL_MODE 2
#ifndef __ASSEMBLY__
-#include <linux/cpumask.h>
#include <asm/reg.h>
typedef void (*crash_shutdown_t)(void);
@@ -73,11 +72,6 @@ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
-extern cpumask_t cpus_in_sr;
-static inline int kexec_sr_activated(int cpu)
-{
- return cpu_isset(cpu,cpus_in_sr);
-}
struct kimage;
struct pt_regs;
@@ -94,7 +88,6 @@ extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
#else /* !CONFIG_KEXEC */
-static inline int kexec_sr_activated(int cpu) { return 0; }
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
index d8520ef121f..2156315d8a9 100644
--- a/arch/powerpc/include/asm/keylargo.h
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -21,7 +21,7 @@
#define KEYLARGO_FCR4 0x48
#define KEYLARGO_FCR5 0x4c /* Pangea only */
-/* K2 aditional FCRs */
+/* K2 additional FCRs */
#define K2_FCR6 0x34
#define K2_FCR7 0x30
#define K2_FCR8 0x2c
@@ -51,7 +51,7 @@
#define KL_GPIO_SOUND_POWER (KEYLARGO_GPIO_0+0x05)
-/* Hrm... this one is only to be used on Pismo. It seeem to also
+/* Hrm... this one is only to be used on Pismo. It seems to also
* control the timebase enable on other machines. Still to be
* experimented... --BenH.
*/
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index bca8fdcd254..5acabbd7ac6 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -10,36 +10,7 @@
* 2 of the License, or (at your option) any later version.
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_PPC_SYNC_PAGE,
- KM_PPC_SYNC_ICACHE,
- KM_KDB,
- KM_TYPE_NR
-};
-
-/*
- * This is a temporary build fix that (so they say on lkml....) should no longer
- * be required after 2.6.33, because of changes planned to the kmap code.
- * Let's try to remove this cruft then.
- */
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define KM_NMI (-1)
-#define KM_NMI_PTE (-1)
-#define KM_IRQ_PTE (-1)
-#endif
+#define KM_TYPE_NR 16
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */
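With the per-caller km_type enum gone, slot management is internal to the highmem code; a short sketch of the resulting calling pattern (illustrative helper, not from this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* kmap_atomic() now takes only the page: it pushes a slot on a per-CPU
 * stack of KM_TYPE_NR entries instead of using a caller-chosen KM_* index */
static void copy_highpage_sketch(struct page *dst_pg, struct page *src_pg)
{
	void *dst = kmap_atomic(dst_pg);
	void *src = kmap_atomic(src_pg);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);
	kunmap_atomic(dst);
}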
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index d0e7701fa1f..af15d4d8d60 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -29,28 +29,24 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#include <asm/probes.h>
+#include <asm/code-patching.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
struct kprobe;
-typedef unsigned int kprobe_opcode_t;
-#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+typedef ppc_opcode_t kprobe_opcode_t;
#define MAX_INSN_SIZE 1
-#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
-#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
-#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
-#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
-
#ifdef CONFIG_PPC64
/*
* 64bit powerpc uses function descriptors.
* Handle cases where:
* - User passes a <.symbol> or <module:.symbol>
* - User passes a <symbol> or <module:symbol>
- * - User passes a non-existant symbol, kallsyms_lookup_name
+ * - User passes a non-existent symbol, kallsyms_lookup_name
* returns 0. Don't deref the NULL pointer in that case
*/
#define kprobe_lookup_name(name, addr) \
@@ -61,9 +57,9 @@ typedef unsigned int kprobe_opcode_t;
if ((colon = strchr(name, ':')) != NULL) { \
colon++; \
if (*colon != '\0' && *colon != '.') \
- addr = *(kprobe_opcode_t **)addr; \
+ addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
} else if (name[0] != '.') \
- addr = *(kprobe_opcode_t **)addr; \
+ addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
} else { \
char dot_name[KSYM_NAME_LEN]; \
dot_name[0] = '.'; \
@@ -72,12 +68,6 @@ typedef unsigned int kprobe_opcode_t;
addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
} \
}
-
-#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
- IS_TWI(instr) || IS_TDI(instr))
-#else
-/* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
-#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
#endif
#define flush_insn_slot(p) do { } while (0)
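For background on why the 64-bit branch now goes through ppc_function_entry(): on big-endian ppc64 a function symbol resolves to a descriptor rather than to text. A sketch of the assumed ABIv1 layout (the struct and helper names here are illustrative):

struct func_desc {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC base loaded into r2 for the callee */
	unsigned long env;	/* environment pointer, unused by C code */
};

static unsigned long entry_point(void *func)
{
	/* what ppc_function_entry() effectively returns */
	return ((struct func_desc *)func)->entry;
}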
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
deleted file mode 100644
index 18ea6963ad7..00000000000
--- a/arch/powerpc/include/asm/kvm.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright IBM Corp. 2007
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#ifndef __LINUX_KVM_POWERPC_H
-#define __LINUX_KVM_POWERPC_H
-
-#include <linux/types.h>
-
-struct kvm_regs {
- __u64 pc;
- __u64 cr;
- __u64 ctr;
- __u64 lr;
- __u64 xer;
- __u64 msr;
- __u64 srr0;
- __u64 srr1;
- __u64 pid;
-
- __u64 sprg0;
- __u64 sprg1;
- __u64 sprg2;
- __u64 sprg3;
- __u64 sprg4;
- __u64 sprg5;
- __u64 sprg6;
- __u64 sprg7;
-
- __u64 gpr[32];
-};
-
-struct kvm_sregs {
- __u32 pvr;
- union {
- struct {
- __u64 sdr1;
- struct {
- struct {
- __u64 slbe;
- __u64 slbv;
- } slb[64];
- } ppc64;
- struct {
- __u32 sr[16];
- __u64 ibat[8];
- __u64 dbat[8];
- } ppc32;
- } s;
- __u8 pad[1020];
- } u;
-};
-
-struct kvm_fpu {
- __u64 fpr[32];
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-#define KVM_REG_MASK 0x001f
-#define KVM_REG_EXT_MASK 0xffe0
-#define KVM_REG_GPR 0x0000
-#define KVM_REG_FPR 0x0020
-#define KVM_REG_QPR 0x0040
-#define KVM_REG_FQPR 0x0060
-
-#define KVM_INTERRUPT_SET -1U
-#define KVM_INTERRUPT_UNSET -2U
-#define KVM_INTERRUPT_SET_LEVEL -3U
-
-#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index d22d39942a9..a0e57618ff3 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -61,7 +61,6 @@ static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
}
-void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 5b750467439..9601741080e 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
#ifndef __POWERPC_KVM_ASM_H__
#define __POWERPC_KVM_ASM_H__
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
+#endif
+#endif
+
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
@@ -44,10 +54,27 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
-#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
-#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
+/*
+ * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
+ */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
+#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
+#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
+ BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
/* book3s */
@@ -59,15 +86,24 @@
#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
#define BOOK3S_INTERRUPT_EXTERNAL 0x500
#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
+#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
#define BOOK3S_INTERRUPT_PROGRAM 0x700
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
+#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL 0xf60
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
@@ -79,22 +115,26 @@
#define BOOK3S_IRQPRIO_FP_UNAVAIL 7
#define BOOK3S_IRQPRIO_ALTIVEC 8
#define BOOK3S_IRQPRIO_VSX 9
-#define BOOK3S_IRQPRIO_SYSCALL 10
-#define BOOK3S_IRQPRIO_MACHINE_CHECK 11
-#define BOOK3S_IRQPRIO_DEBUG 12
-#define BOOK3S_IRQPRIO_EXTERNAL 13
-#define BOOK3S_IRQPRIO_DECREMENTER 14
-#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 16
-#define BOOK3S_IRQPRIO_MAX 17
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL 10
+#define BOOK3S_IRQPRIO_SYSCALL 11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK 12
+#define BOOK3S_IRQPRIO_DEBUG 13
+#define BOOK3S_IRQPRIO_EXTERNAL 14
+#define BOOK3S_IRQPRIO_DECREMENTER 15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17
+#define BOOK3S_IRQPRIO_MAX 18
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
#define BOOK3S_HFLAG_NATIVE_PS 0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
+#define BOOK3S_HFLAG_NEW_TLBIE 0x20
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+#define RESUME_FLAG_ARCH1 (1<<2)
#define RESUME_GUEST 0
#define RESUME_GUEST_NV RESUME_FLAG_NV
@@ -104,6 +144,8 @@
#define KVM_GUEST_MODE_NONE 0
#define KVM_GUEST_MODE_GUEST 1
#define KVM_GUEST_MODE_SKIP 2
+#define KVM_GUEST_MODE_GUEST_HV 3
+#define KVM_GUEST_MODE_HOST_HV 4
#define KVM_INST_FETCH_FAILED -1
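The offset+4 in the 32-bit PPC_STD/PPC_LD variants follows from the big-endian layout of 64-bit fields shared between the two builds; a sketch of that layout assumption (not code from the patch):

union reg_slot {
	unsigned long long full;	/* what 64-bit std/ld touch */
	struct {
		unsigned int hi;	/* offset + 0 */
		unsigned int lo;	/* offset + 4: what 32-bit stw/lwz touch */
	} w;				/* big-endian: the low word comes second */
};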
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f121..f52f6569452 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
-struct kvmppc_slb {
- u64 esid;
- u64 vsid;
- u64 orige;
- u64 origv;
- bool valid : 1;
- bool Ks : 1;
- bool Kp : 1;
- bool nx : 1;
- bool large : 1; /* PTEs are 16MB */
- bool tb : 1; /* 1TB segment */
- bool class : 1;
-};
-
struct kvmppc_bat {
u64 raw;
u32 bepi;
@@ -67,11 +53,24 @@ struct kvmppc_sid_map {
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif
+struct hpte_cache {
+ struct hlist_node list_pte;
+ struct hlist_node list_pte_long;
+ struct hlist_node list_vpte;
+ struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_node list_vpte_64k;
+#endif
+ struct rcu_head rcu_head;
+ u64 host_vpn;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+ int pagesize;
+};
+
struct kvmppc_vcpu_book3s {
- struct kvm_vcpu vcpu;
- struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
- struct kvmppc_slb slb[64];
struct {
u64 esid;
u64 vsid;
@@ -81,27 +80,42 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat dbat[8];
u64 hid[6];
u64 gqr[8];
- int slb_nr;
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 vsid_next;
+ u64 purr_offset;
+ u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
+ u32 vsid_next;
#else
- u64 vsid_first;
- u64 vsid_max;
+ u64 proto_vsid_first;
+ u64 proto_vsid_max;
+ u64 proto_vsid_next;
#endif
int context_id[SID_CONTEXTS];
- ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+ bool hior_explicit; /* HIOR is set by ioctl, not PVR */
+
+ struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+ struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+ struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+ struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
+ int hpte_cache_count;
+ spinlock_t mmu_lock;
};
#define CONTEXT_HOST 0
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0x1fffffffffc00000ULL
-#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL 0x07ffffffffc00000ULL
+#define VSID_BAT 0x07ffffffffb00000ULL
+#define VSID_64K 0x0800000000000000ULL
+#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
#define VSID_PR 0x8000000000000000ULL
@@ -112,136 +126,188 @@ extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong p
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
+ struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+ unsigned long slb_v, unsigned long valid);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
-extern void kvmppc_rmcall(ulong srr0, ulong srr1);
-extern void kvmppc_load_up_fpu(void);
-extern void kvmppc_load_up_altivec(void);
-extern void kvmppc_load_up_vsx(void);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+ bool *writable);
+extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+ unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+ unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
+ unsigned long gpa, bool dirty);
+extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel,
+ pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long *hpret);
+extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+ unsigned long mask);
+
+extern void kvmppc_entry_trampoline(void);
+extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
+extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+ struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+ struct kvmppc_book3s_shadow_vcpu *svcpu);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
- return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+ return vcpu->arch.book3s;
}
-static inline ulong dsisr(void)
-{
- ulong r;
- asm ( "mfdsisr %0 " : "=r" (r) );
- return r;
-}
+/* Also add subarch specific defines */
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
- if ( num < 14 ) {
- to_svcpu(vcpu)->gpr[num] = val;
- to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
- } else
- vcpu->arch.gpr[num] = val;
+ vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
- if ( num < 14 )
- return to_svcpu(vcpu)->gpr[num];
- else
- return vcpu->arch.gpr[num];
+ return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- to_svcpu(vcpu)->cr = val;
- to_book3s(vcpu)->shadow_vcpu->cr = val;
+ vcpu->arch.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->cr;
+ return vcpu->arch.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
- to_svcpu(vcpu)->xer = val;
- to_book3s(vcpu)->shadow_vcpu->xer = val;
+ vcpu->arch.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->xer;
+ return vcpu->arch.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->ctr = val;
+ vcpu->arch.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->ctr;
+ return vcpu->arch.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->lr = val;
+ vcpu->arch.lr = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->lr;
+ return vcpu->arch.lr;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->pc = val;
+ vcpu->arch.pc = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->pc;
+ return vcpu->arch.pc;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
- ulong pc = kvmppc_get_pc(vcpu);
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
+{
/* Load the instruction manually if it failed to do so in the
* exit path */
- if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+ vcpu->arch.last_inst;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
+}
- return svcpu->last_inst;
+/*
+ * Like kvmppc_get_last_inst(), but for fetching an sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->fault_dar;
+ return vcpu->arch.fault_dar;
+}
+
+static inline bool is_kvmppc_resume_guest(int r)
+{
+ return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
@@ -250,13 +316,10 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
#define OSI_SC_MAGIC_R4 0x77810F9B
#define INS_DCBZ 0x7c0007ec
+/* TO = 31 for unconditional trap */
+#define INS_TW 0x7fe00008
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
#endif /* __ASM_KVM_BOOK3S_H__ */
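A worked use of the pc - 4 fetch introduced above (illustrative call site; the opcode mask and do_hypercall() are assumptions, not from this header):

static void handle_sc_exit_sketch(struct kvm_vcpu *vcpu)
{
	/* SRR0 points past the sc, so the helper reads from pc - 4 */
	u32 inst = kvmppc_get_last_sc(vcpu);

	if ((inst & 0xfc000002) == 0x44000002)	/* primary opcode 17, sc form */
		do_hypercall(vcpu);		/* hypothetical handler */
}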
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index de604db135f..c720e0b3238 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -20,9 +20,13 @@
#ifndef __ASM_KVM_BOOK3S_32_H__
#define __ASM_KVM_BOOK3S_32_H__
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shadow_vcpu;
+}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
- return to_book3s(vcpu)->shadow_vcpu;
}
#define PTE_SIZE 12
@@ -38,5 +42,6 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
#define SID_SHIFT 28
#define ESID_MASK 0xf0000000
#define VSID_MASK 0x00fffffff0000000ULL
+#define VPN_SHIFT 12
#endif /* __ASM_KVM_BOOK3S_32_H__ */
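Typical pairing of the accessors added here (an illustrative fragment; on the 64-bit variant the same API also toggles preemption, since there the shadow vcpu lives in the PACA):

	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong val = svcpu->gpr[3];	/* only gpr[0..13] are shadowed */
	svcpu_put(svcpu);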
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d57..d645428a65a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,402 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
+ preempt_disable();
return &get_paca()->shadow_vcpu;
}
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+ preempt_enable();
+}
+#endif
+
+#define SPAPR_TCE_SHIFT 12
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
+extern unsigned long kvm_rma_pages;
+#endif
+
+#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
+
+/*
+ * We use a lock bit in HPTE dword 0 to synchronize updates and
+ * accesses to each HPTE, and another bit to indicate non-present
+ * HPTEs.
+ */
+#define HPTE_V_HVLOCK 0x40UL
+#define HPTE_V_ABSENT 0x20UL
+
+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED (1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
+
+static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " ori %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " mr %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "cc", "memory");
+ return old == 0;
+}
+
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+ int i, shift;
+ unsigned int mask;
+
+ /* start from 1 ignoring MMU_PAGE_4K */
+ for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+ /* invalid penc */
+ if (mmu_psize_defs[psize].penc[i] == -1)
+ continue;
+ /*
+ * encoding bits per actual page size
+ * PTE LP actual page size
+ * rrrr rrrz >=8KB
+ * rrrr rrzz >=16KB
+ * rrrr rzzz >=32KB
+ * rrrr zzzz >=64KB
+ * .......
+ */
+ shift = mmu_psize_defs[i].shift - LP_SHIFT;
+ if (shift > LP_BITS)
+ shift = LP_BITS;
+ mask = (1 << shift) - 1;
+ if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+ return i;
+ }
+ return -1;
+}
+
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ int b_psize, a_psize;
+ unsigned int penc;
+ unsigned long rb = 0, va_low, sllp;
+ unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+ if (!(v & HPTE_V_LARGE)) {
+ /* both the base and actual psize are 4k */
+ b_psize = MMU_PAGE_4K;
+ a_psize = MMU_PAGE_4K;
+ } else {
+ for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
+
+ /* valid entries have a shift value */
+ if (!mmu_psize_defs[b_psize].shift)
+ continue;
+
+ a_psize = __hpte_actual_psize(lp, b_psize);
+ if (a_psize != -1)
+ break;
+ }
+ }
+ /*
+ * Ignore the top 14 bits of va
+ * v has the top two bits covering segment size, hence move
+ * by 16 bits. Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
+ * The AVA field in v also has the lower 23 bits ignored.
+ * For base page size 4K we need bits 14..65 (so we need to
+ * collect an extra 11 bits)
+ * For others we need bits 14..14+i
+ */
+ /* This covers 14..54 bits of va */
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ /*
+ * The AVA in v has the lower 23 bits cleared. We need to derive
+ * those from the pteg index.
+ */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /*
+ * Get the vpn bits from va_low by reversing the hashing.
+ * In v we have the va with 23 bits dropped and then left shifted
+ * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we need to
+ * right shift it by (SID_SHIFT - (23 - 7)).
+ */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> (SID_SHIFT - 16);
+ else
+ va_low ^= v >> (SID_SHIFT_1T - 16);
+ va_low &= 0x7ff;
+
+ switch (b_psize) {
+ case MMU_PAGE_4K:
+ sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+ rb |= sllp << 5; /* AP field */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */
+ break;
+ default:
+ {
+ int aval_shift;
+ /*
+ * remaining 7 bits of the AVA/LP fields;
+ * these also contain the rr bits of LP
+ */
+ rb |= (va_low & 0x7f) << 16;
+ /*
+ * Now clear the LP bits that are not needed for the actual psize
+ */
+ rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
+ /*
+ * AVAL field 58..77 - base_page_shift bits of va
+ * we have space for bits 58..64; missing bits should
+ * be zero filled. The +1 takes care of the L bit shift.
+ */
+ aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
+ rb |= ((va_low << aval_shift) & 0xfe);
+
+ rb |= 1; /* L field */
+ penc = mmu_psize_defs[b_psize].penc[a_psize];
+ rb |= penc << 12; /* LP field */
+ break;
+ }
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+ bool is_base_size)
+{
+
+ int size, a_psize;
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+ /* only handle 4k, 64k and 16M pages for now */
+ if (!(h & HPTE_V_LARGE))
+ return 1ul << 12;
+ else {
+ for (size = 0; size < MMU_PAGE_COUNT; size++) {
+ /* valid entries have a shift value */
+ if (!mmu_psize_defs[size].shift)
+ continue;
+
+ a_psize = __hpte_actual_psize(lp, size);
+ if (a_psize != -1) {
+ if (is_base_size)
+ return 1ul << mmu_psize_defs[size].shift;
+ return 1ul << mmu_psize_defs[a_psize].shift;
+ }
+ }
+
+ }
+ return 0;
+}
+
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+ return __hpte_page_size(h, l, 1);
+}
+
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+ return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
+static inline int hpte_is_writable(unsigned long ptel)
+{
+ unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
+
+ return pp != PP_RXRX && pp != PP_RXXX;
+}
+
+static inline unsigned long hpte_make_readonly(unsigned long ptel)
+{
+ if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
+ ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
+ else
+ ptel |= PP_RXRX;
+ return ptel;
+}
+
+static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
+{
+ unsigned int wimg = ptel & HPTE_R_WIMG;
+
+ /* Handle SAO */
+ if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ wimg = HPTE_R_M;
+
+ if (!io_type)
+ return wimg == HPTE_R_M;
+
+ return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
+}
+
+/*
+ * If it's present and writable, atomically set dirty and referenced bits and
+ * return the PTE, otherwise return 0. If we find a transparent hugepage
+ * that is marked as splitting, we return 0.
+ */
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
+ unsigned int hugepage)
+{
+ pte_t old_pte, new_pte = __pte(0);
+
+ while (1) {
+ old_pte = pte_val(*ptep);
+ /*
+ * wait until _PAGE_BUSY is clear then set it atomically
+ */
+ if (unlikely(old_pte & _PAGE_BUSY)) {
+ cpu_relax();
+ continue;
+ }
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* If this is a hugepage that is being split, return None */
+ if (unlikely(hugepage &&
+ pmd_trans_splitting(pte_pmd(old_pte))))
+ return __pte(0);
+#endif
+ /* If pte is not present return None */
+ if (unlikely(!(old_pte & _PAGE_PRESENT)))
+ return __pte(0);
+
+ new_pte = pte_mkyoung(old_pte);
+ if (writing && pte_write(old_pte))
+ new_pte = pte_mkdirty(new_pte);
+
+ if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
+ new_pte))
+ break;
+ }
+ return new_pte;
+}
+
+
+/* Return HPTE cache control bits corresponding to Linux pte bits */
+static inline unsigned long hpte_cache_bits(unsigned long pte_val)
+{
+#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
+ return pte_val & (HPTE_R_W | HPTE_R_I);
+#else
+ return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
+ ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
+#endif
+}
+
+static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return PP_RWRX <= pp && pp <= PP_RXRX;
+ return 1;
+}
+
+static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return pp == PP_RWRW;
+ return pp <= PP_RWRW;
+}
+
+static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
+{
+ unsigned long skey;
+
+ skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
+ ((hpte_r & HPTE_R_KEY_LO) >> 9);
+ return (amr >> (62 - 2 * skey)) & 3;
+}
+
+static inline void lock_rmap(unsigned long *rmap)
+{
+ do {
+ while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+ cpu_relax();
+ } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+ __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
+
+static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
+ unsigned long pagesize)
+{
+ unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
+
+ if (pagesize <= PAGE_SIZE)
+ return 1;
+ return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
+}
+
+/*
+ * This works for 4k, 64k and 16M pages on POWER7,
+ * and 4k and 16M pages on PPC970.
+ */
+static inline unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+ unsigned long senc = 0;
+
+ if (psize > 0x1000) {
+ senc = SLB_VSID_L;
+ if (psize == 0x10000)
+ senc |= SLB_VSID_LP_01;
+ }
+ return senc;
+}
+
+static inline int is_vrma_hpte(unsigned long hpte_v)
+{
+ return (hpte_v & ~0xffffffUL) ==
+ (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+ struct revmap_entry *rev)
+{
+ if (atomic_read(&kvm->arch.hpte_mod_interest))
+ rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
+/*
+ * Like kvm_memslots(), but for use in real mode when we can't do
+ * any RCU stuff (since the secondary threads are offline from the
+ * kernel's point of view), and we can't print anything.
+ * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
+ */
+static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
+{
+ return rcu_dereference_raw_notrace(kvm->memslots);
+}
+
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
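How the rmap bit-lock above is meant to be used, as a minimal sketch (the update in the critical section is illustrative; the KVMPPC_RMAP_* bits appear in the kvm_host.h hunks further down):

static void set_rmap_index_sketch(unsigned long *rmap, unsigned long hpte_idx)
{
	lock_rmap(rmap);	/* spin until KVMPPC_RMAP_LOCK_BIT is ours */
	*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | hpte_idx | KVMPPC_RMAP_PRESENT;
	unlock_rmap(rmap);	/* clear-bit with release ordering */
}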
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 36fdb3aff30..5bdfb5dd340 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -20,6 +20,11 @@
#ifndef __ASM_KVM_BOOK3S_ASM_H__
#define __ASM_KVM_BOOK3S_ASM_H__
+/* XICS ICP register offsets */
+#define XICS_XIRR 4
+#define XICS_MFRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -34,6 +39,7 @@
(\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
(\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
(\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+ (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \
(\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
(\intno == BOOK3S_INTERRUPT_PROGRAM) || \
(\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
@@ -59,39 +65,84 @@ kvmppc_resume_\intno:
#else /*__ASSEMBLY__ */
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+ ulong host_r1;
+ ulong host_r2;
+ ulong host_msr;
+ ulong vmhandler;
+ ulong scratch0;
+ ulong scratch1;
+ ulong scratch2;
+ u8 in_guest;
+ u8 restore_hid5;
+ u8 napping;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ u8 hwthread_req;
+ u8 hwthread_state;
+ u8 host_ipi;
+ u8 ptid;
+ struct kvm_vcpu *kvm_vcpu;
+ struct kvmppc_vcore *kvm_vcore;
+ unsigned long xics_phys;
+ u32 saved_xirr;
+ u64 dabr;
+ u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
+ u32 host_pmc[8];
+ u64 host_purr;
+ u64 host_spurr;
+ u64 host_dscr;
+ u64 dec_expires;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u64 cfar;
+ u64 ppr;
+ u64 host_fscr;
+#endif
+};
+
struct kvmppc_book3s_shadow_vcpu {
+ bool in_use;
ulong gpr[14];
u32 cr;
u32 xer;
-
- u32 fault_dsisr;
- u32 last_inst;
ulong ctr;
ulong lr;
ulong pc;
+
ulong shadow_srr1;
ulong fault_dar;
-
- ulong host_r1;
- ulong host_r2;
- ulong handler;
- ulong scratch0;
- ulong scratch1;
- ulong vmhandler;
- u8 in_guest;
+ u32 fault_dsisr;
+ u32 last_inst;
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
+
+ struct kvmppc_host_state hstate;
#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
u8 slb_max; /* highest used guest slb entry */
struct {
u64 esid;
u64 vsid;
} slb[64]; /* guest SLB */
+ u64 shadow_fscr;
#endif
};
#endif /*__ASSEMBLY__ */
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL 0
+#define KVM_HWTHREAD_IN_NAP 1
+#define KVM_HWTHREAD_IN_KVM 2
+
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1..c7aed6105ff 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,6 +23,16 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS 64
+
+#define KVMPPC_INST_EHPRIV 0x7c00021c
+#define EHPRIV_OC_SHIFT 11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG 1
+#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
+ (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
@@ -53,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
return vcpu->arch.xer;
}
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+ /* XXX Would need to check TLB entry */
+ return false;
+}
+
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_inst;
@@ -92,5 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dear;
}
-
#endif /* __ASM_KVM_BOOKE_H__ */
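The debug encoding above works out as follows (a worked check of the constants defined in this header):

/* 0x7c00021c | (EHPRIV_OC_DEBUG << 11) == 0x7c00021c | 0x800 == 0x7c000a1c */
static const u32 ehpriv_debug_inst =
	KVMPPC_INST_EHPRIV | (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT);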
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 00000000000..e5f048bbcb7
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * 32-bit host
+ * Expected inputs (normal exceptions):
+ * SCRATCH0 = saved r10
+ * r10 = thread struct
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * r13 = saved CR
+ * *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ * *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ * appropriate SCRATCH = saved r8
+ * r8 = exception level stack frame
+ * r9 = *(r8 + _CCR) = saved CR
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * *(r8 + GPR9) = saved r9
+ * *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ * *(r8 + GPR11) = saved r11
+ *
+ * 64-bit host
+ * Expected inputs (GEN/GDBELL/DBG/CRIT/MC exception types):
+ * r10 = saved CR
+ * r13 = PACA_POINTER
+ * *(r13 + PACA_EX##type + EX_R10) = saved r10
+ * *(r13 + PACA_EX##type + EX_R11) = saved r11
+ * SPRN_SPRG_##type##_SCRATCH = saved r13
+ *
+ * Expected inputs (TLB exception type):
+ * r10 = saved CR
+ * r12 = extlb pointer
+ * r13 = PACA_POINTER
+ * *(r12 + EX_TLB_R10) = saved r10
+ * *(r12 + EX_TLB_R11) = saved r11
+ * *(r12 + EX_TLB_R13) = saved r13
+ * SPRN_SPRG_GEN_SCRATCH = saved r12
+ *
+ * Only the bolted version of the TLB miss exception handlers is supported for now.
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
+ bf 3, 1975f
+ b kvmppc_handler_\intno\()_\srr1
+1975:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
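Restating the DO_KVM guard in C (a hedged paraphrase of the assembly, not an actual handler): the mtocrf/bf pair is just a register-preserving test of MSR[GS] in the saved SRR1 variant.

static bool exception_from_guest_sketch(unsigned long srr1)
{
	/* a set MSR[GS] means the exception interrupted guest state and
	 * must be routed to kvmppc_handler_<intno>_<srr1> */
	return (srr1 & MSR_GS) != 0;
}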
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644
index 7fea26fffb2..00000000000
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, <yu.liu@freescale.com>
- *
- * Description:
- * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_KVM_E500_H__
-#define __ASM_KVM_E500_H__
-
-#include <linux/kvm_host.h>
-
-#define BOOKE_INTERRUPT_SIZE 36
-
-#define E500_PID_NUM 3
-#define E500_TLB_NUM 2
-
-struct tlbe{
- u32 mas1;
- u32 mas2;
- u32 mas3;
- u32 mas7;
-};
-
-struct kvmppc_vcpu_e500 {
- /* Unmodified copy of the guest's TLB. */
- struct tlbe *guest_tlb[E500_TLB_NUM];
- /* TLB that's actually used when the guest is running. */
- struct tlbe *shadow_tlb[E500_TLB_NUM];
- /* Pages which are referenced in the shadow TLB. */
- struct page **shadow_pages[E500_TLB_NUM];
-
- unsigned int guest_tlb_size[E500_TLB_NUM];
- unsigned int shadow_tlb_size[E500_TLB_NUM];
- unsigned int guest_tlb_nv[E500_TLB_NUM];
-
- u32 host_pid[E500_PID_NUM];
- u32 pid[E500_PID_NUM];
-
- u32 mas0;
- u32 mas1;
- u32 mas2;
- u32 mas3;
- u32 mas4;
- u32 mas5;
- u32 mas6;
- u32 mas7;
- u32 l1csr0;
- u32 l1csr1;
- u32 hid0;
- u32 hid1;
- u32 tlb0cfg;
- u32 tlb1cfg;
-
- struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
-{
- return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
-}
-
-#endif /* __ASM_KVM_E500_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bba3b9b72a3..bb66d8b8efd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,30 +25,55 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
#include <asm/kvm_asm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
-#define KVM_MAX_VCPUS 1
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MAX_VCPUS NR_CPUS
+#define KVM_MAX_VCORES NR_CPUS
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
+#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
+
+/* These values are internal and can be increased later */
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 256
+
+#if !defined(CONFIG_KVM_440)
+#include <linux/mmu_notifier.h>
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x) 0
-#define KVM_NR_PAGE_SIZES 1
-#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+struct kvm;
+extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+#endif
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
+#define HPTEG_HASH_BITS_VPTE_64K 11
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)
/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL
@@ -57,6 +82,13 @@ struct kvm;
struct kvm_run;
struct kvm_vcpu;
+struct lppaca;
+struct slb_shadow;
+struct dtl_entry;
+
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -79,6 +111,8 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
+ u32 dbell_exits;
+ u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
@@ -113,6 +147,7 @@ enum kvm_exit_types {
EMULATED_TLBSX_EXITS,
EMULATED_TLBWE_EXITS,
EMULATED_RFI_EXITS,
+ EMULATED_RFCI_EXITS,
DEC_EXITS,
EXT_INTR_EXITS,
HALT_WAKEUP,
@@ -120,6 +155,8 @@ enum kvm_exit_types {
FP_UNAVAIL,
DEBUG_EXITS,
TIMEINGUEST,
+ DBELL_EXITS,
+ GDBELL_EXITS,
__NUMBER_OF_KVM_EXIT_TYPES
};
@@ -133,7 +170,166 @@ struct kvmppc_exit_timing {
};
};
+struct kvmppc_pginfo {
+ unsigned long pfn;
+ atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_table {
+ struct list_head list;
+ struct kvm *kvm;
+ u64 liobn;
+ u32 window_size;
+ struct page *pages[0];
+};
+
+struct kvm_rma_info {
+ atomic_t use_count;
+ unsigned long base_pfn;
+};
+
+/* XICS components, defined in book3s_xics.c */
+struct kvmppc_xics;
+struct kvmppc_icp;
+
+/*
+ * The reverse mapping array has one entry for each HPTE,
+ * which stores the guest's view of the second word of the HPTE
+ * (including the guest physical address of the mapping),
+ * plus forward and backward pointers in a doubly-linked ring
+ * of HPTEs that map the same host page. The pointers in this
+ * ring are 32-bit HPTE indexes, to save space.
+ */
+struct revmap_entry {
+ unsigned long guest_rpte;
+ unsigned int forw, back;
+};
+
+/*
+ * We use the top bit of each memslot->arch.rmap entry as a lock bit,
+ * and bit 32 as a present flag. The bottom 32 bits are the
+ * index in the guest HPT of a HPTE that points to the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT 63
+#define KVMPPC_RMAP_RC_SHIFT 32
+#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_PRESENT 0x100000000ul
+#define KVMPPC_RMAP_INDEX 0xfffffffful
+
+/* Low-order bits in memslot->arch.slot_phys[] */
+#define KVMPPC_PAGE_ORDER_MASK 0x1f
+#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
+#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
+#define KVMPPC_GOT_PAGE 0x80
+
+struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned long *rmap;
+ unsigned long *slot_phys;
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+};
+
struct kvm_arch {
+ unsigned int lpid;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ unsigned long hpt_virt;
+ struct revmap_entry *revmap;
+ unsigned int host_lpid;
+ unsigned long host_lpcr;
+ unsigned long sdr1;
+ unsigned long host_sdr1;
+ int tlbie_lock;
+ unsigned long lpcr;
+ unsigned long rmor;
+ struct kvm_rma_info *rma;
+ unsigned long vrma_slb_v;
+ int rma_setup_done;
+ int using_mmu_notifiers;
+ u32 hpt_order;
+ atomic_t vcpus_running;
+ u32 online_vcores;
+ unsigned long hpt_npte;
+ unsigned long hpt_mask;
+ atomic_t hpte_mod_interest;
+ spinlock_t slot_phys_lock;
+ cpumask_t need_tlb_flush;
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+ int hpt_cma_alloc;
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ struct mutex hpt_mutex;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
+#endif
+#ifdef CONFIG_KVM_MPIC
+ struct openpic *mpic;
+#endif
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_xics *xics;
+#endif
+ struct kvmppc_ops *kvm_ops;
+};
+
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_count combines an entry count in the bottom 8 bits
+ * and an exit count in the next 8 bits. This is so that we can
+ * atomically increment the entry count iff the exit count is 0
+ * without taking the lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int n_busy;
+ int num_threads;
+ int entry_exit_count;
+ int n_woken;
+ int nap_count;
+ int napping_threads;
+ int first_vcpuid;
+ u16 pcpu;
+ u16 last_cpu;
+ u8 vcore_state;
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+ struct kvm *kvm;
+ u64 tb_offset; /* guest timebase - host timebase */
+ ulong lpcr;
+ u32 arch_compat;
+ ulong pcr;
+ ulong dpdes; /* doorbell state (POWER8) */
+};
+
+#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
+#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+
+/* Values for vcore_state */
+#define VCORE_INACTIVE 0
+#define VCORE_SLEEPING 1
+#define VCORE_STARTING 2
+#define VCORE_RUNNING 3
+#define VCORE_EXITING 4
+
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest. There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+ unsigned long gpa; /* Current guest phys addr */
+ void *pinned_addr; /* Address in kernel linear mapping */
+ void *pinned_end; /* End of region */
+ unsigned long next_gpa; /* Guest phys addr for update */
+ unsigned long len; /* Number of bytes required */
+ u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+ bool dirty; /* true => area has been modified by kernel */
};
struct kvmppc_pte {
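The lock-free admission scheme described in the kvmppc_vcore comment above can be sketched as follows (illustrative; the real logic lives in the HV run loop):

static bool try_enter_vcore_sketch(struct kvmppc_vcore *vc)
{
	int old = vc->entry_exit_count;

	if (old >> 8)		/* exit count nonzero: someone is leaving */
		return false;	/* caller must fall back to taking vc->lock */
	/* bump the entry byte only if the whole word is unchanged */
	return cmpxchg(&vc->entry_exit_count, old, old + 1) == old;
}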
@@ -143,6 +339,7 @@ struct kvmppc_pte {
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
+ u8 page_size; /* MMU_PAGE_xxx */
};
struct kvmppc_mmu {
@@ -155,7 +352,8 @@ struct kvmppc_mmu {
/* book3s */
void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
- int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+ int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data, bool iswrite);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -163,45 +361,84 @@ struct kvmppc_mmu {
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
-struct hpte_cache {
- struct hlist_node list_pte;
- struct hlist_node list_pte_long;
- struct hlist_node list_vpte;
- struct hlist_node list_vpte_long;
- struct rcu_head rcu_head;
- u64 host_va;
- u64 pfn;
- ulong slot;
- struct kvmppc_pte pte;
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
+ u8 base_page_size; /* MMU_PAGE_xxx */
};
+# ifdef CONFIG_PPC_FSL_BOOK3E
+#define KVMPPC_BOOKE_IAC_NUM 2
+#define KVMPPC_BOOKE_DAC_NUM 2
+# else
+#define KVMPPC_BOOKE_IAC_NUM 4
+#define KVMPPC_BOOKE_DAC_NUM 2
+# endif
+#define KVMPPC_BOOKE_MAX_IAC 4
+#define KVMPPC_BOOKE_MAX_DAC 2
+
+/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
+#define KVMPPC_EPR_NONE 0 /* EPR not supported */
+#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
+#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */
+
+#define KVMPPC_IRQ_DEFAULT 0
+#define KVMPPC_IRQ_MPIC 1
+#define KVMPPC_IRQ_XICS 2
+
+struct openpic;
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
- ulong host_msr;
- ulong host_r2;
- void *host_retip;
- ulong trampoline_lowmem;
- ulong trampoline_enter;
- ulong highmem_handler;
- ulong rmcall;
- ulong host_paca_phys;
+ struct kvmppc_slb slb[64];
+ int slb_max; /* 1 + index of last valid entry in slb[] */
+ int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
+ struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+ struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
ulong gpr[32];
- u64 fpr[32];
- u64 fpscr;
+ struct thread_fp_state fp;
+#ifdef CONFIG_SPE
+ ulong evr[32];
+ ulong spefscr;
+ ulong host_spefscr;
+ u64 acc;
+#endif
#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
+ struct thread_vr_state vr;
+#endif
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ u32 host_mas4;
+ u32 host_mas6;
+ u32 shadow_epcr;
+ u32 shadow_msrp;
+ u32 eplc;
+ u32 epsc;
+ u32 oldpir;
#endif
-#ifdef CONFIG_VSX
- u64 vsr[32];
+#if defined(CONFIG_BOOKE)
+#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
+ u32 epcr;
+#endif
#endif
#ifdef CONFIG_PPC_BOOK3S
@@ -209,52 +446,110 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
-#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
+#ifdef CONFIG_PPC_BOOK3S
+ ulong tar;
+#endif
ulong xer;
u32 cr;
-#endif
#ifdef CONFIG_PPC_BOOK3S
- ulong shadow_msr;
ulong hflags;
ulong guest_owned_ext;
+ ulong purr;
+ ulong spurr;
+ ulong ic;
+ ulong vtb;
+ ulong dscr;
+ ulong amr;
+ ulong uamor;
+ ulong iamr;
+ u32 ctrl;
+ u32 dabrx;
+ ulong dabr;
+ ulong dawr;
+ ulong dawrx;
+ ulong ciabr;
+ ulong cfar;
+ ulong ppr;
+ ulong pspb;
+ ulong fscr;
+ ulong shadow_fscr;
+ ulong ebbhr;
+ ulong ebbrr;
+ ulong bescr;
+ ulong csigr;
+ ulong tacr;
+ ulong tcscr;
+ ulong acop;
+ ulong wort;
+ ulong shadow_srr1;
#endif
+ u32 vrsave; /* also USPRG0 */
u32 mmucr;
- ulong sprg4;
- ulong sprg5;
- ulong sprg6;
- ulong sprg7;
+ /* shadow_msr is unused for BookE HV */
+ ulong shadow_msr;
ulong csrr0;
ulong csrr1;
ulong dsrr0;
ulong dsrr1;
- ulong esr;
+ ulong mcsrr0;
+ ulong mcsrr1;
+ ulong mcsr;
u32 dec;
+#ifdef CONFIG_BOOKE
u32 decar;
+#endif
u32 tbl;
u32 tbu;
u32 tcr;
- u32 tsr;
+ ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
u32 ivor[64];
ulong ivpr;
- u32 pir;
u32 pvr;
u32 shadow_pid;
+ u32 shadow_pid1;
u32 pid;
u32 swap_pid;
u32 ccr0;
u32 ccr1;
- u32 dbcr0;
- u32 dbcr1;
u32 dbsr;
+ u64 mmcr[5];
+ u32 pmc[8];
+ u32 spmc[2];
+ u64 siar;
+ u64 sdar;
+ u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tfhar;
+ u64 texasr;
+ u64 tfiar;
+
+ u32 cr_tm;
+ u64 lr_tm;
+ u64 ctr_tm;
+ u64 amr_tm;
+ u64 ppr_tm;
+ u64 dscr_tm;
+ u64 tar_tm;
+
+ ulong gpr_tm[32];
+
+ struct thread_fp_state fp_tm;
+
+ struct thread_vr_state vr_tm;
+ u32 vrsave_tm; /* also USPRG0 */
+
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
+ struct mutex exit_timing_lock;
struct kvmppc_exit_timing timing_exit;
struct kvmppc_exit_timing timing_last_enter;
u32 last_exit_type;
@@ -267,14 +562,33 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ ulong fault_dar;
+ u32 fault_dsisr;
+ unsigned long intr_msr;
+#endif
+
#ifdef CONFIG_BOOKE
- u32 last_inst;
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+ spinlock_t wdt_lock;
+ struct timer_list wdt_timer;
+ u32 tlbcfg[4];
+ u32 tlbps[4];
+ u32 mmucfg;
+ u32 eptcfg;
+ u32 epr;
+ u32 crit_save;
+ /* guest debug registers */
+ struct debug_reg dbg_reg;
+ /* hardware visible debug registers when in guest state */
+ struct debug_reg shadow_dbg_reg;
#endif
gpa_t paddr_accessed;
+ gva_t vaddr_accessed;
+ pgd_t *pgdir;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
@@ -283,25 +597,90 @@ struct kvm_vcpu_arch {
u8 dcr_is_write;
u8 osi_needed;
u8 osi_enabled;
+ u8 papr_enabled;
+ u8 watchdog_enabled;
+ u8 sane;
+ u8 cpu_type;
+ u8 hcall_needed;
+ u8 epr_flags; /* KVMPPC_EPR_xxx */
+ u8 epr_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
struct tasklet_struct tasklet;
u64 dec_jiffies;
+ u64 dec_expires;
unsigned long pending_exceptions;
+ u8 ceded;
+ u8 prodded;
+ u32 last_inst;
+
+ wait_queue_head_t *wqp;
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+ int state;
+ int ptid;
+ bool timer_running;
+ wait_queue_head_t cpu_run;
+
struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ bool shared_big_endian;
+#endif
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+ bool disable_kernel_nx;
-#ifdef CONFIG_PPC_BOOK3S
- struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
- struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
- struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
- struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
- int hpte_cache_count;
- spinlock_t mmu_lock;
+ int irq_type; /* one of KVMPPC_IRQ_* */
+ int irq_cpu_id;
+ struct openpic *mpic; /* KVM_IRQ_MPIC */
+#ifdef CONFIG_KVM_XICS
+ struct kvmppc_icp *icp; /* XICS presentation controller */
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ struct kvm_vcpu_arch_shared shregs;
+
+ unsigned long pgfault_addr;
+ long pgfault_index;
+ unsigned long pgfault_hpte[2];
+
+ struct list_head run_list;
+ struct task_struct *run_task;
+ struct kvm_run *kvm_run;
+
+ spinlock_t vpa_update_lock;
+ struct kvmppc_vpa vpa;
+ struct kvmppc_vpa dtl;
+ struct dtl_entry *dtl_ptr;
+ unsigned long dtl_index;
+ u64 stolen_logged;
+ struct kvmppc_vpa slb_shadow;
+
+ spinlock_t tbacct_lock;
+ u64 busy_stolen;
+ u64 busy_preempt;
#endif
};
+#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
+/* Values for vcpu->arch.state */
+#define KVMPPC_VCPU_NOTREADY 0
+#define KVMPPC_VCPU_RUNNABLE 1
+#define KVMPPC_VCPU_BUSY_IN_HOST 2
+
+/* Values for vcpu->arch.io_gpr */
+#define KVM_MMIO_REG_MASK 0x001f
+#define KVM_MMIO_REG_EXT_MASK 0xffe0
+#define KVM_MMIO_REG_GPR 0x0000
+#define KVM_MMIO_REG_FPR 0x0020
+#define KVM_MMIO_REG_QPR 0x0040
+#define KVM_MMIO_REG_FQPR 0x0060
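/*
 * Sketch (illustrative, not from this patch): decoding io_gpr with the
 * masks above — the low bits select the register index, the high bits
 * its class.  The loaded value here is a placeholder.
 */
static inline void example_decode_io_gpr(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, index, 0 /* loaded value */);
		break;
	case KVM_MMIO_REG_FPR:
		/* FP target: the value would go to fp.fpr[index] instead */
		break;
	}
}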
+
+#define __KVM_HAVE_ARCH_WQP
+#define __KVM_HAVE_CREATE_DEVICE
+
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 50533f9adf4..336a91acb8b 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -16,40 +16,10 @@
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
-
#ifndef __POWERPC_KVM_PARA_H__
#define __POWERPC_KVM_PARA_H__
-#include <linux/types.h>
-
-struct kvm_vcpu_arch_shared {
- __u64 scratch1;
- __u64 scratch2;
- __u64 scratch3;
- __u64 critical; /* Guest may not get interrupts if == r1 */
- __u64 sprg0;
- __u64 sprg1;
- __u64 sprg2;
- __u64 sprg3;
- __u64 srr0;
- __u64 srr1;
- __u64 dar;
- __u64 msr;
- __u32 dsisr;
- __u32 int_pending; /* Tells the guest if we have an interrupt */
- __u32 sr[16];
-};
-
-#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
-#define HC_VENDOR_KVM (42 << 16)
-#define HC_EV_SUCCESS 0
-#define HC_EV_UNIMPLEMENTED 12
-
-#define KVM_FEATURE_MAGIC_PAGE 1
-
-#define KVM_MAGIC_FEAT_SR (1 << 0)
-
-#ifdef __KERNEL__
+#include <uapi/asm/kvm_para.h>
#ifdef CONFIG_KVM_GUEST
@@ -69,10 +39,6 @@ static inline int kvm_para_available(void)
return 1;
}
-extern unsigned long kvm_hypercall(unsigned long *in,
- unsigned long *out,
- unsigned long nr);
-
#else
static inline int kvm_para_available(void)
@@ -80,82 +46,8 @@ static inline int kvm_para_available(void)
return 0;
}
-static unsigned long kvm_hypercall(unsigned long *in,
- unsigned long *out,
- unsigned long nr)
-{
- return HC_EV_UNIMPLEMENTED;
-}
-
#endif
-static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
-{
- unsigned long in[8];
- unsigned long out[8];
- unsigned long r;
-
- r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
- *r2 = out[0];
-
- return r;
-}
-
-static inline long kvm_hypercall0(unsigned int nr)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
-}
-
-static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
-}
-
-static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
- unsigned long p2)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
-}
-
-static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
- unsigned long p2, unsigned long p3)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- in[2] = p3;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
-}
-
-static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
- unsigned long p2, unsigned long p3,
- unsigned long p4)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- in[2] = p3;
- in[3] = p4;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
-}
-
-
static inline unsigned int kvm_arch_para_features(void)
{
unsigned long r;
@@ -163,12 +55,15 @@ static inline unsigned int kvm_arch_para_features(void)
if (!kvm_para_available())
return 0;
- if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
+ if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
return 0;
return r;
}
-#endif /* __KERNEL__ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ecb3bc74c34..9c89cdd067a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,11 +28,15 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
+#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#endif
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -40,27 +44,33 @@ enum emulation_result {
EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_AGAIN, /* something went wrong. go again */
+ EMULATE_EXIT_USER, /* emulation requires exit to user-space */
};
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern char kvmppc_handlers_start[];
-extern unsigned long kvmppc_handler_len;
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u64 val, unsigned int bytes, int is_bigendian);
+ u64 val, unsigned int bytes,
+ int is_default_endian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
+extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
+extern void kvmppc_decrementer_func(unsigned long data);
+extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
/* Core-specific hooks */
@@ -88,26 +98,146 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
-extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
- struct kvm_interrupt *irq);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
+extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot, unsigned long porder);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
+
+extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
+
+extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
+extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
+extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+
+union kvmppc_one_reg {
+ u32 wval;
+ u64 dval;
+ vector128 vval;
+ u64 vsxval[2];
+ struct {
+ u64 addr;
+ u64 length;
+ } vpaval;
+};
+
+struct kvmppc_ops {
+ struct module *owner;
+ int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ int (*check_requests)(struct kvm_vcpu *vcpu);
+ int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+ void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+ int (*prepare_memory_region)(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem);
+ void (*commit_memory_region)(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old);
+ int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+ int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+ int (*age_hva)(struct kvm *kvm, unsigned long hva);
+ int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+ void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+ void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+ void (*free_memslot)(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+ int (*create_memslot)(struct kvm_memory_slot *slot,
+ unsigned long npages);
+ int (*init_vm)(struct kvm *kvm);
+ void (*destroy_vm)(struct kvm *kvm);
+ int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+ int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+ int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+ int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+ void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+ long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+ return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
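/*
 * Sketch: generic code stays backend-agnostic by calling through the
 * ops table, e.g. (illustrative call site, not from this patch):
 *
 *	return vcpu->kvm->arch.kvm_ops->vcpu_run(run, vcpu);
 */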
/*
* Cuts out inst bits with ordering according to spec.
@@ -142,4 +272,310 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
return r;
}
+#define one_reg_size(id) \
+ (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+#define get_reg_val(id, reg) ({ \
+ union kvmppc_one_reg __u; \
+ switch (one_reg_size(id)) { \
+ case 4: __u.wval = (reg); break; \
+ case 8: __u.dval = (reg); break; \
+ default: BUG(); \
+ } \
+ __u; \
+})
+
+
+#define set_reg_val(id, val) ({ \
+ u64 __v; \
+ switch (one_reg_size(id)) { \
+ case 4: __v = (val).wval; break; \
+ case 8: __v = (val).dval; break; \
+ default: BUG(); \
+ } \
+ __v; \
+})
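/*
 * Illustrative use of the two macros above in a ONE_REG accessor pair;
 * "dec" is just an example field, not tied to a particular register id.
 */
static inline int example_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
				      union kvmppc_one_reg *val)
{
	*val = get_reg_val(id, vcpu->arch.dec);	/* width taken from id */
	return 0;
}

static inline int example_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
				      union kvmppc_one_reg *val)
{
	vcpu->arch.dec = (u32)set_reg_val(id, *val);	/* yields a u64 */
	return 0;
}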
+
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+struct openpic;
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+extern void kvm_cma_reserve(void) __init;
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+ paca[cpu].kvm_hstate.xics_phys = addr;
+}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ u32 xirr;
+
+ xirr = get_paca()->kvm_hstate.saved_xirr;
+ get_paca()->kvm_hstate.saved_xirr = 0;
+ return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+ paca[cpu].kvm_hstate.host_ipi = host_ipi;
+}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
+
+extern void kvm_hv_vm_activated(void);
+extern void kvm_hv_vm_deactivated(void);
+extern bool kvm_hv_mode_active(void);
+
+#else
+static inline void __init kvm_cma_reserve(void)
+{}
+
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline u32 kvmppc_get_xics_latch(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_kick(vcpu);
+}
+
+static inline bool kvm_hv_mode_active(void) { return false; }
+
+#endif
+
+#ifdef CONFIG_KVM_XICS
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
+}
+extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
+extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
+extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+#else
+static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
+ unsigned long server)
+ { return -EINVAL; }
+static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
+ struct kvm_irq_level *args)
+ { return -ENOTTY; }
+static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+ { return 0; }
+#endif
+
+static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GEPR, epr);
+#elif defined(CONFIG_BOOKE)
+ vcpu->arch.epr = epr;
+#endif
+}
+
+#ifdef CONFIG_KVM_MPIC
+
+void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
+int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
+ u32 cpu);
+void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
+
+#else
+
+static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ return -EINVAL;
+}
+
+static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
+ struct kvm_vcpu *vcpu)
+{
+}
+
+#endif /* CONFIG_KVM_MPIC */
+
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_config_tlb *cfg);
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_dirty_tlb *cfg);
+
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+ struct page *page;
+ /*
+ * We can only access pages that the kernel maps
+ * as memory. Bail out for unmapped ones.
+ */
+ if (!pfn_valid(pfn))
+ return;
+
+ /* Clear i-cache for new pages */
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+}
+
+/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness, so expose endian-aware accessors
+ * for all of its fields.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ /* Only Book3S_64 PR supports bi-endian for now */
+ return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+ /* Book3s_64 HV on little endian is always little endian */
+ return false;
+#else
+ return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ else \
+ return le##size##_to_cpu(vcpu->arch.shared->reg); \
+} \
+
+#define SHARED_WRAPPER_SET(reg, size) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ else \
+ vcpu->arch.shared->reg = cpu_to_le##size(val); \
+} \
+
+#define SHARED_WRAPPER(reg, size) \
+ SHARED_WRAPPER_GET(reg, size) \
+ SHARED_WRAPPER_SET(reg, size) \
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->msr = cpu_to_be64(val);
+ else
+ vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+ else
+ return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+ else
+ vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
+ * Call this after kvmppc_prepare_to_enter().  It puts the lazy-EE and
+ * irq-disabled tracking state back to normal mode, without actually
+ * enabling interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
+{
+ trace_hardirqs_on();
+
+#ifdef CONFIG_PPC64
+ /*
+ * To avoid races, the caller must have gone directly from having
+ * interrupts fully-enabled to hard-disabled.
+ */
+ WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
+ /* Only need to enable IRQs by hard enabling them after this */
+ local_paca->irq_happened = 0;
+ local_paca->soft_enabled = 1;
+#endif
+}
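/*
 * Sketch of the intended call order (illustrative, not from this
 * patch): hard-disable via kvmppc_prepare_to_enter(), then fix up the
 * lazy-EE state just before entering the guest.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;
 *	kvmppc_fix_ee_before_entry();
 */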
+
+static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
+{
+ ulong ea;
+ ulong msr_64bit = 0;
+
+ ea = kvmppc_get_gpr(vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(vcpu, ra);
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+ msr_64bit = MSR_CM;
+#elif defined(CONFIG_PPC_BOOK3S_64)
+ msr_64bit = MSR_SF;
+#endif
+
+ if (!(kvmppc_get_msr(vcpu) & msr_64bit))
+ ea = (uint32_t)ea;
+
+ return ea;
+}
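/*
 * Example (illustrative): an indexed-form load such as lwzx computes
 * EA = (ra|0) + (rb) exactly as above, so an emulator would call:
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where get_ra()/get_rb() are the asm/disassemble.h field extractors.
 */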
+
+extern void xics_wake_cpu(int cpu);
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index e1c4ac1cc4b..e3ad5c72724 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -1,6 +1,15 @@
#ifndef _ASM_POWERPC_LINKAGE_H
#define _ASM_POWERPC_LINKAGE_H
-/* Nothing to see here... */
+#ifdef CONFIG_PPC64
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#define cond_syscall(x) \
+ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
+ "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
+ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#endif
+#endif
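/*
 * Usage sketch: under the ELFv1 ABI each function has both a descriptor
 * ("sys_foo") and a code entry point (".sys_foo"), so the overrides
 * above alias both.  For example:
 *
 *	cond_syscall(sys_kexec_load);
 *
 * binds sys_kexec_load weakly to sys_ni_syscall when it is not built.
 */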
#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index c2410af6bfd..b8da9136386 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -2,7 +2,7 @@
#define _ARCH_POWERPC_LOCAL_H
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
typedef struct
{
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fefebb..d0a2a2f9956 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -20,135 +20,85 @@
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__
-/* These definitions relate to hypervisors that only exist when using
+/*
+ * These definitions relate to hypervisors that only exist when using
* a server type processor
*/
#ifdef CONFIG_PPC_BOOK3S
-//=============================================================================
-//
-// This control block contains the data that is shared between the
-// hypervisor (PLIC) and the OS.
-//
-//
-//----------------------------------------------------------------------------
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor and the OS.
+ */
#include <linux/cache.h>
+#include <linux/threads.h>
#include <asm/types.h>
#include <asm/mmu.h>
-/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
- * alignment is sufficient to prevent this */
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#define NR_LPPACAS 1
+
+/*
+ * The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
+ * alignment is sufficient to prevent this
+ */
struct lppaca {
-//=============================================================================
-// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
-// NOTE: The xDynXyz fields are fields that will be dynamically changed by
-// PLIC when preparing to bring a processor online or when dispatching a
-// virtual processor!
-//=============================================================================
- u32 desc; // Eye catcher 0xD397D781 x00-x03
- u16 size; // Size of this struct x04-x05
- u16 reserved1; // Reserved x06-x07
- u16 reserved2:14; // Reserved x08-x09
- u8 shared_proc:1; // Shared processor indicator ...
- u8 secondary_thread:1; // Secondary thread indicator ...
- volatile u8 dyn_proc_status:8; // Dynamic Status of this proc x0A-x0A
- u8 secondary_thread_count; // Secondary thread count x0B-x0B
- volatile u16 dyn_hv_phys_proc_index;// Dynamic HV Physical Proc Index0C-x0D
- volatile u16 dyn_hv_log_proc_index;// Dynamic HV Logical Proc Indexx0E-x0F
- u32 decr_val; // Value for Decr programming x10-x13
- u32 pmc_val; // Value for PMC regs x14-x17
- volatile u32 dyn_hw_node_id; // Dynamic Hardware Node id x18-x1B
- volatile u32 dyn_hw_proc_id; // Dynamic Hardware Proc Id x1C-x1F
- volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
- u32 dsei_data; // DSEI data x24-x27
- u64 sprg3; // SPRG3 value x28-x2F
- u8 reserved3[80]; // Reserved x30-x7F
-
-//=============================================================================
-// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
-//=============================================================================
- // This Dword contains a byte for each type of interrupt that can occur.
- // The IPI is a count while the others are just a binary 1 or 0.
- union {
- u64 any_int;
- struct {
- u16 reserved; // Reserved - cleared by #mpasmbl
- u8 xirr_int; // Indicates xXirrValue is valid or Immed IO
- u8 ipi_cnt; // IPI Count
- u8 decr_int; // DECR interrupt occurred
- u8 pdc_int; // PDC interrupt occurred
- u8 quantum_int; // Interrupt quantum reached
- u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending
- } fields;
- } int_dword;
-
- // Whenever any fields in this Dword are set then PLIC will defer the
- // processing of external interrupts. Note that PLIC will store the
- // XIRR directly into the xXirrValue field so that another XIRR will
- // not be presented until this one clears. The layout of the low
- // 4-bytes of this Dword is upto SLIC - PLIC just checks whether the
- // entire Dword is zero or not. A non-zero value in the low order
- // 2-bytes will result in SLIC being granted the highest thread
- // priority upon return. A 0 will return to SLIC as medium priority.
- u64 plic_defer_ints_area; // Entire Dword
-
- // Used to pass the real SRR0/1 from PLIC to SLIC as well as to
- // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
- u64 saved_srr0; // Saved SRR0 x10-x17
- u64 saved_srr1; // Saved SRR1 x18-x1F
-
- // Used to pass parms from the OS to PLIC for SetAsrAndRfid
- u64 saved_gpr3; // Saved GPR3 x20-x27
- u64 saved_gpr4; // Saved GPR4 x28-x2F
- union {
- u64 saved_gpr5; /* Saved GPR5 x30-x37 */
- struct {
- u8 cede_latency_hint; /* x30 */
- u8 reserved[7]; /* x31-x36 */
- } fields;
- } gpr5_dword;
-
-
- u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
- u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
- u8 fpregs_in_use; // FP regs in use x3A-x3A
- u8 pmcregs_in_use; // PMC regs in use x3B-x3B
- volatile u32 saved_decr; // Saved Decr Value x3C-x3F
- volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47
- volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F
- u64 tot_plic_latency; // Accumulated PLIC latency x50-x57
- u64 wait_state_cycles; // Wait cycles for this proc x58-x5F
- u64 end_of_quantum; // TB at end of quantum x60-x67
- u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F
- u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77
- volatile u32 virtual_decr; // Virtual DECR for shared procsx78-x7B
- u16 slb_count; // # of SLBs to maintain x7C-x7D
- u8 idle; // Indicate OS is idle x7E
- u8 vmxregs_in_use; // VMX registers in use x7F
-
-
-//=============================================================================
-// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
-//=============================================================================
- // This is the yield_count. An "odd" value (low bit on) means that
- // the processor is yielded (either because of an OS yield or a PLIC
- // preempt). An even value implies that the processor is currently
- // executing.
- // NOTE: This value will ALWAYS be zero for dedicated processors and
- // will NEVER be zero for shared processors (ie, initialized to a 1).
- volatile u32 yield_count; // PLIC increments each dispatchx00-x03
- volatile u32 dispersion_count; // dispatch changed phys cpu x04-x07
- volatile u64 cmo_faults; // CMO page fault count x08-x0F
- volatile u64 cmo_fault_time; // CMO page fault time x10-x17
- u8 reserved7[104]; // Reserved x18-x7F
-
-//=============================================================================
-// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
-//=============================================================================
- u32 page_ins; // CMO Hint - # page ins by OS x00-x03
- u8 reserved8[148]; // Reserved x04-x97
- volatile u64 dtl_idx; // Dispatch Trace Log head idx x98-x9F
- u8 reserved9[96]; // Reserved xA0-xFF
+ /* cacheline 1 contains read-only data */
+
+ __be32 desc; /* Eye catcher 0xD397D781 */
+ __be16 size; /* Size of this struct */
+ u8 reserved1[3];
+ u8 __old_status; /* Old status, including shared proc */
+ u8 reserved3[14];
+ volatile __be32 dyn_hw_node_id; /* Dynamic hardware node id */
+ volatile __be32 dyn_hw_proc_id; /* Dynamic hardware proc id */
+ u8 reserved4[56];
+ volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
+ /* associativity change counters */
+ u8 reserved5[32];
+
+ /* cacheline 2 contains local read-write data */
+
+ u8 reserved6[48];
+ u8 cede_latency_hint;
+ u8 ebb_regs_in_use;
+ u8 reserved7[6];
+ u8 dtl_enable_mask; /* Dispatch Trace Log mask */
+ u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
+ u8 fpregs_in_use;
+ u8 pmcregs_in_use;
+ u8 reserved8[28];
+ __be64 wait_state_cycles; /* Wait cycles for this proc */
+ u8 reserved9[28];
+ __be16 slb_count; /* # of SLBs to maintain */
+ u8 idle; /* Indicate OS is idle */
+ u8 vmxregs_in_use;
+
+ /* cacheline 3 is shared with other processors */
+
+ /*
+ * This is the yield_count. An "odd" value (low bit on) means that
+ * the processor is yielded (either because of an OS yield or a
+ * hypervisor preempt). An even value implies that the processor is
+ * currently executing.
+ * NOTE: Even dedicated processor partitions can yield, so this
+ * field cannot be used to determine if we are shared or dedicated.
+ */
+ volatile __be32 yield_count;
+ volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+ volatile __be64 cmo_faults; /* CMO page fault count */
+ volatile __be64 cmo_fault_time; /* CMO page fault time */
+ u8 reserved10[104];
+
+ /* cacheline 4-5 */
+
+ __be32 page_ins; /* CMO Hint - # page ins by OS */
+ u8 reserved11[148];
+ volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
+ u8 reserved12[96];
} __attribute__((__aligned__(0x400)));
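/*
 * Sketch: with the fields now declared __be32/__be64, readers byte-swap
 * on access; e.g. the odd/even yield_count test described above becomes:
 *
 *	u32 yield = be32_to_cpu(lppaca_of(cpu).yield_count);
 *	bool preempted = yield & 1;	(odd means currently yielded)
 */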
extern struct lppaca lppaca[];
@@ -156,43 +106,55 @@ extern struct lppaca lppaca[];
#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
/*
+ * We are using a non architected field to determine if a partition is
+ * shared or dedicated. This currently works on both KVM and PHYP, but
+ * we will have to transition to something better.
+ */
+#define LPPACA_OLD_SHARED_PROC 2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+ return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
+}
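/*
 * Illustrative caller (not from this patch): lock spin-wait paths can
 * consult this before burning cycles on a shared processor:
 *
 *	if (lppaca_shared_proc(local_paca->lppaca_ptr))
 *		yield_to_lock_holder();		(hypothetical helper)
 */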
+
+/*
* SLB shadow buffer structure as defined in the PAPR. The save_area
* contains adjacent ESID and VSID pairs for each shadowed SLB. The
* ESID is stored in the lower 64bits, then the VSID.
*/
struct slb_shadow {
- u32 persistent; // Number of persistent SLBs x00-x03
- u32 buffer_length; // Total shadow buffer length x04-x07
- u64 reserved; // Alignment x08-x0f
+ __be32 persistent; /* Number of persistent SLBs */
+ __be32 buffer_length; /* Total shadow buffer length */
+ __be64 reserved;
struct {
- u64 esid;
- u64 vsid;
- } save_area[SLB_NUM_BOLTED]; // x10-x40
+ __be64 esid;
+ __be64 vsid;
+ } save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-extern struct slb_shadow slb_shadow[];
-
/*
* Layout of entries in the hypervisor's dispatch trace log buffer.
*/
struct dtl_entry {
u8 dispatch_reason;
u8 preempt_reason;
- u16 processor_id;
- u32 enqueue_to_dispatch_time;
- u32 ready_to_enqueue_time;
- u32 waiting_to_ready_time;
- u64 timebase;
- u64 fault_addr;
- u64 srr0;
- u64 srr1;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
};
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
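/*
 * Sketch: the DTL is a per-cpu ring of N_DISPATCH_LOG entries and the
 * lppaca's dtl_idx is a monotonically increasing head index, so a
 * consumer maps an index to a slot with (dtl_buf is an assumed ring
 * pointer, not a name from this patch):
 *
 *	struct dtl_entry *e = &dtl_buf[idx % N_DISPATCH_LOG];
 */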
+extern struct kmem_cache *dtl_cache;
+
/*
- * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
* DTL entries, it can set this pointer to a function that will get
* called once for each DTL entry that gets processed.
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 81713acf752..f5117674bf9 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -25,6 +25,7 @@
#if !defined(__ASSEMBLY__)
#include <linux/types.h>
+#include <linux/export.h>
/* lv1 call declaration macros */
@@ -230,7 +231,7 @@ LV1_CALL(allocate_memory, 4, 2, 0 )
LV1_CALL(write_htab_entry, 4, 0, 1 )
LV1_CALL(construct_virtual_address_space, 3, 2, 2 )
LV1_CALL(invalidate_htab_entries, 5, 0, 3 )
-LV1_CALL(get_virtual_address_space_id_of_ppe, 1, 1, 4 )
+LV1_CALL(get_virtual_address_space_id_of_ppe, 0, 1, 4 )
LV1_CALL(query_logical_partition_address_region_info, 1, 5, 6 )
LV1_CALL(select_virtual_address_space, 1, 0, 7 )
LV1_CALL(pause, 1, 0, 9 )
@@ -263,9 +264,9 @@ LV1_CALL(configure_execution_time_variable, 1, 0, 77 )
LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
LV1_CALL(create_repository_node, 6, 0, 90 )
-LV1_CALL(get_repository_node_value, 5, 2, 91 )
-LV1_CALL(modify_repository_node_value, 6, 0, 92 )
-LV1_CALL(remove_repository_node, 4, 0, 93 )
+LV1_CALL(read_repository_node, 5, 2, 91 )
+LV1_CALL(write_repository_node, 6, 0, 92 )
+LV1_CALL(delete_repository_node, 4, 0, 93 )
LV1_CALL(read_htab_entries, 2, 5, 95 )
LV1_CALL(set_dabr, 2, 0, 96 )
LV1_CALL(get_total_execution_time, 2, 1, 103 )
@@ -275,7 +276,7 @@ LV1_CALL(construct_io_irq_outlet, 1, 1, 120 )
LV1_CALL(destruct_io_irq_outlet, 1, 0, 121 )
LV1_CALL(map_htab, 1, 1, 122 )
LV1_CALL(unmap_htab, 1, 0, 123 )
-LV1_CALL(get_version_info, 0, 1, 127 )
+LV1_CALL(get_version_info, 0, 2, 127 )
LV1_CALL(insert_htab_entry, 6, 3, 158 )
LV1_CALL(read_virtual_uart, 3, 1, 162 )
LV1_CALL(write_virtual_uart, 3, 1, 163 )
@@ -293,9 +294,9 @@ LV1_CALL(unmap_device_dma_region, 4, 0, 177 )
LV1_CALL(net_add_multicast_address, 4, 0, 185 )
LV1_CALL(net_remove_multicast_address, 4, 0, 186 )
LV1_CALL(net_start_tx_dma, 4, 0, 187 )
-LV1_CALL(net_stop_tx_dma, 3, 0, 188 )
+LV1_CALL(net_stop_tx_dma, 2, 0, 188 )
LV1_CALL(net_start_rx_dma, 4, 0, 189 )
-LV1_CALL(net_stop_rx_dma, 3, 0, 190 )
+LV1_CALL(net_stop_rx_dma, 2, 0, 190 )
LV1_CALL(net_set_interrupt_status_indicator, 4, 0, 191 )
LV1_CALL(net_set_interrupt_mask, 4, 0, 193 )
LV1_CALL(net_control, 6, 2, 194 )
@@ -315,7 +316,7 @@ LV1_CALL(gpu_context_free, 1, 0, 218 )
LV1_CALL(gpu_context_iomap, 5, 0, 221 )
LV1_CALL(gpu_context_attribute, 6, 0, 225 )
LV1_CALL(gpu_context_intr, 1, 1, 227 )
-LV1_CALL(gpu_attribute, 5, 0, 228 )
+LV1_CALL(gpu_attribute, 3, 0, 228 )
LV1_CALL(get_rtc, 0, 2, 232 )
LV1_CALL(set_ppe_periodic_tracer_frequency, 1, 0, 240 )
LV1_CALL(start_ppe_periodic_tracer, 5, 0, 241 )
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b014553..f92b0b54e92 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <asm/setup.h>
@@ -27,52 +28,40 @@ struct iommu_table;
struct rtc_time;
struct file;
struct pci_controller;
-#ifdef CONFIG_KEXEC
struct kimage;
-#endif
-
-#ifdef CONFIG_SMP
-struct smp_ops_t {
- void (*message_pass)(int target, int msg);
- int (*probe)(void);
- void (*kick_cpu)(int nr);
- void (*setup_cpu)(int nr);
- void (*take_timebase)(void);
- void (*give_timebase)(void);
- int (*cpu_enable)(unsigned int nr);
- int (*cpu_disable)(void);
- void (*cpu_die)(unsigned int nr);
- int (*cpu_bootable)(unsigned int nr);
-};
-#endif
+struct pci_host_bridge;
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
void (*hpte_invalidate)(unsigned long slot,
- unsigned long va,
- int psize, int ssize,
- int local);
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, int local);
long (*hpte_updatepp)(unsigned long slot,
unsigned long newpp,
- unsigned long va,
- int psize, int ssize,
- int local);
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, int local);
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
long (*hpte_insert)(unsigned long hpte_group,
- unsigned long va,
+ unsigned long vpn,
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
- int psize, int ssize);
+ int psize, int apsize,
+ int ssize);
long (*hpte_remove)(unsigned long hpte_group);
void (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
void (*flush_hash_range)(unsigned long number, int local);
+ void (*hugepage_invalidate)(struct mm_struct *mm,
+ unsigned char *hpte_slot_array,
+ unsigned long addr, int psize);
- /* special for kexec, to be called in real mode, linar mapping is
+ /* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*hpte_clear_all)(void);
@@ -89,6 +78,18 @@ struct machdep_calls {
long index);
void (*tce_flush)(struct iommu_table *tbl);
+ /* _rm versions are for real mode use only */
+ int (*tce_build_rm)(struct iommu_table *tbl,
+ long index,
+ long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+ void (*tce_free_rm)(struct iommu_table *tbl,
+ long index,
+ long npages);
+ void (*tce_flush_rm)(struct iommu_table *tbl);
+
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -97,13 +98,17 @@ struct machdep_calls {
void (*iommu_save)(void);
void (*iommu_restore)(void);
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+ unsigned long (*memory_block_size)(void);
+#endif
#endif /* CONFIG_PPC64 */
void (*pci_dma_dev_setup)(struct pci_dev *dev);
void (*pci_dma_bus_setup)(struct pci_bus *bus);
- /* Platform set_dma_mask override */
+ /* Platform set_dma_mask and dma_get_required_mask overrides */
int (*dma_set_mask)(struct device *dev, u64 dma_mask);
+ u64 (*dma_get_required_mask)(struct device *dev);
int (*probe)(void);
void (*setup_arch)(void); /* Optional, may be NULL */
@@ -111,22 +116,21 @@ struct machdep_calls {
/* Optional, may be NULL. */
void (*show_cpuinfo)(struct seq_file *m);
void (*show_percpuinfo)(struct seq_file *m, int i);
+ /* Returns the current operating frequency of "cpu" in Hz */
+ unsigned long (*get_proc_freq)(unsigned int cpu);
void (*init_IRQ)(void);
- /* Return an irq, or NO_IRQ to indicate there are none pending.
- * If for some reason there is no irq, but the interrupt
- * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
+ /* Return an irq, or NO_IRQ to indicate there are none pending. */
unsigned int (*get_irq)(void);
-#ifdef CONFIG_KEXEC
- void (*kexec_cpu_down)(int crash_shutdown, int secondary);
-#endif
/* PCI stuff */
/* Called after scanning the bus, before allocating resources */
void (*pcibios_fixup)(void);
int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
+ int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ *bridge);
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
@@ -171,6 +175,9 @@ struct machdep_calls {
int (*system_reset_exception)(struct pt_regs *regs);
int (*machine_check_exception)(struct pt_regs *regs);
+ /* Called during machine check exception to retrieve fixup address. */
+ bool (*mce_check_early_recovery)(struct pt_regs *regs);
+
/* Motherboard/chipset features. This is a kind of general purpose
* hook used to control some machine specific features (like reset
* lines, chip power control, etc...).
@@ -186,9 +193,6 @@ struct machdep_calls {
unsigned long size,
pgprot_t vma_prot);
- /* Idle loop for this platform, leave empty for default idle loop */
- void (*idle_loop)(void);
-
/*
* Function for waiting for work with reduced power in idle loop;
* called with interrupts disabled.
@@ -200,7 +204,12 @@ struct machdep_calls {
void (*enable_pmcs)(void);
/* Set DABR for this platform, leave empty for default implementation */
- int (*set_dabr)(unsigned long dabr);
+ int (*set_dabr)(unsigned long dabr,
+ unsigned long dabrx);
+
+ /* Set DAWR for this platform, leave empty for default implementation */
+ int (*set_dawr)(unsigned long dawr,
+ unsigned long dawrx);
#ifdef CONFIG_PPC32 /* XXX for now */
/* A general init function, called by ppc_init in init/main.c.
@@ -231,17 +240,22 @@ struct machdep_calls {
* allow assignment/enabling of the device. */
int (*pcibios_enable_device_hook)(struct pci_dev *);
+ /* Called after scan and before resource survey */
+ void (*pcibios_fixup_phb)(struct pci_controller *hose);
+
+ /* Called during PCI resource reassignment */
+ resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type);
+
+ /* Reset the secondary bus of bridge */
+ void (*pcibios_reset_secondary_bus)(struct pci_dev *dev);
+
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
*/
void (*machine_shutdown)(void);
#ifdef CONFIG_KEXEC
- /* Called to do the minimal shutdown needed to run a kexec'd kernel
- * to run successfully.
- * XXX Should we move this one out of kexec scope?
- */
- void (*machine_crash_shutdown)(struct pt_regs *regs);
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
/* Called to do whatever setup is needed on image and the
* reboot code buffer. Returns 0 on success.
@@ -250,9 +264,6 @@ struct machdep_calls {
*/
int (*machine_kexec_prepare)(struct kimage *image);
- /* Called to handle any machine specific cleanup on image */
- void (*machine_kexec_cleanup)(struct kimage *image);
-
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
@@ -275,11 +286,19 @@ struct machdep_calls {
ssize_t (*cpu_probe)(const char *, size_t);
ssize_t (*cpu_release)(const char *, size_t);
#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+ int (*get_random_long)(unsigned long *v);
+#endif
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ int (*remove_memory)(u64, u64);
+#endif
};
extern void e500_idle(void);
extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
+extern void power7_idle(void);
extern void ppc6xx_idle(void);
extern void book3e_idle(void);
@@ -324,14 +343,6 @@ extern sys_ctrler_t sys_ctrler;
#endif /* CONFIG_PPC_PMAC */
-extern void setup_pci_ptrs(void);
-
-#ifdef CONFIG_SMP
-/* Poor default implementations */
-extern void __devinit smp_generic_give_timebase(void);
-extern void __devinit smp_generic_take_timebase(void);
-#endif /* CONFIG_SMP */
-
/* Functions to produce codes on the leds.
* The SRC code should be unique for the message category and should
@@ -348,28 +359,28 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
ppc_md.log_error(buf, err_type, fatal);
}
-#define __define_machine_initcall(mach,level,fn,id) \
+#define __define_machine_initcall(mach, fn, id) \
static int __init __machine_initcall_##mach##_##fn(void) { \
if (machine_is(mach)) return fn(); \
return 0; \
} \
- __define_initcall(level,__machine_initcall_##mach##_##fn,id);
-
-#define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1)
-#define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s)
-#define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2)
-#define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s)
-#define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3)
-#define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s)
-#define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4)
-#define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s)
-#define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5)
-#define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s)
-#define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs)
-#define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6)
-#define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s)
-#define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7)
-#define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s)
+ __define_initcall(__machine_initcall_##mach##_##fn, id);
+
+#define machine_core_initcall(mach, fn) __define_machine_initcall(mach, fn, 1)
+#define machine_core_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 1s)
+#define machine_postcore_initcall(mach, fn) __define_machine_initcall(mach, fn, 2)
+#define machine_postcore_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 2s)
+#define machine_arch_initcall(mach, fn) __define_machine_initcall(mach, fn, 3)
+#define machine_arch_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 3s)
+#define machine_subsys_initcall(mach, fn) __define_machine_initcall(mach, fn, 4)
+#define machine_subsys_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 4s)
+#define machine_fs_initcall(mach, fn) __define_machine_initcall(mach, fn, 5)
+#define machine_fs_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 5s)
+#define machine_rootfs_initcall(mach, fn) __define_machine_initcall(mach, fn, rootfs)
+#define machine_device_initcall(mach, fn) __define_machine_initcall(mach, fn, 6)
+#define machine_device_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 6s)
+#define machine_late_initcall(mach, fn) __define_machine_initcall(mach, fn, 7)
+#define machine_late_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 7s)
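/*
 * Usage sketch: platform code registers initcalls that only fire when
 * machine_is() matches the running platform, e.g.:
 *
 *	machine_device_initcall(powermac, pmac_declare_of_platform_devices);
 */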
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MACHDEP_H */
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 7ab82c825a0..27af7f8bbb8 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -76,7 +76,7 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
struct resource *res = &dev->resource[resource_no];
if (res->start == 0 || res->end == 0 || res->end < res->start)
return 0;
- return res->end - res->start + 1;
+ return resource_size(res);
}
extern int macio_enable_devres(struct macio_dev *dev);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 00000000000..f97d8cb6bdf
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,198 @@
+/*
+ * Machine check exception header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+/*
+ * Machine Check bits on power7 and power8
+ */
+#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */
+
+/* SRR1 bits for machine check (On Power7 and Power8) */
+#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
+
+#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
+#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
+
+/* SRR1 bits for machine check (On Power8) */
+#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45))
+
+/* DSISR bits for machine check (On Power7 and Power8) */
+#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */
+#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */
+#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */
+#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */
+#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */
+
+/*
+ * DSISR bits for machine check (Power8) in addition to above.
+ * Secondary DERAT Multihit
+ */
+#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54))
+
+/* SLB error bits */
+#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \
+ P7_DSISR_MC_SLB_PARITY_MFSLB | \
+ P7_DSISR_MC_SLB_MULTIHIT | \
+ P7_DSISR_MC_SLB_MULTIHIT_PARITY)
+
+#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
+ P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+enum MCE_Version {
+ MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+ MCE_SEV_NO_ERROR = 0,
+ MCE_SEV_WARNING = 1,
+ MCE_SEV_ERROR_SYNC = 2,
+ MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+ MCE_DISPOSITION_RECOVERED = 0,
+ MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+ MCE_INITIATOR_UNKNOWN = 0,
+ MCE_INITIATOR_CPU = 1,
+};
+
+enum MCE_ErrorType {
+ MCE_ERROR_TYPE_UNKNOWN = 0,
+ MCE_ERROR_TYPE_UE = 1,
+ MCE_ERROR_TYPE_SLB = 2,
+ MCE_ERROR_TYPE_ERAT = 3,
+ MCE_ERROR_TYPE_TLB = 4,
+};
+
+enum MCE_UeErrorType {
+ MCE_UE_ERROR_INDETERMINATE = 0,
+ MCE_UE_ERROR_IFETCH = 1,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_UE_ERROR_LOAD_STORE = 3,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+ MCE_SLB_ERROR_INDETERMINATE = 0,
+ MCE_SLB_ERROR_PARITY = 1,
+ MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+ MCE_ERAT_ERROR_INDETERMINATE = 0,
+ MCE_ERAT_ERROR_PARITY = 1,
+ MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+ MCE_TLB_ERROR_INDETERMINATE = 0,
+ MCE_TLB_ERROR_PARITY = 1,
+ MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+struct machine_check_event {
+ enum MCE_Version version:8; /* 0x00 */
+ uint8_t in_use; /* 0x01 */
+ enum MCE_Severity severity:8; /* 0x02 */
+ enum MCE_Initiator initiator:8; /* 0x03 */
+ enum MCE_ErrorType error_type:8; /* 0x04 */
+ enum MCE_Disposition disposition:8; /* 0x05 */
+ uint8_t reserved_1[2]; /* 0x06 */
+ uint64_t gpr3; /* 0x08 */
+ uint64_t srr0; /* 0x10 */
+ uint64_t srr1; /* 0x18 */
+ union { /* 0x20 */
+ struct {
+ enum MCE_UeErrorType ue_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t physical_address_provided;
+ uint8_t reserved_1[5];
+ uint64_t effective_address;
+ uint64_t physical_address;
+ uint8_t reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum MCE_SlbErrorType slb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum MCE_EratErrorType erat_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum MCE_TlbErrorType tlb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } tlb_error;
+ } u;
+};
+
+struct mce_error_info {
+ enum MCE_ErrorType error_type:8;
+ union {
+ enum MCE_UeErrorType ue_error_type:8;
+ enum MCE_SlbErrorType slb_error_type:8;
+ enum MCE_EratErrorType erat_error_type:8;
+ enum MCE_TlbErrorType tlb_error_type:8;
+ } u;
+ uint8_t reserved[2];
+};
+
+#define MAX_MC_EVT 100
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE true
+#define MCE_EVENT_DONTRELEASE false
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err, uint64_t nip,
+ uint64_t addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt);
+extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
+
+#endif /* __ASM_PPC64_MCE_H__ */
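[For orientation, a consumer of this interface copies an event out and releases its queue slot in one call. A minimal sketch, assuming a context where printing is allowed; the helper name is hypothetical:

    #include <asm/mce.h>

    /* Hypothetical helper: report and retire one queued MCE. */
    static void report_one_mce(void)
    {
            struct machine_check_event evt;

            /* MCE_EVENT_RELEASE frees the slot as the event is copied out */
            if (get_mce_event(&evt, MCE_EVENT_RELEASE)) {
                    machine_check_print_event_info(&evt);
                    if (evt.disposition == MCE_DISPOSITION_NOT_RECOVERED)
                            pr_err("MCE not recovered, addr: 0x%016llx\n",
                                   (unsigned long long)get_mce_fault_addr(&evt));
            }
    }
]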
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
deleted file mode 100644
index 43efc345065..00000000000
--- a/arch/powerpc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_POWERPC_MEMBLOCK_H
-#define _ASM_POWERPC_MEMBLOCK_H
-
-#include <asm/udbg.h>
-
-#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-
-#endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index d4a7f645c5d..8565c254151 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -1,34 +1,14 @@
-#ifndef _ASM_POWERPC_MMAN_H
-#define _ASM_POWERPC_MMAN_H
-
-#include <asm-generic/mman-common.h>
-
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_MMAN_H
+#define _ASM_POWERPC_MMAN_H
-#define PROT_SAO 0x10 /* Strong Access Ordering */
-
-#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
-#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
-#define MAP_LOCKED 0x80
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-
-#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
-#define MCL_FUTURE 0x4000 /* lock all additions to address space */
-
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
+#include <uapi/asm/mman.h>
-#ifdef __KERNEL__
#ifdef CONFIG_PPC64
#include <asm/cputable.h>
@@ -61,5 +41,4 @@ static inline int arch_validate_prot(unsigned long prot)
#define arch_validate_prot(prot) arch_validate_prot(prot)
#endif /* CONFIG_PPC64 */
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81ea64..d0918e09557 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,10 @@
/* MAS registers bit definitions */
-#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
-#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
+#define MAS0_ESEL_MASK 0x0FFF0000
+#define MAS0_ESEL_SHIFT 16
+#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
@@ -50,14 +52,14 @@
#define MAS1_VALID 0x80000000
#define MAS1_IPROT 0x40000000
-#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
#define MAS1_IND 0x00002000
#define MAS1_TS 0x00001000
#define MAS1_TSIZE_MASK 0x00000f80
#define MAS1_TSIZE_SHIFT 7
-#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
-#define MAS2_EPN 0xFFFFF000
+#define MAS2_EPN (~0xFFFUL)
#define MAS2_X0 0x00000040
#define MAS2_X1 0x00000020
#define MAS2_W 0x00000010
@@ -65,6 +67,7 @@
#define MAS2_M 0x00000004
#define MAS2_G 0x00000002
#define MAS2_E 0x00000001
+#define MAS2_WIMGE_MASK 0x0000001f
#define MAS2_EPN_MASK(size) (~0 << (size + 10))
#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
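[The added parentheses around the macro arguments are not cosmetic: C precedence mis-groups unparenthesized arguments that contain low-precedence operators. A minimal illustration; the pid value is hypothetical:

    unsigned int pid = 0x2a;

    /*
     * The old MAS1_TID(x) expanded to ((x << 16) & 0x3FFF0000), so
     * MAS1_TID(pid & 0x3fff) became pid & (0x3fff << 16) -- '&' binds
     * more loosely than '<<' -- silently yielding the wrong TID.
     * The new form shifts the whole argument first, as intended.
     */
    unsigned int mas1 = MAS1_VALID | MAS1_TID(pid & 0x3fff);
]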
@@ -79,6 +82,7 @@
#define MAS3_SW 0x00000004
#define MAS3_UR 0x00000002
#define MAS3_SR 0x00000001
+#define MAS3_BAP_MASK 0x0000003f
#define MAS3_SPSIZE 0x0000003e
#define MAS3_SPSIZE_SHIFT 1
@@ -100,6 +104,8 @@
#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
#define MAS4_TSIZED_SHIFT 7
+#define MAS5_SGS 0x80000000
+
#define MAS6_SPID0 0x3FFF0000
#define MAS6_SPID1 0x00007FFE
#define MAS6_ISIZE(x) MAS1_TSIZE(x)
@@ -114,6 +120,10 @@
#define MAS7_RPN 0xFFFFFFFF
+#define MAS8_TGS 0x80000000 /* Guest space */
+#define MAS8_VF 0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID 0x000000ff
+
/* Bit definitions for MMUCFG */
#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
@@ -137,6 +147,21 @@
#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+/* MMUCFG bits */
+#define MMUCFG_MAVN_MASK 0x00000003
+#define MMUCFG_MAVN_V1_0 0x00000000
+#define MMUCFG_MAVN_V2_0 0x00000001
+#define MMUCFG_NTLB_MASK 0x0000000c
+#define MMUCFG_NTLB_SHIFT 2
+#define MMUCFG_PIDSIZE_MASK 0x000007c0
+#define MMUCFG_PIDSIZE_SHIFT 6
+#define MMUCFG_TWC 0x00008000
+#define MMUCFG_LRAT 0x00010000
+#define MMUCFG_RASIZE_MASK 0x00fe0000
+#define MMUCFG_RASIZE_SHIFT 17
+#define MMUCFG_LPIDSIZE_MASK 0x0f000000
+#define MMUCFG_LPIDSIZE_SHIFT 24
+
/* TLBnCFG encoding */
#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
#define TLBnCFG_HES 0x00002000 /* HW select supported */
@@ -149,6 +174,7 @@
#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
#define TLBnCFG_MAXSIZE_SHIFT 16
#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+#define TLBnCFG_ASSOC_SHIFT 24
/* TLBnPS encoding */
#define TLBnPS_4K 0x00000004
@@ -189,6 +215,7 @@
#define TLBILX_T_CLASS3 7
#ifndef __ASSEMBLY__
+#include <asm/bug.h>
extern unsigned int tlbcam_index;
@@ -196,6 +223,15 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
+#ifdef CONFIG_PPC_MM_SLICES
+ u64 low_slices_psize; /* SLB page size encodings */
+ u64 high_slices_psize; /* 4 bits per slice for now */
+ u16 user_psize; /* page size index */
+#endif
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
/* Page size definitions, common between 32 and 64-bit
@@ -215,6 +251,23 @@ struct mmu_psize_def
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
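[These helpers round-trip between a page shift and an mmu_psize_defs[] index; typical (sketched) use:

    int psize = shift_to_mmu_psize(PAGE_SHIFT);

    if (psize < 0)
            return -EINVAL;        /* no mmu_psize_defs[] entry for this shift */

    /* The reverse lookup BUG()s on an unpopulated index. */
    unsigned int shift = mmu_psize_to_shift(psize);  /* == PAGE_SHIFT */
]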
/* The page sizes use the same names as 64-bit hash but are
* constants
*/
@@ -229,6 +282,33 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;
+struct tlb_core_data {
+ /*
+ * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+ * Must be the first struct element.
+ */
+ u8 lock;
+
+ /* For software way selection, as on Freescale TLB1 */
+ u8 esel_next, esel_max, esel_first;
+};
+
+#ifdef CONFIG_PPC64
+extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE 0
+#define PPC_HTW_IBM 1
+#define PPC_HTW_E6500 2
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index acac35d5b38..c2b4dcf23d0 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,15 @@
#include <asm/page.h>
/*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/pgtable-ppc64.h>
+#include <asm/bug.h>
+#include <asm/processor.h>
+
+/*
* Segment table
*/
@@ -27,7 +36,7 @@
#define STE_VSID_SHIFT 12
/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x6
+#define STAB0_PAGE 0x8
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
@@ -90,29 +99,45 @@ extern char initial_stab[];
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
-#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_G ASM_CONST(0x0000000000000008)
+#define HPTE_R_M ASM_CONST(0x0000000000000010)
+#define HPTE_R_I ASM_CONST(0x0000000000000020)
+#define HPTE_R_W ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
+#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
+
+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
+#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
+#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
+#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT 12
+
+#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
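[These fields combine into the RB operand of tlbiel when invalidating whole congruence-class sets. Roughly as below; this is an encoding sketch only, and the in-kernel flush sequences add further synchronization around it:

    unsigned long set, rb;

    for (set = 0; set < POWER7_TLB_SETS; set++) {
            rb  = (set << TLBIEL_INVAL_SET_SHIFT) & TLBIEL_INVAL_SET_MASK;
            rb |= TLBIEL_INVAL_SET;        /* this set, all LPIDs */
            asm volatile("tlbiel %0" : : "r" (rb) : "memory");
    }
    asm volatile("ptesync" : : : "memory");
]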
#ifndef __ASSEMBLY__
struct hash_pte {
- unsigned long v;
- unsigned long r;
+ __be64 v;
+ __be64 r;
};
extern struct hash_pte *htab_address;
@@ -131,11 +156,29 @@ extern unsigned long htab_hash_mask;
struct mmu_psize_def
{
unsigned int shift; /* number of bits */
- unsigned int penc; /* HPTE encoding */
+ int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
#endif /* __ASSEMBLY__ */
@@ -148,13 +191,35 @@ struct mmu_psize_def
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1
+/*
+ * Encode the page number shift.
+ * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
+ * 12 bits. This enables us to address up to a 76-bit VA.
+ * For the HPT hash we can ignore the page size bits of the VA, and for
+ * HPTE encoding we ignore up to 23 bits of the VA. So ignoring the lower
+ * 12 bits ensures we work in all cases, including 4k page size.
+ */
+#define VPN_SHIFT 12
+
+/*
+ * HPTE Large Page (LP) details
+ */
+#define LP_SHIFT 12
+#define LP_BITS 8
+#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
#ifndef __ASSEMBLY__
+static inline int segment_shift(int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return SID_SHIFT;
+ return SID_SHIFT_1T;
+}
+
/*
* The current system page and segment sizes
*/
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
@@ -174,66 +239,92 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
extern int mmu_ci_restrictions;
/*
- * This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE. The bottom 7 bits
+ * of the returned value are zero.
*/
-static inline unsigned long hpte_encode_v(unsigned long va, int psize,
- int ssize)
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+ int ssize)
{
unsigned long v;
- v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+ /*
+	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
+	 * These bits are not needed in the PTE, because the
+	 * low-order b of these bits are part of the byte offset
+	 * into the virtual page and, if b < 23, the high-order
+	 * 23-b of these bits are always used in selecting the
+	 * PTEGs to be searched.
+ */
+ v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
v <<= HPTE_V_AVPN_SHIFT;
- if (psize != MMU_PAGE_4K)
- v |= HPTE_V_LARGE;
v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * using the base page size and actual page size.
+ */
+static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
+ int actual_psize, int ssize)
+{
+ unsigned long v;
+ v = hpte_encode_avpn(vpn, base_psize, ssize);
+ if (actual_psize != MMU_PAGE_4K)
+ v |= HPTE_V_LARGE;
+ return v;
+}
+
+/*
+ * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size
*/
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
+ int actual_psize)
{
- unsigned long r;
-
/* A 4K page needs no special encoding */
- if (psize == MMU_PAGE_4K)
+ if (actual_psize == MMU_PAGE_4K)
return pa & HPTE_R_RPN;
else {
- unsigned int penc = mmu_psize_defs[psize].penc;
- unsigned int shift = mmu_psize_defs[psize].shift;
- return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+ unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
+ unsigned int shift = mmu_psize_defs[actual_psize].shift;
+ return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
}
- return r;
}
/*
- * Build a VA given VSID, EA and segment size
+ * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
*/
-static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
- int ssize)
+static inline unsigned long hpt_vpn(unsigned long ea,
+ unsigned long vsid, int ssize)
{
- if (ssize == MMU_SEGSIZE_256M)
- return (vsid << 28) | (ea & 0xfffffffUL);
- return (vsid << 40) | (ea & 0xffffffffffUL);
+ unsigned long mask;
+ int s_shift = segment_shift(ssize);
+
+ mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
+ return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
/*
* This hashes a virtual address
*/
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
- int ssize)
+static inline unsigned long hpt_hash(unsigned long vpn,
+ unsigned int shift, int ssize)
{
+ int mask;
unsigned long hash, vsid;
+	/* VPN_SHIFT can be at most 12 */
if (ssize == MMU_SEGSIZE_256M) {
- hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
+ mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
+ hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
+ ((vpn & mask) >> (shift - VPN_SHIFT));
} else {
- vsid = va >> 40;
- hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
+ mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
+ vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
+ hash = vsid ^ (vsid << 25) ^
+			((vpn & mask) >> (shift - VPN_SHIFT));
}
return hash & 0x7fffffffffUL;
}
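[Putting hpt_vpn() and hpt_hash() together, the hash is now derived from the shifted VPN rather than the raw VA. A sketch; the helper name is hypothetical, ea and vsid come from the usual translation paths, and HPTES_PER_GROUP is the 8-entry group size defined elsewhere in this header:

    /* Locate the primary PTE group for a 4K page in a 256MB segment. */
    static unsigned long hpte_group_for(unsigned long ea, unsigned long vsid)
    {
            unsigned long vpn, hash;

            vpn  = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
            hash = hpt_hash(vpn, mmu_psize_defs[MMU_PAGE_4K].shift,
                            MMU_SEGSIZE_256M);
            return (hash & htab_hash_mask) * HPTES_PER_GROUP;
    }
]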
@@ -250,19 +341,32 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __hash_page_thp(unsigned long ea, unsigned long access,
+ unsigned long vsid, pmd_t *pmdp, unsigned long trap,
+ int local, int ssize, unsigned int psize);
+#else
+static inline int __hash_page_thp(unsigned long ea, unsigned long access,
+ unsigned long vsid, pmd_t *pmdp,
+ unsigned long trap, int local,
+ int ssize, unsigned int psize)
+{
+ BUG();
+ return -1;
+}
+#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
unsigned long vsid, unsigned long trap,
- int ssize, int psize, unsigned long pte);
+ int ssize, int psize, int lpsize,
+ unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long prot,
int psize, int ssize);
-extern void add_gpage(unsigned long addr, unsigned long page_size,
- unsigned long number_of_pages);
+extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);
@@ -276,69 +380,70 @@ extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
- * VSID allocation
+ * VSID allocation (256MB segment)
*
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- * (context << 15) | (esid & 0x7fff)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
*
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
+ * For user processes the max context id is limited to ((1ul << 19) - 5).
+ * For kernel space, we use the top 4 context ids to map addresses as below.
+ * NOTE: each context only supports 64TB for now.
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
*
* VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
- * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
*
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
+ * VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
*
- * This scheme has several advantages over older methods:
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
*
- * - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- * - We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- * - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That would result in VSID 0
+ * because of the modulo operation in the vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
*/
+
+#define CONTEXT_BITS 19
+#define ESID_BITS 18
+#define ESID_BITS_1T 6
+
/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate (slb_low.S), do_stab_bolted
- * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
*/
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
-#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
-#define VSID_BITS_256M 36
+/*
+ * This should be computed such that protovsid * vsid_multiplier
+ * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
+ */
+#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
+#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 24
+#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 16
-#define USER_ESID_BITS_1T 4
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@@ -362,7 +467,8 @@ extern void slb_set_size(u16 size);
srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \
- /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+ /* NOTE: explanation based on VSID_BITS_##size = 36 \
+ * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \
@@ -372,6 +478,8 @@ extern void slb_set_size(u16 size);
srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
add rt,rt,rx
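[The same reduction in C, for the 256M case: multiply by the prime, then reduce modulo 2^37 - 1 by folding the high bits back in instead of dividing. A sketch mirroring the asm above; the in-tree C path is the vsid_scramble() macro:

    static unsigned long vsid_scramble_256m(unsigned long protovsid)
    {
            unsigned long x = protovsid * VSID_MULTIPLIER_256M;

            /* a*2^37 + b == a + b (mod 2^37 - 1): fold high into low */
            x  = (x >> VSID_BITS_256M) + (x & VSID_MODULUS_256M);
            x += (x + 1) >> VSID_BITS_256M;  /* absorb a carry past the modulus */
            return x & VSID_MODULUS_256M;
    }
]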
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
#ifndef __ASSEMBLY__
@@ -389,7 +497,7 @@ extern void slb_set_size(u16 size);
*/
struct subpage_prot_table {
unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
+ unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
unsigned int *low_prot[4];
};
@@ -408,6 +516,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
typedef unsigned long mm_context_id_t;
+struct spinlock;
typedef struct {
mm_context_id_t id;
@@ -415,7 +524,7 @@ typedef struct {
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
- u64 high_slices_psize; /* 4 bits per slice for now */
+ unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
u16 sllp; /* SLB page size encoding */
#endif
@@ -423,6 +532,15 @@ typedef struct {
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#ifdef CONFIG_PPC_ICSWX
+ struct spinlock *cop_lockp; /* guard acop and cop_pid */
+ unsigned long acop; /* mask of enabled coprocessor types */
+ unsigned int cop_pid; /* pid value used with coprocessors */
+#endif /* CONFIG_PPC_ICSWX */
+#ifdef CONFIG_PPC_64K_PAGES
+ /* for 4K PTE fragment support */
+ void *pte_frag;
+#endif
} mm_context_t;
@@ -446,14 +564,6 @@ typedef struct {
})
#endif /* 1 */
-/* This is only valid for addresses >= PAGE_OFFSET */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
- if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble(ea >> SID_SHIFT, 256M);
- return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
-}
-
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
@@ -463,25 +573,41 @@ static inline int user_segment_size(unsigned long addr)
return MMU_SEGSIZE_256M;
}
-/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
+ /*
+ * Bad address. We return VSID 0 for that
+ */
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+ return 0;
+
if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << USER_ESID_BITS)
+ return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M);
- return vsid_scramble((context << USER_ESID_BITS_1T)
+ return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T);
}
/*
- * This is only used on legacy iSeries in lparmap.c,
- * hence the 256MB segment assumption.
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*/
-#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
- VSID_MODULUS_256M)
-#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ unsigned long context;
+ /*
+ * kernel take the top 4 context from the available range
+ */
+ context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+ return get_vsid(context, ea, ssize);
+}
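[A worked instance of the arithmetic; the values follow directly from the mapping table in the comment above:

    unsigned long ea = 0xd000000000000000UL;        /* the 0xd region */
    unsigned long context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
    /* == 0x7fffb + 1 + 1 == 0x7fffd, the context listed for 0xd above */
]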
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index bb40a06d3b7..e61f24ed4e6 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
/*
* This is individual features
@@ -56,11 +55,6 @@
*/
#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
-/* This indicates that the processor uses the ISA 2.06 server tlbie
- * mnemonics
- */
-#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000)
-
/* Enable use of TLB reservation. Processor should support tlbsrx.
* instruction and MAS0[WQ].
*/
@@ -70,17 +64,68 @@
*/
#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
+/* MMU is SLB-based
+ */
+#define MMU_FTR_SLB ASM_CONST(0x02000000)
+
+/* Support 16M large pages
+ */
+#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000)
+
+/* Supports TLBIEL variant
+ */
+#define MMU_FTR_TLBIEL ASM_CONST(0x08000000)
+
+/* Supports tlbies w/o locking
+ */
+#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000)
+
+/* Large pages can be marked CI
+ */
+#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000)
+
+/* 1T segments available
+ */
+#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
+
+/* Doesn't support the B bit (1T segment) in SLBIE
+ */
+#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000)
+
+/* MMU feature bit sets for various CPUs */
+#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
+ MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
+#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
+#define MMU_FTRS_PPC970 MMU_FTRS_POWER4
+#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE
+#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+#endif
+
static inline int mmu_has_feature(unsigned long feature)
{
return (cur_cpu_spec->mmu_features & feature);
}
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ cur_cpu_spec->mmu_features &= ~feature;
+}
+
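[Runtime code selects behaviour from these bits rather than from CPU model checks; a small sketch, where the quirk condition is hypothetical and the MMU_SEGSIZE_* constants live in the hash64 header:

    /* Pick the segment size indicator from 1T-segment support. */
    int ssize = mmu_has_feature(MMU_FTR_1T_SEGMENT) ? MMU_SEGSIZE_1T
                                                    : MMU_SEGSIZE_256M;

    /* A platform quirk path can drop a feature again: */
    if (tlbiel_is_broken)                    /* hypothetical condition */
            mmu_clear_feature(MMU_FTR_TLBIEL);
]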
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
-/* MMU initialization (64-bit only fo now) */
+/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
@@ -94,6 +139,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
extern u64 ppc64_rma_size;
#endif /* CONFIG_PPC64 */
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
@@ -111,26 +165,24 @@ extern u64 ppc64_rma_size;
* to think about, feedback welcome. --BenH.
*/
-/* There are #define as they have to be used in assembly
- *
- * WARNING: If you change this list, make sure to update the array of
- * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
- * happen
- */
+/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K 0
#define MMU_PAGE_16K 1
#define MMU_PAGE_64K 2
#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K 4
#define MMU_PAGE_1M 5
-#define MMU_PAGE_8M 6
-#define MMU_PAGE_16M 7
-#define MMU_PAGE_256M 8
-#define MMU_PAGE_1G 9
-#define MMU_PAGE_16G 10
-#define MMU_PAGE_64G 11
-#define MMU_PAGE_COUNT 12
-
+#define MMU_PAGE_2M 6
+#define MMU_PAGE_4M 7
+#define MMU_PAGE_8M 8
+#define MMU_PAGE_16M 9
+#define MMU_PAGE_64M 10
+#define MMU_PAGE_256M 11
+#define MMU_PAGE_1G 12
+#define MMU_PAGE_16G 13
+#define MMU_PAGE_64G 14
+
+#define MMU_PAGE_COUNT 15
#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 81fb41289d6..b467530e248 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -32,9 +32,13 @@ extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
+extern void switch_cop(struct mm_struct *next);
+extern int use_cop(unsigned long acop, struct mm_struct *mm);
+extern void drop_cop(unsigned long acop, struct mm_struct *mm);
+
/*
* switch_mm is the entry point called from the architecture independent
- * code in kernel/sched.c
+ * code in kernel/sched/core.c
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -55,6 +59,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev == next)
return;
+#ifdef CONFIG_PPC_ICSWX
+ /* Switch coprocessor context only if prev or next uses a coprocessor */
+ if (prev->context.acop || next->context.acop)
+ switch_cop(next);
+#endif /* CONFIG_PPC_ICSWX */
+
/* We must stop all altivec streams before changing the HW
* context
*/
@@ -67,7 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* sub architectures.
*/
#ifdef CONFIG_PPC_STD_MMU_64
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
switch_slb(tsk, next);
else
switch_stab(tsk, next);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cbceb5..7b589178be4 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,15 +33,13 @@ extern int numa_cpu_lookup_table[];
extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
#endif
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
-
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NEED_MULTIPLE_NODES */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 0192a4ee2bc..dcfcad139bc 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <asm/bug.h>
+#include <asm-generic/module.h>
#ifndef __powerpc64__
@@ -34,6 +35,7 @@ struct mod_arch_specific {
#ifdef __powerpc64__
unsigned int stubs_section; /* Index of stubs section in module */
unsigned int toc_section; /* What section is the TOC? */
+ bool toc_fixed; /* Have we fixed up .TOC.? */
#ifdef CONFIG_DYNAMIC_FTRACE
unsigned long toc;
unsigned long tramp;
@@ -60,16 +62,10 @@ struct mod_arch_specific {
*/
#ifdef __powerpc64__
-# define Elf_Shdr Elf64_Shdr
-# define Elf_Sym Elf64_Sym
-# define Elf_Ehdr Elf64_Ehdr
# ifdef MODULE
asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
# endif
#else
-# define Elf_Shdr Elf32_Shdr
-# define Elf_Sym Elf32_Sym
-# define Elf_Ehdr Elf32_Ehdr
# ifdef MODULE
asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
@@ -82,15 +78,17 @@ struct mod_arch_specific {
# endif /* MODULE */
#endif
+bool is_module_trampoline(u32 *insns);
+int module_trampoline_target(struct module *mod, u32 *trampoline,
+ unsigned long *target);
struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
-#ifdef CONFIG_MODVERSIONS
+#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
-
-extern const unsigned long reloc_start[];
+#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 8c0ab2ca689..4a69cd1d504 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -32,25 +32,32 @@ struct mpc512x_ccm {
u32 scfr2; /* System Clock Frequency Register 2 */
u32 scfr2s; /* System Clock Frequency Shadow Register 2 */
u32 bcr; /* Bread Crumb Register */
- u32 p0ccr; /* PSC0 Clock Control Register */
- u32 p1ccr; /* PSC1 CCR */
- u32 p2ccr; /* PSC2 CCR */
- u32 p3ccr; /* PSC3 CCR */
- u32 p4ccr; /* PSC4 CCR */
- u32 p5ccr; /* PSC5 CCR */
- u32 p6ccr; /* PSC6 CCR */
- u32 p7ccr; /* PSC7 CCR */
- u32 p8ccr; /* PSC8 CCR */
- u32 p9ccr; /* PSC9 CCR */
- u32 p10ccr; /* PSC10 CCR */
- u32 p11ccr; /* PSC11 CCR */
+ u32 psc_ccr[12]; /* PSC Clock Control Registers */
u32 spccr; /* SPDIF Clock Control Register */
u32 cccr; /* CFM Clock Control Register */
u32 dccr; /* DIU Clock Control Register */
- u32 m1ccr; /* MSCAN1 CCR */
- u32 m2ccr; /* MSCAN2 CCR */
- u32 m3ccr; /* MSCAN3 CCR */
- u32 m4ccr; /* MSCAN4 CCR */
- u8 res[0x98]; /* Reserved */
+ u32 mscan_ccr[4]; /* MSCAN Clock Control Registers */
+ u32 out_ccr[4]; /* OUT CLK Configure Registers */
+ u32 rsv0[2]; /* Reserved */
+ u32 scfr3; /* System Clock Frequency Register 3 */
+ u32 rsv1[3]; /* Reserved */
+ u32 spll_lock_cnt; /* System PLL Lock Counter */
+ u8 res[0x6c]; /* Reserved */
};
+
+/*
+ * LPC Module
+ */
+struct mpc512x_lpc {
+ u32 cs_cfg[8]; /* CS config */
+ u32 cs_ctrl; /* CS Control Register */
+ u32 cs_status; /* CS Status Register */
+ u32 burst_ctrl; /* CS Burst Control Register */
+ u32 deadcycle_ctrl; /* CS Deadcycle Control Register */
+ u32 holdcycle_ctrl; /* CS Holdcycle Control Register */
+ u32 alt; /* Address Latch Timing Register */
+};
+
+int mpc512x_cs_config(unsigned int cs, u32 val);
+
#endif /* __ASM_POWERPC_MPC5121_H__ */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1f41382eda3..0acc7c7c28d 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -307,6 +307,7 @@ struct mpc52xx_lpbfifo_request {
size_t size;
size_t pos; /* current position of transfer */
int flags;
+ int defer_xfer_start;
/* What to do when finished */
void (*callback)(struct mpc52xx_lpbfifo_request *);
@@ -323,6 +324,7 @@ struct mpc52xx_lpbfifo_request {
extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_poll(void);
+extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 2966df60422..d0ece257d31 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -299,4 +299,53 @@ struct mpc512x_psc_fifo {
#define rxdata_32 rxdata.rxdata_32
};
+struct mpc5125_psc {
+ u8 mr1; /* PSC + 0x00 */
+ u8 reserved0[3];
+ u8 mr2; /* PSC + 0x04 */
+ u8 reserved1[3];
+ struct {
+ u16 status; /* PSC + 0x08 */
+ u8 reserved2[2];
+ u8 clock_select; /* PSC + 0x0c */
+ u8 reserved3[3];
+ } sr_csr;
+ u8 command; /* PSC + 0x10 */
+ u8 reserved4[3];
+ union { /* PSC + 0x14 */
+ u8 buffer_8;
+ u16 buffer_16;
+ u32 buffer_32;
+ } buffer;
+ struct {
+ u8 ipcr; /* PSC + 0x18 */
+ u8 reserved5[3];
+ u8 acr; /* PSC + 0x1c */
+ u8 reserved6[3];
+ } ipcr_acr;
+ struct {
+ u16 isr; /* PSC + 0x20 */
+ u8 reserved7[2];
+ u16 imr; /* PSC + 0x24 */
+ u8 reserved8[2];
+ } isr_imr;
+ u8 ctur; /* PSC + 0x28 */
+ u8 reserved9[3];
+ u8 ctlr; /* PSC + 0x2c */
+ u8 reserved10[3];
+ u32 ccr; /* PSC + 0x30 */
+ u32 ac97slots; /* PSC + 0x34 */
+ u32 ac97cmd; /* PSC + 0x38 */
+ u32 ac97data; /* PSC + 0x3c */
+ u8 reserved11[4];
+ u8 ip; /* PSC + 0x44 */
+ u8 reserved12[3];
+ u8 op1; /* PSC + 0x48 */
+ u8 reserved13[3];
+ u8 op0; /* PSC + 0x4c */
+ u8 reserved14[3];
+ u32 sicr; /* PSC + 0x50 */
+	u8		reserved15[4];	/* pad to sizeof(struct mpc52xx_psc) */
+};
+
#endif /* __ASM_MPC52xx_PSC_H__ */
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 00000000000..736d4acc05a
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,92 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_PPC_MPC85XX_H
+#define __ASM_PPC_MPC85XX_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SoC design revision */
+#define SVR_MAJ(svr)	(((svr) >> 4) & 0xF)	/* Major revision field */
+#define SVR_MIN(svr)	(((svr) >> 0) & 0xF)	/* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
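[Decoding follows directly from the field layout; for example (a sketch -- mfspr and SPRN_SVR come from <asm/reg.h>):

    u32 svr = mfspr(SPRN_SVR);

    if (SVR_SOC_VER(svr) == SVR_P2020)
            pr_info("P2020 rev %d.%d\n", SVR_MAJ(svr), SVR_MIN(svr));
]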
+#define SVR_8533 0x803400
+#define SVR_8535 0x803701
+#define SVR_8536 0x803700
+#define SVR_8540 0x803000
+#define SVR_8541 0x807200
+#define SVR_8543 0x803200
+#define SVR_8544 0x803401
+#define SVR_8545 0x803102
+#define SVR_8547 0x803101
+#define SVR_8548 0x803100
+#define SVR_8555 0x807100
+#define SVR_8560 0x807000
+#define SVR_8567 0x807501
+#define SVR_8568 0x807500
+#define SVR_8569 0x808000
+#define SVR_8572 0x80E000
+#define SVR_P1010 0x80F100
+#define SVR_P1011 0x80E500
+#define SVR_P1012 0x80E501
+#define SVR_P1013 0x80E700
+#define SVR_P1014 0x80F101
+#define SVR_P1017 0x80F700
+#define SVR_P1020 0x80E400
+#define SVR_P1021 0x80E401
+#define SVR_P1022 0x80E600
+#define SVR_P1023 0x80F600
+#define SVR_P1024 0x80E402
+#define SVR_P1025 0x80E403
+#define SVR_P2010 0x80E300
+#define SVR_P2020 0x80E200
+#define SVR_P2040 0x821000
+#define SVR_P2041 0x821001
+#define SVR_P3041 0x821103
+#define SVR_P4040 0x820100
+#define SVR_P4080 0x820000
+#define SVR_P5010 0x822100
+#define SVR_P5020 0x822000
+#define SVR_P5021	0x820500
+#define SVR_P5040 0x820400
+#define SVR_T4240 0x824000
+#define SVR_T4120 0x824001
+#define SVR_T4160 0x824100
+#define SVR_C291 0x850000
+#define SVR_C292 0x850020
+#define SVR_C293 0x850030
+#define SVR_B4860	0x868000
+#define SVR_G4860 0x868001
+#define SVR_G4060 0x868003
+#define SVR_B4440 0x868100
+#define SVR_G4440 0x868101
+#define SVR_B4420 0x868102
+#define SVR_B4220 0x868103
+#define SVR_T1040 0x852000
+#define SVR_T1041 0x852001
+#define SVR_T1042 0x852002
+#define SVR_T1020 0x852100
+#define SVR_T1021 0x852101
+#define SVR_T1022 0x852102
+
+#define SVR_8610 0x80A000
+#define SVR_8641 0x809000
+#define SVR_8641D 0x809001
+
+#define SVR_9130 0x860001
+#define SVR_9131 0x860000
+#define SVR_9132 0x861000
+#define SVR_9232 0x861400
+
+#define SVR_Unknown 0xFFFFFF
+
+#endif
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index e000cce8f6d..754f93d208f 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -3,7 +3,6 @@
#ifdef __KERNEL__
#include <linux/irq.h>
-#include <linux/sysdev.h>
#include <asm/dcr.h>
#include <asm/msi_bitmap.h>
@@ -64,6 +63,7 @@
*/
#define MPIC_TIMER_BASE 0x01100
#define MPIC_TIMER_STRIDE 0x40
+#define MPIC_TIMER_GROUP_STRIDE 0x1000
#define MPIC_TIMER_CURRENT_CNT 0x00000
#define MPIC_TIMER_BASE_CNT 0x00010
@@ -111,10 +111,16 @@
#define MPIC_VECPRI_SENSE_MASK 0x00400000
#define MPIC_IRQ_DESTINATION 0x00010
+#define MPIC_FSL_BRR1 0x00000
+#define MPIC_FSL_BRR1_VER 0x0000ffff
+
#define MPIC_MAX_IRQ_SOURCES 2048
#define MPIC_MAX_CPUS 32
#define MPIC_MAX_ISU 32
+#define MPIC_MAX_ERR 32
+#define MPIC_FSL_ERR_INT 16
+
/*
* Tsi108 implementation of MPIC has many differences from the original one
*/
@@ -252,8 +258,11 @@ struct mpic_irq_save {
/* The instance data of a given MPIC */
struct mpic
{
+ /* The OpenFirmware dt node for this MPIC */
+ struct device_node *node;
+
/* The remapper for this MPIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
@@ -263,6 +272,8 @@ struct mpic
#ifdef CONFIG_SMP
struct irq_chip hc_ipi;
#endif
+ struct irq_chip hc_tm;
+ struct irq_chip hc_err;
const char *name;
/* Flags */
unsigned int flags;
@@ -270,18 +281,14 @@ struct mpic
unsigned int isu_size;
unsigned int isu_shift;
unsigned int isu_mask;
- unsigned int irq_count;
/* Number of sources */
unsigned int num_sources;
- /* Number of CPUs */
- unsigned int num_cpus;
- /* default senses array */
- unsigned char *senses;
- unsigned int senses_count;
/* vector numbers used for internal sources (ipi/timers) */
unsigned int ipi_vecs[4];
- unsigned int timer_vecs[4];
+ unsigned int timer_vecs[8];
+ /* vector numbers used for FSL MPIC error interrupts */
+ unsigned int err_int_vecs[MPIC_MAX_ERR];
/* Spurious vector to program into unused sources */
unsigned int spurious_vec;
@@ -295,12 +302,19 @@ struct mpic
/* Register access method */
enum mpic_reg_type reg_type;
+ /* The physical base address of the MPIC */
+ phys_addr_t paddr;
+
/* The various ioremap'ed bases */
+ struct mpic_reg_bank thiscpuregs;
struct mpic_reg_bank gregs;
struct mpic_reg_bank tmregs;
struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS];
struct mpic_reg_bank isus[MPIC_MAX_ISU];
+ /* ioremap'ed base for error interrupt registers */
+ u32 __iomem *err_regs;
+
/* Protected sources */
unsigned long *protected;
@@ -320,13 +334,13 @@ struct mpic
/* link */
struct mpic *next;
- struct sys_device sysdev;
-
#ifdef CONFIG_PM
struct mpic_irq_save *save_data;
#endif
};
+extern struct bus_type mpic_subsys;
+
/*
* MPIC flags (passed to mpic_alloc)
*
@@ -335,11 +349,11 @@ struct mpic
* Note setting any ID (leaving those bits to 0) means standard MPIC
*/
-/* This is the primary controller, only that one has IPIs and
- * has afinity control. A non-primary MPIC always uses CPU0
- * registers only
+/*
+ * This is a secondary ("chained") controller; it only uses the CPU0
+ * registers. Primary controllers have IPIs and affinity control.
*/
-#define MPIC_PRIMARY 0x00000001
+#define MPIC_SECONDARY 0x00000001
/* Set this for a big-endian MPIC */
#define MPIC_BIG_ENDIAN 0x00000002
@@ -347,8 +361,6 @@ struct mpic
#define MPIC_U3_HT_IRQS 0x00000004
/* Broken IPI registers (autodetected) */
#define MPIC_BROKEN_IPI 0x00000008
-/* MPIC wants a reset */
-#define MPIC_WANTS_RESET 0x00000010
/* Spurious vector requires EOI */
#define MPIC_SPV_EOI 0x00000020
/* No passthrough disable */
@@ -361,12 +373,19 @@ struct mpic
#define MPIC_ENABLE_MCK 0x00000200
/* Disable bias among target selection, spread interrupts evenly */
#define MPIC_NO_BIAS 0x00000400
-/* Ignore NIRQS as reported by FRR */
-#define MPIC_BROKEN_FRR_NIRQS 0x00000800
/* Destination only supports a single CPU at a time */
#define MPIC_SINGLE_DEST_CPU 0x00001000
/* Enable CoreInt delivery of interrupts */
#define MPIC_ENABLE_COREINT 0x00002000
+/* Do not reset the MPIC during initialization */
+#define MPIC_NO_RESET 0x00004000
+/* Freescale MPIC (compatible includes "fsl,mpic") */
+#define MPIC_FSL 0x00008000
+/* Freescale MPIC supports EIMR (error interrupt mask register).
+ * This flag is set for MPIC version >= 4.1 (version determined
+ * from the BRR1 register).
+ */
+#define MPIC_FSL_HAS_EIMR 0x00010000
/* MPIC HW modification ID */
#define MPIC_REGSET_MASK 0xf0000000
@@ -376,6 +395,16 @@ struct mpic
#define MPIC_REGSET_STANDARD MPIC_REGSET(0) /* Original MPIC */
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
+/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
+extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+ return 0;
+}
+#endif
+
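[Platform code uses this helper to gate version-dependent features such as the EIMR flag above; a sketch, where treating 0x0401 as the BRR1 encoding of version 4.1 is an assumption based on the MPIC_FSL_BRR1_VER field:

    /* EIMR exists on FSL MPIC v4.1 and later. */
    bool has_eimr = fsl_mpic_primary_get_version() >= 0x0401;
]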
/* Allocate the controller structure and setup the linux irq descs
 * for the range of interrupts passed in. No HW initialization is
* actually performed.
@@ -414,21 +443,6 @@ extern struct mpic *mpic_alloc(struct device_node *node,
extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
phys_addr_t phys_addr);
-/* Set default sense codes
- *
- * @mpic: controller
- * @senses: array of sense codes
- * @count: size of above array
- *
- * Optionally provide an array (indexed on hardware interrupt numbers
- * for this MPIC) of default sense codes for the chip. Those are linux
- * sense codes IRQ_TYPE_*
- *
- * The driver gets ownership of the pointer, don't dispose of it or
- * anything like that. __init only.
- */
-extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
-
/* Initialize the controller. After this has been called, none of the above
* should be called again for this mpic
@@ -467,11 +481,11 @@ extern void mpic_request_ipis(void);
void smp_mpic_message_pass(int target, int msg);
/* Unmask a specific virq */
-extern void mpic_unmask_irq(unsigned int irq);
+extern void mpic_unmask_irq(struct irq_data *d);
/* Mask a specific virq */
-extern void mpic_mask_irq(unsigned int irq);
+extern void mpic_mask_irq(struct irq_data *d);
/* EOI a specific virq */
-extern void mpic_end_irq(unsigned int irq);
+extern void mpic_end_irq(struct irq_data *d);
/* Fetch interrupt from a given mpic */
extern unsigned int mpic_get_one_irq(struct mpic *mpic);
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
new file mode 100644
index 00000000000..d4f471fb103
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifndef _ASM_MPIC_MSGR_H
+#define _ASM_MPIC_MSGR_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/smp.h>
+#include <asm/io.h>
+
+struct mpic_msgr {
+ u32 __iomem *base;
+ u32 __iomem *mer;
+ int irq;
+ unsigned char in_use;
+ raw_spinlock_t lock;
+ int num;
+};
+
+/* Get a message register
+ *
+ * @reg_num: the MPIC message register to get
+ *
+ * A pointer to the message register is returned. If
+ * the message register asked for is already in use, then
+ * EBUSY is returned. If the number given is not associated
+ * with an actual message register, then ENODEV is returned.
+ * Successfully getting the register marks it as in use.
+ */
+extern struct mpic_msgr *mpic_msgr_get(unsigned int reg_num);
+
+/* Relinquish a message register
+ *
+ * @msgr: the message register to return
+ *
+ * Disables the given message register and marks it as free.
+ * After this call has completed successfully the message
+ * register is available to be acquired by a call to
+ * mpic_msgr_get.
+ */
+extern void mpic_msgr_put(struct mpic_msgr *msgr);
+
+/* Enable a message register
+ *
+ * @msgr: the message register to enable
+ *
+ * The given message register is enabled for sending
+ * messages.
+ */
+extern void mpic_msgr_enable(struct mpic_msgr *msgr);
+
+/* Disable a message register
+ *
+ * @msgr: the message register to disable
+ *
+ * The given message register is disabled for sending
+ * messages.
+ */
+extern void mpic_msgr_disable(struct mpic_msgr *msgr);
+
+/* Write a message to a message register
+ *
+ * @msgr: the message register to write to
+ * @message: the message to write
+ *
+ * The given 32-bit message is written to the given message
+ * register. Writing to an enabled message register fires
+ * an interrupt.
+ */
+static inline void mpic_msgr_write(struct mpic_msgr *msgr, u32 message)
+{
+ out_be32(msgr->base, message);
+}
+
+/* Read a message from a message register
+ *
+ * @msgr: the message register to read from
+ *
+ * Returns the 32-bit value currently in the given message register.
+ * Upon reading the register any interrupts for that register are
+ * cleared.
+ */
+static inline u32 mpic_msgr_read(struct mpic_msgr *msgr)
+{
+ return in_be32(msgr->base);
+}
+
+/* Clear a message register
+ *
+ * @msgr: the message register to clear
+ *
+ * Clears any interrupts associated with the given message register.
+ */
+static inline void mpic_msgr_clear(struct mpic_msgr *msgr)
+{
+ (void) mpic_msgr_read(msgr);
+}
+
+/* Set the destination CPU for the message register
+ *
+ * @msgr: the message register whose destination is to be set
+ * @cpu_num: the Linux CPU number to bind the message register to
+ *
+ * Note that the CPU number given is the CPU number used by the kernel
+ * and *not* the actual hardware CPU number.
+ */
+static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
+ u32 cpu_num)
+{
+ out_be32(msgr->base, 1 << get_hard_smp_processor_id(cpu_num));
+}
+
+/* Get the IRQ number for the message register
+ * @msgr: the message register whose IRQ is to be returned
+ *
+ * Returns the IRQ number associated with the given message register.
+ * NO_IRQ is returned if this message register is not capable of
+ * receiving interrupts. Which message registers can and cannot receive
+ * interrupts is specified in the device tree for the system.
+ */
+static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
+{
+ return msgr->irq;
+}
+
+#endif
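[End to end, the sender side looks roughly like the sketch below, assuming (per the mpic_msgr_get() notes above) that errors come back as ERR_PTR() values; the function name is hypothetical:

    #include <linux/err.h>
    #include <asm/mpic_msgr.h>

    /* Claim message register 0 and signal Linux CPU 1. */
    static int demo_send_message(u32 message)
    {
            struct mpic_msgr *msgr = mpic_msgr_get(0);

            if (IS_ERR(msgr))
                    return PTR_ERR(msgr);    /* busy or nonexistent register */

            mpic_msgr_set_destination(msgr, 1);
            mpic_msgr_enable(msgr);
            mpic_msgr_write(msgr, message);  /* fires the receiver's interrupt */
            mpic_msgr_put(msgr);
            return 0;
    }
]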
diff --git a/arch/powerpc/include/asm/mpic_timer.h b/arch/powerpc/include/asm/mpic_timer.h
new file mode 100644
index 00000000000..0e23cd4ac8a
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_timer.h
@@ -0,0 +1,46 @@
+/*
+ * arch/powerpc/include/asm/mpic_timer.h
+ *
+ * Header file for Mpic Global Timer
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Wang Dongsheng <Dongsheng.Wang@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MPIC_TIMER__
+#define __MPIC_TIMER__
+
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+struct mpic_timer {
+ void *dev;
+ struct cascade_priv *cascade_handle;
+ unsigned int num;
+ unsigned int irq;
+};
+
+#ifdef CONFIG_MPIC_TIMER
+struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
+ const struct timeval *time);
+void mpic_start_timer(struct mpic_timer *handle);
+void mpic_stop_timer(struct mpic_timer *handle);
+void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time);
+void mpic_free_timer(struct mpic_timer *handle);
+#else
+static inline struct mpic_timer *mpic_request_timer(irq_handler_t fn,
+		void *dev, const struct timeval *time) { return NULL; }
+static inline void mpic_start_timer(struct mpic_timer *handle) { }
+static inline void mpic_stop_timer(struct mpic_timer *handle) { }
+static inline void mpic_get_remain_time(struct mpic_timer *handle,
+		struct timeval *time) { }
+static inline void mpic_free_timer(struct mpic_timer *handle) { }
+#endif
+
+#endif
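[Usage follows a request/start/stop/free lifecycle; a minimal sketch, with hypothetical handler and caller names:

    #include <asm/mpic_timer.h>

    static irqreturn_t demo_timeout(int irq, void *dev)
    {
            return IRQ_HANDLED;     /* timer fired */
    }

    static int demo_arm_timer(void *dev)
    {
            struct timeval tv = { .tv_sec = 5 };    /* fire in five seconds */
            struct mpic_timer *t = mpic_request_timer(demo_timeout, dev, &tv);

            if (!t)
                    return -ENODEV;
            mpic_start_timer(t);
            return 0;
    }
]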
diff --git a/arch/powerpc/include/asm/msgbuf.h b/arch/powerpc/include/asm/msgbuf.h
deleted file mode 100644
index dd76743c753..00000000000
--- a/arch/powerpc/include/asm/msgbuf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _ASM_POWERPC_MSGBUF_H
-#define _ASM_POWERPC_MSGBUF_H
-
-/*
- * The msqid64_ds structure for the PowerPC architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
-#ifndef __powerpc64__
- unsigned int __unused1;
-#endif
- __kernel_time_t msg_stime; /* last msgsnd time */
-#ifndef __powerpc64__
- unsigned int __unused2;
-#endif
- __kernel_time_t msg_rtime; /* last msgrcv time */
-#ifndef __powerpc64__
- unsigned int __unused3;
-#endif
- __kernel_time_t msg_ctime; /* last change time */
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _ASM_POWERPC_MSGBUF_H */
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 5399f7e1810..127ab23e1f6 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
*
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
*/
static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
{
if (unlikely(__mutex_dec_return_lock(count) < 0))
- return fail_fn(count);
+ return -1;
return 0;
}
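[With the fail_fn callback gone, choosing the slowpath moves to the generic caller; schematically (a sketch of the kernel/mutex.c side, not part of this header):

    int ret = __mutex_fastpath_lock_retval(&lock->count);

    if (unlikely(ret))
            ret = __mutex_lock_interruptible_slowpath(lock);
]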
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f2744..b0fe0fe4e62 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -6,75 +6,22 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
#ifndef _ASM_POWERPC_NVRAM_H
#define _ASM_POWERPC_NVRAM_H
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0 0x74
-#define NVRAM_AS1 0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS 0x1FF9
-#define MOTO_RTC_MINUTES 0x1FFA
-#define MOTO_RTC_HOURS 0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
-#define MOTO_RTC_MONTH 0x1FFE
-#define MOTO_RTC_YEAR 0x1FFF
-#define MOTO_RTC_CONTROLA 0x1FF8
-#define MOTO_RTC_CONTROLB 0x1FF9
-
-#define NVRAM_SIG_SP 0x02 /* support processor */
-#define NVRAM_SIG_OF 0x50 /* open firmware config */
-#define NVRAM_SIG_FW 0x51 /* general firmware */
-#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */
-#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */
-#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) */
-#define NVRAM_SIG_SYS 0x70 /* system env vars */
-#define NVRAM_SIG_CFG 0x71 /* config data */
-#define NVRAM_SIG_ELOG 0x72 /* error log */
-#define NVRAM_SIG_VEND 0x7e /* vendor defined */
-#define NVRAM_SIG_FREE 0x7f /* Free space */
-#define NVRAM_SIG_OS 0xa0 /* OS defined */
-#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
-
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
- unsigned char signature;
- unsigned char checksum;
- unsigned short length;
- char name[12];
-};
-
-#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/list.h>
+#include <uapi/asm/nvram.h>
-struct nvram_partition {
- struct list_head partition;
- struct nvram_header header;
- unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
extern int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int err_seq);
extern int nvram_read_error_log(char * buff, int length,
unsigned int * err_type, unsigned int *err_seq);
extern int nvram_clear_error_log(void);
-
extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_MMIO_NVRAM
extern int mmio_nvram_init(void);
@@ -85,17 +32,14 @@ static inline int mmio_nvram_init(void)
}
#endif
-#endif /* __KERNEL__ */
-
-/* PowerMac specific nvram stuffs */
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+ int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig,
+ const char *exceptions[]);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
-enum {
- pmac_nvram_OF, /* Open Firmware partition */
- pmac_nvram_XPRAM, /* MacOS XPRAM partition */
- pmac_nvram_NR /* MacOS Name Registry partition */
-};
-
-#ifdef __KERNEL__
/* Return partition offset in nvram */
extern int pmac_get_partition(int partition);
@@ -112,30 +56,4 @@ extern ssize_t nvram_get_size(void);
/* Normal access to NVRAM */
extern unsigned char nvram_read_byte(int i);
extern void nvram_write_byte(unsigned char c, int i);
-#endif
-
-/* Some offsets in XPRAM */
-#define PMAC_XPRAM_MACHINE_LOC 0xe4
-#define PMAC_XPRAM_SOUND_VOLUME 0x08
-
-/* Machine location structure in PowerMac XPRAM */
-struct pmac_machine_location {
- unsigned int latitude; /* 2+30 bit Fractional number */
- unsigned int longitude; /* 2+30 bit Fractional number */
- unsigned int delta; /* mix of GMT delta and DLS */
-};
-
-/*
- * /dev/nvram ioctls
- *
- * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
- * definitely obsolete. Do not use it if you can avoid it
- */
-
-#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
- _IOWR('p', 0x40, int)
-
-#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
-#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
-
#endif /* _ASM_POWERPC_NVRAM_H */
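
A rough sketch of how the partition API kept above is meant to be used, loosely
following the pseries nvram code; the partition name and the req_size/min_size
variables here are illustrative only.

    loff_t p;
    int size;

    /* reuse an existing partition if present, else carve out a new one */
    p = nvram_find_partition("demo-log", NVRAM_SIG_OS, &size);
    if (p <= 0)
            p = nvram_create_partition("demo-log", NVRAM_SIG_OS,
                                       req_size, min_size);
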
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
new file mode 100644
index 00000000000..0da1dbd42e0
--- /dev/null
+++ b/arch/powerpc/include/asm/opal.h
@@ -0,0 +1,917 @@
+/*
+ * PowerNV OPAL definitions.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __OPAL_H
+#define __OPAL_H
+
+#ifndef __ASSEMBLY__
+/*
+ * SG entry
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned, *and* every block
+ * size except the last one in the list must be 4k aligned too.
+ */
+struct opal_sg_entry {
+ __be64 data;
+ __be64 length;
+};
+
+/* SG list */
+struct opal_sg_list {
+ __be64 length;
+ __be64 next;
+ struct opal_sg_entry entry[];
+};
+
+/* We calculate the number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
+#endif /* __ASSEMBLY__ */
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS 0
+#define OPAL_PARAMETER -1
+#define OPAL_BUSY -2
+#define OPAL_PARTIAL -3
+#define OPAL_CONSTRAINED -4
+#define OPAL_CLOSED -5
+#define OPAL_HARDWARE -6
+#define OPAL_UNSUPPORTED -7
+#define OPAL_PERMISSION -8
+#define OPAL_NO_MEM -9
+#define OPAL_RESOURCE -10
+#define OPAL_INTERNAL_ERROR -11
+#define OPAL_BUSY_EVENT -12
+#define OPAL_HARDWARE_FROZEN -13
+#define OPAL_WRONG_STATE -14
+#define OPAL_ASYNC_COMPLETION -15
+
+/* API Tokens (in r0) */
+#define OPAL_INVALID_CALL -1
+#define OPAL_CONSOLE_WRITE 1
+#define OPAL_CONSOLE_READ 2
+#define OPAL_RTC_READ 3
+#define OPAL_RTC_WRITE 4
+#define OPAL_CEC_POWER_DOWN 5
+#define OPAL_CEC_REBOOT 6
+#define OPAL_READ_NVRAM 7
+#define OPAL_WRITE_NVRAM 8
+#define OPAL_HANDLE_INTERRUPT 9
+#define OPAL_POLL_EVENTS 10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
+#define OPAL_PCI_CONFIG_READ_BYTE 13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
+#define OPAL_PCI_CONFIG_READ_WORD 15
+#define OPAL_PCI_CONFIG_WRITE_BYTE 16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
+#define OPAL_PCI_CONFIG_WRITE_WORD 18
+#define OPAL_SET_XIVE 19
+#define OPAL_GET_XIVE 20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
+#define OPAL_PCI_EEH_FREEZE_STATUS 23
+#define OPAL_PCI_SHPC 24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
+#define OPAL_PCI_EEH_FREEZE_CLEAR 26
+#define OPAL_PCI_PHB_MMIO_ENABLE 27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
+#define OPAL_PCI_SET_PE 31
+#define OPAL_PCI_SET_PELTV 32
+#define OPAL_PCI_SET_MVE 33
+#define OPAL_PCI_SET_MVE_ENABLE 34
+#define OPAL_PCI_GET_XIVE_REISSUE 35
+#define OPAL_PCI_SET_XIVE_REISSUE 36
+#define OPAL_PCI_SET_XIVE_PE 37
+#define OPAL_GET_XIVE_SOURCE 38
+#define OPAL_GET_MSI_32 39
+#define OPAL_GET_MSI_64 40
+#define OPAL_START_CPU 41
+#define OPAL_QUERY_CPU_STATUS 42
+#define OPAL_WRITE_OPPANEL 43
+#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
+#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_RESERVED1 58
+#define OPAL_RESERVED2 59
+#define OPAL_PCI_NEXT_ERROR 60
+#define OPAL_PCI_EEH_FREEZE_STATUS2 61
+#define OPAL_PCI_POLL 62
+#define OPAL_PCI_MSI_EOI 63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
+#define OPAL_REINIT_CPUS 70
+#define OPAL_ELOG_READ 71
+#define OPAL_ELOG_WRITE 72
+#define OPAL_ELOG_ACK 73
+#define OPAL_ELOG_RESEND 74
+#define OPAL_ELOG_SIZE 75
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
+#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_DUMP_INIT 81
+#define OPAL_DUMP_INFO 82
+#define OPAL_DUMP_READ 83
+#define OPAL_DUMP_ACK 84
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
+#define OPAL_SENSOR_READ 88
+#define OPAL_GET_PARAM 89
+#define OPAL_SET_PARAM 90
+#define OPAL_DUMP_RESEND 91
+#define OPAL_DUMP_INFO2 94
+
+#ifndef __ASSEMBLY__
+
+#include <linux/notifier.h>
+
+/* Other enums */
+enum OpalVendorApiTokens {
+ OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
+};
+
+enum OpalFreezeState {
+ OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+ OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+ OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+ OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+ OPAL_EEH_STOPPED_RESET = 4,
+ OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+ OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+
+enum OpalEehFreezeActionToken {
+ OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3
+};
+
+enum OpalPciStatusToken {
+ OPAL_EEH_NO_ERROR = 0,
+ OPAL_EEH_IOC_ERROR = 1,
+ OPAL_EEH_PHB_ERROR = 2,
+ OPAL_EEH_PE_ERROR = 3,
+ OPAL_EEH_PE_MMIO_ERROR = 4,
+ OPAL_EEH_PE_DMA_ERROR = 5
+};
+
+enum OpalPciErrorSeverity {
+ OPAL_EEH_SEV_NO_ERROR = 0,
+ OPAL_EEH_SEV_IOC_DEAD = 1,
+ OPAL_EEH_SEV_PHB_DEAD = 2,
+ OPAL_EEH_SEV_PHB_FENCED = 3,
+ OPAL_EEH_SEV_PE_ER = 4,
+ OPAL_EEH_SEV_INF = 5
+};
+
+enum OpalShpcAction {
+ OPAL_SHPC_GET_LINK_STATE = 0,
+ OPAL_SHPC_GET_SLOT_STATE = 1
+};
+
+enum OpalShpcLinkState {
+ OPAL_SHPC_LINK_DOWN = 0,
+ OPAL_SHPC_LINK_UP = 1
+};
+
+enum OpalMmioWindowType {
+ OPAL_M32_WINDOW_TYPE = 1,
+ OPAL_M64_WINDOW_TYPE = 2,
+ OPAL_IO_WINDOW_TYPE = 3
+};
+
+enum OpalShpcSlotState {
+ OPAL_SHPC_DEV_NOT_PRESENT = 0,
+ OPAL_SHPC_DEV_PRESENT = 1
+};
+
+enum OpalExceptionHandler {
+ OPAL_MACHINE_CHECK_HANDLER = 1,
+ OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+ OPAL_SOFTPATCH_HANDLER = 3
+};
+
+enum OpalPendingState {
+ OPAL_EVENT_OPAL_INTERNAL = 0x1,
+ OPAL_EVENT_NVRAM = 0x2,
+ OPAL_EVENT_RTC = 0x4,
+ OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100,
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_DUMP_AVAIL = 0x400,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+enum OpalMessageType {
+ OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
+ * additional params function-specific
+ */
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_TYPE_MAX,
+};
+
+/* Machine check related definitions */
+enum OpalMCE_Version {
+ OpalMCE_V1 = 1,
+};
+
+enum OpalMCE_Severity {
+ OpalMCE_SEV_NO_ERROR = 0,
+ OpalMCE_SEV_WARNING = 1,
+ OpalMCE_SEV_ERROR_SYNC = 2,
+ OpalMCE_SEV_FATAL = 3,
+};
+
+enum OpalMCE_Disposition {
+ OpalMCE_DISPOSITION_RECOVERED = 0,
+ OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalMCE_Initiator {
+ OpalMCE_INITIATOR_UNKNOWN = 0,
+ OpalMCE_INITIATOR_CPU = 1,
+};
+
+enum OpalMCE_ErrorType {
+ OpalMCE_ERROR_TYPE_UNKNOWN = 0,
+ OpalMCE_ERROR_TYPE_UE = 1,
+ OpalMCE_ERROR_TYPE_SLB = 2,
+ OpalMCE_ERROR_TYPE_ERAT = 3,
+ OpalMCE_ERROR_TYPE_TLB = 4,
+};
+
+enum OpalMCE_UeErrorType {
+ OpalMCE_UE_ERROR_INDETERMINATE = 0,
+ OpalMCE_UE_ERROR_IFETCH = 1,
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ OpalMCE_UE_ERROR_LOAD_STORE = 3,
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum OpalMCE_SlbErrorType {
+ OpalMCE_SLB_ERROR_INDETERMINATE = 0,
+ OpalMCE_SLB_ERROR_PARITY = 1,
+ OpalMCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum OpalMCE_EratErrorType {
+ OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
+ OpalMCE_ERAT_ERROR_PARITY = 1,
+ OpalMCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum OpalMCE_TlbErrorType {
+ OpalMCE_TLB_ERROR_INDETERMINATE = 0,
+ OpalMCE_TLB_ERROR_PARITY = 1,
+ OpalMCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+enum OpalThreadStatus {
+ OPAL_THREAD_INACTIVE = 0x0,
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
+};
+
+enum OpalPciBusCompare {
+ OpalPciBusAny = 0, /* Any bus number match */
+ OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
+ OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
+ OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
+ OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
+ OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
+ OpalPciBusAll = 7, /* Match bus number exactly */
+};
+
+enum OpalDeviceCompare {
+ OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+ OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+ OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+ OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+ OPAL_UNMAP_PE = 0,
+ OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
+enum OpalPciResetScope {
+ OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
+ OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
+ OPAL_PCI_IODA_TABLE_RESET = 6,
+};
+
+enum OpalPciReinitScope {
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
+};
+
+enum OpalPciMaskAction {
+ OPAL_UNMASK_ERROR_TYPE = 0,
+ OPAL_MASK_ERROR_TYPE = 1
+};
+
+enum OpalSlotLedType {
+ OPAL_SLOT_LED_ID_TYPE = 0,
+ OPAL_SLOT_LED_FAULT_TYPE = 1
+};
+
+enum OpalLedAction {
+ OPAL_TURN_OFF_LED = 0,
+ OPAL_TURN_ON_LED = 1,
+ OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
+};
+
+enum OpalEpowStatus {
+ OPAL_EPOW_NONE = 0,
+ OPAL_EPOW_UPS = 1,
+ OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
+ OPAL_EPOW_OVER_INTERNAL_TEMP = 3
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+ OPAL_SYSPARAM_READ = 0x1,
+ OPAL_SYSPARAM_WRITE = 0x2,
+ OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+struct opal_msg {
+ __be32 msg_type;
+ __be32 reserved;
+ __be64 params[8];
+};
+
+struct opal_machine_check_event {
+ enum OpalMCE_Version version:8; /* 0x00 */
+ uint8_t in_use; /* 0x01 */
+ enum OpalMCE_Severity severity:8; /* 0x02 */
+ enum OpalMCE_Initiator initiator:8; /* 0x03 */
+ enum OpalMCE_ErrorType error_type:8; /* 0x04 */
+ enum OpalMCE_Disposition disposition:8; /* 0x05 */
+ uint8_t reserved_1[2]; /* 0x06 */
+ uint64_t gpr3; /* 0x08 */
+ uint64_t srr0; /* 0x10 */
+ uint64_t srr1; /* 0x18 */
+ union { /* 0x20 */
+ struct {
+ enum OpalMCE_UeErrorType ue_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t physical_address_provided;
+ uint8_t reserved_1[5];
+ uint64_t effective_address;
+ uint64_t physical_address;
+ uint8_t reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum OpalMCE_SlbErrorType slb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum OpalMCE_EratErrorType erat_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum OpalMCE_TlbErrorType tlb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } tlb_error;
+ } u;
+};
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+ OPAL_MEM_ERR_TYPE_SCRUB,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+/* OpalMemoryErrorData->flags */
+#define OPAL_MEM_CORRECTED_ERROR 0x0001
+#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
+#define OPAL_MEM_ACK_REQUIRED 0x8000
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ __be16 flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
+enum {
+ OPAL_P7IOC_DIAG_TYPE_NONE = 0,
+ OPAL_P7IOC_DIAG_TYPE_RGC = 1,
+ OPAL_P7IOC_DIAG_TYPE_BI = 2,
+ OPAL_P7IOC_DIAG_TYPE_CI = 3,
+ OPAL_P7IOC_DIAG_TYPE_MISC = 4,
+ OPAL_P7IOC_DIAG_TYPE_I2C = 5,
+ OPAL_P7IOC_DIAG_TYPE_LAST = 6
+};
+
+struct OpalIoP7IOCErrorData {
+ uint16_t type;
+
+ /* GEM */
+ uint64_t gemXfir;
+ uint64_t gemRfir;
+ uint64_t gemRirqfir;
+ uint64_t gemMask;
+ uint64_t gemRwof;
+
+ /* LEM */
+ uint64_t lemFir;
+ uint64_t lemErrMask;
+ uint64_t lemAction0;
+ uint64_t lemAction1;
+ uint64_t lemWof;
+
+ union {
+ struct OpalIoP7IOCRgcErrorData {
+ uint64_t rgcStatus; /* 3E1C10 */
+ uint64_t rgcLdcp; /* 3E1C18 */
+ } rgc;
+ struct OpalIoP7IOCBiErrorData {
+ uint64_t biLdcp0; /* 3C0100, 3C0118 */
+ uint64_t biLdcp1; /* 3C0108, 3C0120 */
+ uint64_t biLdcp2; /* 3C0110, 3C0128 */
+ uint64_t biFenceStatus; /* 3C0130, 3C0130 */
+
+ uint8_t biDownbound; /* BI Downbound or Upbound */
+ } bi;
+ struct OpalIoP7IOCCiErrorData {
+ uint64_t ciPortStatus; /* 3Dn008 */
+ uint64_t ciPortLdcp; /* 3Dn010 */
+
+ uint8_t ciPort; /* Index of CI port: 0/1 */
+ } ci;
+ };
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+ OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+};
+
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256
+};
+
+struct OpalIoPhbErrorCommon {
+ __be32 version;
+ __be32 ioType;
+ __be32 len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ uint32_t brdgCtl;
+
+ // P7IOC utl regs
+ uint32_t portStatusReg;
+ uint32_t rootCmplxStatus;
+ uint32_t busAgentStatus;
+
+ // P7IOC cfg regs
+ uint32_t deviceStatus;
+ uint32_t slotStatus;
+ uint32_t linkStatus;
+ uint32_t devCmdStatus;
+ uint32_t devSecStatus;
+
+ // cfg AER regs
+ uint32_t rootErrorStatus;
+ uint32_t uncorrErrorStatus;
+ uint32_t corrErrorStatus;
+ uint32_t tlpHdr1;
+ uint32_t tlpHdr2;
+ uint32_t tlpHdr3;
+ uint32_t tlpHdr4;
+ uint32_t sourceId;
+
+ uint32_t rsv3;
+
+ // Record data about the call to allocate a buffer.
+ uint64_t errorClass;
+ uint64_t correlator;
+
+ //P7IOC MMIO Error Regs
+ uint64_t p7iocPlssr; // n120
+ uint64_t p7iocCsr; // n110
+ uint64_t lemFir; // nC00
+ uint64_t lemErrorMask; // nC18
+ uint64_t lemWOF; // nC40
+ uint64_t phbErrorStatus; // nC80
+ uint64_t phbFirstErrorStatus; // nC88
+ uint64_t phbErrorLog0; // nCC0
+ uint64_t phbErrorLog1; // nCC8
+ uint64_t mmioErrorStatus; // nD00
+ uint64_t mmioFirstErrorStatus; // nD08
+ uint64_t mmioErrorLog0; // nD40
+ uint64_t mmioErrorLog1; // nD48
+ uint64_t dma0ErrorStatus; // nD80
+ uint64_t dma0FirstErrorStatus; // nD88
+ uint64_t dma0ErrorLog0; // nDC0
+ uint64_t dma0ErrorLog1; // nDC8
+ uint64_t dma1ErrorStatus; // nE00
+ uint64_t dma1FirstErrorStatus; // nE08
+ uint64_t dma1ErrorLog0; // nE40
+ uint64_t dma1ErrorLog1; // nE48
+ uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB3 UTL regs */
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ /* PHB3 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ __be64 errorClass;
+ __be64 correlator;
+
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+
+ /* PHB3 MMIO Error Regs */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 mmioErrorStatus; /* D00 */
+ __be64 mmioFirstErrorStatus; /* D08 */
+ __be64 mmioErrorLog0; /* D40 */
+ __be64 mmioErrorLog1; /* D48 */
+ __be64 dma0ErrorStatus; /* D80 */
+ __be64 dma0FirstErrorStatus; /* D88 */
+ __be64 dma0ErrorLog0; /* DC0 */
+ __be64 dma0ErrorLog1; /* DC8 */
+ __be64 dma1ErrorStatus; /* E00 */
+ __be64 dma1FirstErrorStatus; /* E08 */
+ __be64 dma1ErrorLog0; /* E40 */
+ __be64 dma1ErrorLog1; /* E48 */
+ __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+enum {
+ OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
+ OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+};
+
+typedef struct oppanel_line {
+ const char * line;
+ uint64_t line_len;
+} oppanel_line_t;
+
+/* /sys/firmware/opal */
+extern struct kobject *opal_kobj;
+
+/* /ibm,opal */
+extern struct device_node *opal_node;
+
+/* API functions */
+int64_t opal_invalid_call(void);
+int64_t opal_console_write(int64_t term_number, __be64 *length,
+ const uint8_t *buffer);
+int64_t opal_console_read(int64_t term_number, __be64 *length,
+ uint8_t *buffer);
+int64_t opal_console_write_buffer_space(int64_t term_number,
+ __be64 *length);
+int64_t opal_rtc_read(__be32 *year_month_day,
+ __be64 *hour_minute_second_millisecond);
+int64_t opal_rtc_write(uint32_t year_month_day,
+ uint64_t hour_minute_second_millisecond);
+int64_t opal_cec_power_down(uint64_t request);
+int64_t opal_cec_reboot(void);
+int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
+int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
+ uint64_t tce_mem_size);
+int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
+ uint64_t tce_mem_size);
+int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint8_t *data);
+int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, __be16 *data);
+int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, __be32 *data);
+int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint8_t data);
+int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint16_t data);
+int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
+ uint64_t offset, uint32_t data);
+int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
+int64_t opal_register_exception_handler(uint64_t opal_exception,
+ uint64_t handler_address,
+ uint64_t glue_cache_line);
+int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
+ uint8_t *freeze_state,
+ __be16 *pci_error_type,
+ __be64 *phb_status);
+int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
+ uint64_t eeh_action_token);
+int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
+
+int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type,
+ uint16_t window_num, uint16_t enable);
+int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type,
+ uint16_t window_num,
+ uint64_t starting_real_address,
+ uint64_t starting_pci_address,
+ uint16_t segment_size);
+int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
+ uint16_t window_type, uint16_t window_num,
+ uint16_t segment_num);
+int64_t opal_pci_set_phb_table_memory(uint64_t phb_id, uint64_t rtt_addr,
+ uint64_t ivt_addr, uint64_t ivt_len,
+ uint64_t reject_array_addr,
+ uint64_t peltv_addr);
+int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func,
+ uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare,
+ uint8_t pe_action);
+int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe,
+ uint8_t state);
+int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint32_t pe_number);
+int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number,
+ uint32_t state);
+int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+ uint8_t *p_bit, uint8_t *q_bit);
+int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
+ uint8_t p_bit, uint8_t q_bit);
+int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
+int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+ uint32_t xive_num);
+int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
+ __be32 *interrupt_source_number);
+int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
+ uint8_t msi_range, __be32 *msi_address,
+ __be32 *message_data);
+int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ __be64 *msi_address, __be32 *message_data);
+int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
+int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
+int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
+int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t window_id,
+ uint16_t tce_levels, uint64_t tce_table_addr,
+ uint64_t tce_table_size, uint64_t tce_page_size);
+int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
+ uint16_t dma_window_number, uint64_t pci_start_addr,
+ uint64_t pci_mem_size);
+int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
+
+int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
+ uint64_t diag_buffer_len);
+int64_t opal_pci_fence_phb(uint64_t phb_id);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
+int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
+int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
+int64_t opal_get_epow_status(__be64 *status);
+int64_t opal_set_system_attention_led(uint8_t led_action);
+int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
+ __be16 *pci_error_type, __be16 *severity);
+int64_t opal_pci_poll(uint64_t phb_id);
+int64_t opal_return_cpu(void);
+int64_t opal_reinit_cpus(uint64_t flags);
+
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, __be32 *data, uint32_t sz);
+
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
+int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
+int64_t opal_send_ack_elog(uint64_t log_id);
+void opal_resend_pending_logs(void);
+
+int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
+int64_t opal_manage_flash(uint8_t op);
+int64_t opal_update_flash(uint64_t blk_list);
+int64_t opal_dump_init(uint8_t dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
+int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
+int64_t opal_dump_ack(uint32_t dump_id);
+int64_t opal_dump_resend_notification(void);
+
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+ uint64_t length);
+int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
+ uint64_t length);
+int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
+
+/* Internal functions */
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+ int depth, void *data);
+extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
+ const char *uname, int depth, void *data);
+
+extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
+extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
+
+extern void hvc_opal_init_early(void);
+
+extern int opal_notifier_register(struct notifier_block *nb);
+extern int opal_notifier_unregister(struct notifier_block *nb);
+
+extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+ struct notifier_block *nb);
+extern void opal_notifier_enable(void);
+extern void opal_notifier_disable(void);
+extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
+
+extern int __opal_async_get_token(void);
+extern int opal_async_get_token_interruptible(void);
+extern int __opal_async_release_token(int token);
+extern int opal_async_release_token(int token);
+extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
+extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
+
+struct rtc_time;
+extern int opal_set_rtc_time(struct rtc_time *tm);
+extern void opal_get_rtc_time(struct rtc_time *tm);
+extern unsigned long opal_get_boot_time(void);
+extern void opal_nvram_init(void);
+extern void opal_flash_init(void);
+extern void opal_flash_term_callback(void);
+extern int opal_elog_init(void);
+extern void opal_platform_dump_init(void);
+extern void opal_sys_param_init(void);
+extern void opal_msglog_init(void);
+
+extern int opal_machine_check(struct pt_regs *regs);
+extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
+
+extern void opal_shutdown(void);
+extern int opal_resync_timebase(void);
+
+extern void opal_lpc_init(void);
+
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OPAL_H */
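
As a usage note (not part of the header itself): callers of these entry points
typically retry while firmware reports OPAL_BUSY or OPAL_BUSY_EVENT, polling
events in between. A sketch, loosely modeled on the PowerNV RTC path:

    s64 rc;
    __be32 y_m_d;
    __be64 h_m_s_ms;

    do {
            rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
            if (rc == OPAL_BUSY_EVENT)
                    opal_poll_events(NULL); /* let firmware make progress */
    } while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT);

    if (rc == OPAL_SUCCESS) {
            /* decode the BCD-packed date and time */
    }
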
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 639dc96077a..d697b08994c 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -34,7 +34,7 @@ struct op_system_config {
unsigned long mmcra;
#ifdef CONFIG_OPROFILE_CELL
/* Register for oprofile user tool to check cell kernel profiling
- * suport.
+ * support.
*/
unsigned long cell_support;
#endif
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
deleted file mode 100644
index d4b4bfa26fb..00000000000
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _PPC64_PSERIES_RECONFIG_H
-#define _PPC64_PSERIES_RECONFIG_H
-#ifdef __KERNEL__
-
-#include <linux/notifier.h>
-
-/*
- * Use this API if your code needs to know about OF device nodes being
- * added or removed on pSeries systems.
- */
-
-#define PSERIES_RECONFIG_ADD 0x0001
-#define PSERIES_RECONFIG_REMOVE 0x0002
-#define PSERIES_DRCONF_MEM_ADD 0x0003
-#define PSERIES_DRCONF_MEM_REMOVE 0x0004
-
-#ifdef CONFIG_PPC_PSERIES
-extern int pSeries_reconfig_notifier_register(struct notifier_block *);
-extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
-extern struct blocking_notifier_head pSeries_reconfig_chain;
-#else /* !CONFIG_PPC_PSERIES */
-static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
-#endif /* CONFIG_PPC_PSERIES */
-
-#endif /* __KERNEL__ */
-#endif /* _PPC64_PSERIES_RECONFIG_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec57540cd7a..bb0bd25f20d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PPC64
-#include <linux/init.h>
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
@@ -43,6 +42,7 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
+struct opal_machine_check_event;
/*
* Defines the layout of the paca.
@@ -67,8 +67,13 @@ struct paca_struct {
* instruction. They must travel together and be properly
* aligned.
*/
+#ifdef __BIG_ENDIAN__
u16 lock_token; /* Constant 0x8000, used in locks */
u16 paca_index; /* Logical processor number */
+#else
+ u16 paca_index; /* Logical processor number */
+ u16 lock_token; /* Constant 0x8000, used in locks */
+#endif
u64 kernel_toc; /* Kernel TOC address */
u64 kernelbase; /* Base address of kernel */
@@ -87,26 +92,38 @@ struct paca_struct {
struct slb_shadow *slb_shadow_ptr;
struct dtl_entry *dispatch_log;
struct dtl_entry *dispatch_log_end;
+#endif /* CONFIG_PPC_STD_MMU_64 */
+ u64 dscr_default; /* per-CPU default DSCR */
+#ifdef CONFIG_PPC_STD_MMU_64
/*
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[10] __attribute__((aligned(0x80)));
- u64 exmc[10]; /* used for machine checks */
- u64 exslb[10]; /* used for SLB/segment table misses
+ u64 exgen[13] __attribute__((aligned(0x80)));
+ u64 exmc[13]; /* used for machine checks */
+ u64 exslb[13]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
u16 slb_cache_ptr;
- u16 slb_cache[SLB_CACHE_ENTRIES];
+ u32 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
- pgd_t *pgd; /* Current PGD */
- pgd_t *kernel_pgd; /* Kernel PGD */
u64 exgen[8] __attribute__((aligned(0x80)));
- u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80)));
+ /* Keep pgd in the same cacheline as the start of extlb */
+ pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
+ pgd_t *kernel_pgd; /* Kernel PGD */
+
+ /* Shared by all threads of a core -- points to tcd of first thread */
+ struct tlb_core_data *tcd_ptr;
+
+ /*
+ * We can have up to 3 levels of reentrancy in the TLB miss handler,
+ * in each of four exception levels (normal, crit, mcheck, debug).
+ */
+ u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
u64 excrit[8]; /* used for crit interrupts */
u64 exdbg[8]; /* used for debug interrupts */
@@ -115,6 +132,8 @@ struct paca_struct {
void *mc_kstack;
void *crit_kstack;
void *dbg_kstack;
+
+ struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */
mm_context_t context;
@@ -125,13 +144,34 @@ struct paca_struct {
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
u64 stab_rr; /* stab/slb round-robin counter */
- u64 saved_r1; /* r1 save for RTAS calls */
+ u64 saved_r1; /* r1 save for RTAS calls or PM */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */
- u8 hard_enabled; /* set if irqs are enabled in MSR */
+ u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
+ u8 nap_state_lost; /* NV GPR values lost in power7_idle */
+ u64 sprg_vdso; /* Saved user-visible sprg */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_scratch; /* TM scratch area for reclaim */
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+ /* Pointer to OPAL machine check event structure set by the
+ * early exception handler for use by the high-level C handler
+ */
+ struct opal_machine_check_event *opal_mc_evt;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Exclusive emergency stack pointer for machine check exception. */
+ void *mc_emergency_sp;
+ /*
+ * Flag to check whether we are in machine check early handler
+ * and already using emergency stack.
+ */
+ u16 in_mce;
+#endif
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
@@ -146,13 +186,15 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
+ struct kvmppc_host_state kvm_hstate;
+#endif
};
extern struct paca_struct *paca;
-extern __initdata struct paca_struct boot_paca;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
extern void allocate_pacas(void);
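
One consequence of the hard_enabled -> irq_happened change above is the lazy
interrupt-disabling scheme: soft-disabling only clears paca->soft_enabled, and a
hardware interrupt taken meanwhile is merely latched in paca->irq_happened for
later replay. Roughly:

    unsigned long flags;

    flags = arch_local_irq_save(); /* clears paca->soft_enabled only */
    /*
     * An interrupt arriving here is not delivered; the exception entry
     * records it in paca->irq_happened and hard-disables instead.
     */
    arch_local_irq_restore(flags); /* replays anything that was latched */
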
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be40eb..32e4e212b9c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -36,6 +36,18 @@
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HUGETLB_PAGE
+extern unsigned int HPAGE_SHIFT;
+#else
+#define HPAGE_SHIFT PAGE_SHIFT
+#endif
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
+#endif
+
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
@@ -66,7 +78,7 @@
*
* Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
*
- * There are two was to determine a physical address from a virtual one:
+ * There are two ways to determine a physical address from a virtual one:
* va = pa + PAGE_OFFSET - MEMORY_START
* va = pa + KERNELBASE - PHYSICAL_START
*
@@ -80,28 +92,42 @@
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
-#if defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
+
+#ifdef CONFIG_RELOCATABLE_PPC32
+extern long long virt_phys_offset;
#endif
+
+#endif /* __ASSEMBLY__ */
#define PHYSICAL_START kernstart_addr
-#else
+
+#else /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#endif
+/* See the description below for VIRT_PHYS_OFFSET */
+#ifdef CONFIG_RELOCATABLE_PPC32
+#define VIRT_PHYS_OFFSET virt_phys_offset
+#else
+#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
+#endif
+
#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
-#elif defined(CONFIG_RELOCATABLE)
+#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START memstart_addr
#else
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif
#ifdef CONFIG_FLATMEM
-#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
@@ -113,16 +139,91 @@ extern phys_addr_t kernstart_addr;
* determine MEMORY_START until then. However we can determine PHYSICAL_START
* from information at hand (program counter, TLB lookup).
*
+ * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
+ *
+ * With RELOCATABLE_PPC32, we support loading the kernel at any physical
+ * address without any restriction on the page alignment.
+ *
+ * We find the runtime address of _stext and relocate ourselves based on
+ * the following calculation:
+ *
+ * virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
+ * MODULO(_stext.run,256M)
+ * and create the following mapping:
+ *
+ * ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
+ *
+ * When we process relocations, we cannot depend on the
+ * existing equation for the __va()/__pa() translations:
+ *
+ * __va(x) = (x) - PHYSICAL_START + KERNELBASE
+ *
+ * Where:
+ * PHYSICAL_START = kernstart_addr = Physical address of _stext
+ * KERNELBASE = Compiled virtual address of _stext.
+ *
+ * This formula holds true iff the kernel load address is TLB page aligned.
+ *
+ * In our case, we need to also account for the shift in the kernel virtual
+ * address.
+ *
+ * E.g.,
+ *
+ * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
+ * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
+ *
+ * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
+ * = 0xbc100000, which is wrong.
+ *
+ * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
+ * according to our mapping.
+ *
+ * Hence we use the following formula to get the translations right:
+ *
+ * __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
+ *
+ * Where:
+ * PHYSICAL_START = dynamic load address (the kernstart_addr variable)
+ * Effective KERNELBASE = virtual_base
+ * = ALIGN_DOWN(KERNELBASE,256M) +
+ * MODULO(PHYSICAL_START,256M)
+ *
+ * To make the cost of __va() / __pa() more lightweight, we introduce
+ * a new variable virt_phys_offset, which will hold:
+ *
+ * virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
+ * = ALIGN_DOWN(KERNELBASE,256M) -
+ * ALIGN_DOWN(PHYSICAL_START,256M)
+ *
+ * Hence :
+ *
+ * __va(x) = x - PHYSICAL_START + Effective KERNELBASE
+ * = x + virt_phys_offset
+ *
+ * and
+ * __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
+ * = x - virt_phys_offset
+ *
* On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
* the other definitions for __va & __pa.
*/
#ifdef CONFIG_BOOKE
-#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE))
-#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
+#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
+#endif
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
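
Plugging the comment's own example into the new macros (kernel loaded at 64MB,
KERNELBASE = PAGE_OFFSET = 0xc0000000):

    virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M) - ALIGN_DOWN(0x04000000, 256M)
                     = 0xc0000000 - 0x00000000 = 0xc0000000
    __va(0x100000)   = 0x00100000 + 0xc0000000 = 0xc0100000

which matches the intended mapping, where the old KERNELBASE - PHYSICAL_START
offset would have produced the wrong 0xbc100000.
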
@@ -158,6 +259,26 @@ extern phys_addr_t kernstart_addr;
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif
+#ifndef CONFIG_PPC_BOOK3S_64
+/*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages. This works because we know that
+ * the page tables live in kernel space. If we ever decide to support having
+ * page tables at arbitrary addresses, this breaks and will have to change.
+ */
+#ifdef CONFIG_PPC64
+#define PD_HUGE 0x8000000000000000
+#else
+#define PD_HUGE 0x80000000
+#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+/*
+ * Some number of bits at the level of the page table that points to
+ * a hugepte are used to encode the size. This masks those bits.
+ */
+#define HUGEPD_SHIFT_MASK 0x3f
+
#ifndef __ASSEMBLY__
#undef STRICT_MM_TYPECHECKS
@@ -243,17 +364,29 @@ typedef unsigned long pgprot_t;
#endif
typedef struct { signed long pd; } hugepd_t;
-#define HUGEPD_SHIFT_MASK 0x3f
#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ /*
+ * hugepd pointer, bottom two bits == 00 and next 4 bits
+ * indicate size of table
+ */
+ return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+}
+#else
static inline int hugepd_ok(hugepd_t hpd)
{
return (hpd.pd > 0);
}
+#endif
#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
+int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep) 0
+#define pgd_huge(pgd) 0
#endif /* CONFIG_HUGETLB_PAGE */
struct page;
@@ -261,6 +394,7 @@ extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *p);
extern int page_is_ram(unsigned long pfn);
+extern int devmem_is_allowed(unsigned long pfn);
#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
@@ -269,7 +403,11 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
+typedef pte_t *pgtable_t;
+#else
typedef struct page *pgtable_t;
+#endif
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
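
A hypothetical decode of the PD_HUGE/HUGEPD_SHIFT_MASK encoding described above;
the helper names are invented here, and the real accessors live in the hugetlb
headers:

    static inline int demo_is_hugepd(unsigned long pd)
    {
            return (pd & PD_HUGE) != 0; /* top bit flags a hugepage directory */
    }

    static inline unsigned long demo_hugepd_page_size(unsigned long pd)
    {
            return 1UL << (pd & HUGEPD_SHIFT_MASK); /* low 6 bits hold the shift */
    }
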
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 932f88dcf6f..88693cef4f3 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -59,39 +59,11 @@ static __inline__ void clear_page(void *addr)
: "ctr", "memory");
}
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
+extern void copy_page(void *to, void *from);
/* Log 2 of page table size */
extern u64 ppc64_pft_size;
-/* Large pages size */
-#ifdef CONFIG_HUGETLB_PAGE
-extern unsigned int HPAGE_SHIFT;
-#else
-#define HPAGE_SHIFT PAGE_SHIFT
-#endif
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
-
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_PPC_MM_SLICES
@@ -106,11 +78,19 @@ extern unsigned int HPAGE_SHIFT;
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
+/*
+ * 1 bit per slice and we have one slice per 1TB.
+ * Right now we support only 64TB; if we change this, the
+ * type of high_slices will have to change with it.
+ */
+#define SLICE_MASK_SIZE 8
+
#ifndef __ASSEMBLY__
struct slice_mask {
u16 low_slices;
- u16 high_slices;
+ u64 high_slices;
};
struct mm_struct;
@@ -119,8 +99,7 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr,
unsigned long len,
unsigned long flags,
unsigned int psize,
- int topdown,
- int use_cache);
+ int topdown);
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
@@ -130,7 +109,7 @@ extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
-#define slice_mm_new_context(mm) ((mm)->context.id == 0)
+#define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT)
#endif /* __ASSEMBLY__ */
#else
@@ -158,7 +137,9 @@ do { \
#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
#endif /* !CONFIG_HUGETLB_PAGE */
@@ -169,7 +150,7 @@ do { \
/*
* This is the default if a program doesn't have a PT_GNU_STACK
* program header entry. The PPC64 ELF ABI has a non executable stack
- * stack by default, so in the absense of a PT_GNU_STACK program header
+ * stack by default, so in the absence of a PT_GNU_STACK program header
* we turn execute permission off.
*/
#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
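
The widening of high_slices above is forced by the arithmetic in the new
comment: 64TB addressable / 1TB per high slice = 64 slices, so the mask needs
64 bits, hence the u16 -> u64 change and SLICE_MASK_SIZE of 8 (bytes).
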
diff --git a/arch/powerpc/include/asm/param.h b/arch/powerpc/include/asm/param.h
deleted file mode 100644
index 965d4542797..00000000000
--- a/arch/powerpc/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 1ca1102b4a2..a452968b29e 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -12,7 +12,7 @@
#include <asm/prom.h>
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
struct device_node *np;
const u32 *prop;
@@ -21,9 +21,7 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
int count = 0;
int virq;
- for (np = NULL; (np = of_find_compatible_node(np,
- "parallel",
- "pnpPNP,400")) != NULL;) {
+ for_each_compatible_node(np, "parallel", "pnpPNP,400") {
prop = of_get_property(np, "reg", &propsize);
if (!prop || propsize > 6*sizeof(u32))
continue;
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h
index 19fd7933e2d..eafa5a5f56d 100644
--- a/arch/powerpc/include/asm/pasemi_dma.h
+++ b/arch/powerpc/include/asm/pasemi_dma.h
@@ -522,7 +522,7 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
dma_addr_t *handle);
-/* Routines to allocate flags (events) for channel syncronization */
+/* Routines to allocate flags (events) for channel synchronization */
extern int pasemi_dma_alloc_flag(void);
extern void pasemi_dma_free_flag(int flag);
extern void pasemi_dma_set_flag(int flag);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 51e9e6f90d1..4ca90a39d6d 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -10,58 +10,10 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <asm-generic/pci-bridge.h>
struct device_node;
-enum {
- /* Force re-assigning all resources (ignore firmware
- * setup completely)
- */
- PPC_PCI_REASSIGN_ALL_RSRC = 0x00000001,
-
- /* Re-assign all bus numbers */
- PPC_PCI_REASSIGN_ALL_BUS = 0x00000002,
-
- /* Do not try to assign, just use existing setup */
- PPC_PCI_PROBE_ONLY = 0x00000004,
-
- /* Don't bother with ISA alignment unless the bridge has
- * ISA forwarding enabled
- */
- PPC_PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
-
- /* Enable domain numbers in /proc */
- PPC_PCI_ENABLE_PROC_DOMAINS = 0x00000010,
- /* ... except for domain 0 */
- PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
-};
-#ifdef CONFIG_PCI
-extern unsigned int ppc_pci_flags;
-
-static inline void ppc_pci_set_flags(int flags)
-{
- ppc_pci_flags = flags;
-}
-
-static inline void ppc_pci_add_flags(int flags)
-{
- ppc_pci_flags |= flags;
-}
-
-static inline int ppc_pci_has_flag(int flag)
-{
- return (ppc_pci_flags & flag);
-}
-#else
-static inline void ppc_pci_set_flags(int flags) { }
-static inline void ppc_pci_add_flags(int flags) { }
-static inline int ppc_pci_has_flag(int flag)
-{
- return 0;
-}
-#endif
-
-
/*
* Structure of a PCI controller (host bridge)
*/
@@ -78,6 +30,7 @@ struct pci_controller {
int first_busno;
int last_busno;
int self_busno;
+ struct resource busn;
void __iomem *io_base_virt;
#ifdef CONFIG_PPC64
@@ -86,11 +39,6 @@ struct pci_controller {
resource_size_t io_base_phys;
resource_size_t pci_io_size;
- /* Some machines (PReP) have a non 1:1 mapping of
- * the PCI memory space in the CPU bus space
- */
- resource_size_t pci_mem_offset;
-
/* Some machines have a special region to forward the ISA
* "memory" cycles such as VGA memory regions. Left to 0
* if unsupported
@@ -106,7 +54,7 @@ struct pci_controller {
* Used for variants of PCI indirect handling and possible quirks:
* SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
* EXT_REG - provides access to PCI-e extended registers
- * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
+ * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
* on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
* to determine which bus number to match on when generating type0
* config cycles
@@ -117,6 +65,8 @@ struct pci_controller {
* BIG_ENDIAN - cfg_addr is a big endian register
* BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs on
* the PLB4. Effectively disable MRM commands by setting this.
+ * FSL_CFG_REG_LINK - Freescale controller version in which the PCIe
+ * link status is in a RC PCIe cfg register (vs being a SoC register)
*/
#define PPC_INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
#define PPC_INDIRECT_TYPE_EXT_REG 0x00000002
@@ -124,12 +74,14 @@ struct pci_controller {
#define PPC_INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
#define PPC_INDIRECT_TYPE_BIG_ENDIAN 0x00000010
#define PPC_INDIRECT_TYPE_BROKEN_MRM 0x00000020
+#define PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK 0x00000040
u32 indirect_type;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
struct resource io_resource;
struct resource mem_resources[3];
+ resource_size_t mem_offset[3];
int global_number; /* PCI domain number */
resource_size_t dma_window_base_cur;
@@ -137,9 +89,9 @@ struct pci_controller {
#ifdef CONFIG_PPC64
unsigned long buid;
+#endif /* CONFIG_PPC64 */
void *private_data;
-#endif /* CONFIG_PPC64 */
};
/* These are used for config access before all the PCI probing
@@ -164,13 +116,23 @@ extern void setup_indirect_pci(struct pci_controller* hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags);
-#ifndef CONFIG_PPC64
+extern int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val);
+
+extern int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val);
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
return bus->sysdata;
}
+#ifndef CONFIG_PPC64
+
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8 *bus, u8 *devfn);
+extern void pci_create_OF_bus_map(void);
+
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
@@ -197,41 +159,24 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
-#ifdef CONFIG_EEH
+ bool force_32bit_msi;
+
struct pci_dev *pcidev; /* back-pointer to the pci device */
- int class_code; /* pci device class */
- int eeh_mode; /* See eeh.h for possible EEH_MODEs */
- int eeh_config_addr;
- int eeh_pe_config_addr; /* new-style partition endpoint address */
- int eeh_check_count; /* # times driver ignored error */
- int eeh_freeze_count; /* # times this device froze up. */
- int eeh_false_positives; /* # times this device reported #ff's */
- u32 config_space[16]; /* saved PCI config space */
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev; /* eeh device */
+#endif
+#define IODA_INVALID_PE (-1)
+#ifdef CONFIG_PPC_POWERNV
+ int pe_number;
#endif
};
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
-extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
-extern void * update_dn_pci_info(struct device_node *dn, void *data);
-
-/* Get a device_node from a pci_dev. This code must be fast except
- * in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once).
- * In this case the sysdata will have been inherited from a PCI host
- * bridge or a PCI-PCI bridge further up the tree, so it will point
- * to a valid struct pci_dn, just not the one we want.
- */
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
-{
- struct device_node *dn = dev->sysdata;
- struct pci_dn *pdn = dn->data;
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
- if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number)
- return dn; /* fast path. sysdata is good */
- return fetch_dev_dn(dev);
-}
+extern void * update_dn_pci_info(struct device_node *dn, void *data);
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
@@ -243,13 +188,22 @@ static inline int pci_device_from_OF_node(struct device_node *np,
return 0;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+#if defined(CONFIG_EEH)
+static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
{
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- else
- return bus->sysdata; /* Must be root bus (PHB) */
+ /*
+ * OF nodes whose parent isn't a PCI bridge don't have a PCI_DN
+ * attached, so we have to skip them for any EEH operations.
+ */
+ if (!dn || !PCI_DN(dn))
+ return NULL;
+
+ return PCI_DN(dn)->edev;
}
+#else
+#define of_node_to_eeh_dev(x) (NULL)
+#endif
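[Editor's note] A minimal usage sketch of the new accessor may help; the wrapper name pdev_to_eeh_dev() and the traversal are illustrative, not part of this patch, and pci_device_to_OF_node() is assumed available from the PCI/OF glue:

/* Illustrative helper, not from this patch: map a pci_dev to its
 * eeh_dev via its device-tree node. Returns NULL when the node has
 * no PCI_DN attached, exactly as of_node_to_eeh_dev() guarantees.
 */
static struct eeh_dev *pdev_to_eeh_dev(struct pci_dev *pdev)
{
	struct device_node *dn = pci_device_to_OF_node(pdev);

	return of_node_to_eeh_dev(dn);	/* NULL-safe for dn == NULL too */
}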
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
@@ -260,14 +214,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
extern void pcibios_add_pci_devices(struct pci_bus *bus);
-static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
-{
- struct device_node *busdn = bus->sysdata;
-
- BUG_ON(busdn == NULL);
- return PCI_DN(busdn)->phb;
-}
-
extern void isa_bridge_find_early(struct pci_controller *hose);
@@ -300,7 +246,6 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
/* Allocate & free a PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
-extern void pcibios_setup_phb_resources(struct pci_controller *hose);
#ifdef CONFIG_PCI
extern int pcibios_vaddr_is_ioport(void __iomem *address);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a20a9ad2258..1b0739bc14b 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -44,17 +44,7 @@ struct pci_dev;
* bus numbers (don't do that on ppc64 yet !)
*/
#define pcibios_assign_all_busses() \
- (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
-
-static inline void pcibios_set_master(struct pci_dev *dev)
-{
- /* No special bus mastering setup handling */
-}
-
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
@@ -118,11 +108,6 @@ extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);
-/* MSI arch hooks */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
-#define arch_teardown_msi_irqs arch_teardown_msi_irqs
-#define arch_msi_check_device arch_msi_check_device
-
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
@@ -159,14 +144,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
#endif /* CONFIG_PPC64 */
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region,
- struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
- struct resource *res,
- struct pci_bus_region *region);
-
extern void pcibios_claim_one_bus(struct pci_bus *b);
extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
@@ -179,14 +156,11 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
-extern void of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev);
+extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
-extern int pci_read_irq_line(struct pci_dev *dev);
-
struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
@@ -198,10 +172,11 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
+extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
-extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata);
+extern void pcibios_scan_phb(struct pci_controller *hose);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 5c16b891d50..0bb23725b1e 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -26,8 +26,13 @@
#include <asm/ptrace.h>
#include <asm/reg.h>
+/*
+ * Overload regs->result to specify whether we should use the MSR (result
+ * is zero) or the SIAR (result is non-zero).
+ */
#define perf_arch_fetch_caller_regs(regs, __ip) \
do { \
+ (regs)->result = 0; \
(regs)->nip = __ip; \
(regs)->gpr[1] = *(unsigned long *)__get_SP(); \
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
index 718a9fa94e6..a58165450f6 100644
--- a/arch/powerpc/include/asm/perf_event_fsl_emb.h
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
-#define MAX_HWEVENTS 4
+#define MAX_HWEVENTS 6
/* event flags */
#define FSL_EMB_EVENT_VALID 1
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8f1df1208d2..b3e936027b2 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -11,7 +11,10 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
+#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
+/* Update perf_event_print_debug() if this changes */
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
@@ -32,21 +35,33 @@ struct power_pmu {
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
+ u64 (*bhrb_filter_map)(u64 branch_sample_type);
+ void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
+ const struct attribute_group **attr_groups;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+ /* BHRB entries in the PMU */
+ int bhrb_nr;
};
/*
* Values for power_pmu.flags
*/
-#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
-#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
+#define PPMU_LIMITED_PMC5_6 0x00000001 /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR 0x00000002 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
+#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
+#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
+#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
+#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
/*
* Values for flags to get_alternatives()
@@ -60,8 +75,7 @@ extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-
-#define PERF_EVENT_INDEX_OFFSET 1
+extern unsigned long int read_bhrb(int n);
/*
* Only override the default definitions in include/linux/perf_event.h
@@ -108,3 +122,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - e.g. the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC', where the latter is the name by which the event is known in
+ * the POWER CPU specification.
+ */
+#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
+#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
+
+#define EVENT_ATTR(_name, _id, _suffix) \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \
+ power_events_sysfs_show)
+
+#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
+#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
+
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
+#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
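[Editor's note] A hedged expansion example; the event name pairing is typical but the PME_ code below is invented for illustration, real PMU drivers supply their own:

/* Assuming a driver first defines the event code: */
#define PME_PM_CYC	0x1e	/* invented code, for illustration only */

/* GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC) then expands to
 * PMU_EVENT_ATTR(cpu-cycles, event_attr_PM_CYC_g, PME_PM_CYC,
 *                power_events_sysfs_show)
 * giving the sysfs alias 'cpu-cycles', while
 * POWER_EVENT_ATTR(PM_CYC, PM_CYC) adds the POWER-specification
 * name 'PM_CYC' via the _p-suffixed variable.
 */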
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 580cf73b96e..842846c1b71 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -37,6 +37,17 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
static inline void pgtable_free(void *table, unsigned index_size)
{
BUG_ON(index_size); /* 32-bit doesn't use this */
@@ -45,4 +56,36 @@ static inline void pgtable_free(void *table, unsigned index_size)
#define check_pgt_cache() do { } while (0)
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
+}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
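[Editor's note] The SMP path above tags the table pointer with its index size rather than allocating a separate descriptor; since page-table allocations are aligned well beyond MAX_PGTABLE_INDEX_SIZE, the low bits are free to carry it. A standalone sketch of the round trip, under that alignment assumption:

/* Standalone sketch of the pointer-tagging round trip, assuming the
 * table is aligned so its low (MAX_PGTABLE_INDEX_SIZE) bits are zero.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf

static void *tag_table(void *table, unsigned int shift)
{
	assert(shift <= MAX_PGTABLE_INDEX_SIZE);
	assert(((uintptr_t)table & MAX_PGTABLE_INDEX_SIZE) == 0);
	return (void *)((uintptr_t)table | shift);
}

static void *untag_table(void *tagged, unsigned int *shift)
{
	*shift = (uintptr_t)tagged & MAX_PGTABLE_INDEX_SIZE;
	return (void *)((uintptr_t)tagged & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}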
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 292725cec2e..4b0be20fcbf 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -16,6 +16,7 @@ struct vmemmap_backing {
unsigned long phys;
unsigned long virt_addr;
};
+extern struct vmemmap_backing *vmemmap_list;
/*
* Functions that deal with pagetables that could be at any level of
@@ -35,7 +36,10 @@ struct vmemmap_backing {
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -72,8 +76,101 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ pte_t *pte;
-#else /* CONFIG_PPC_64K_PAGES */
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
+}
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else /* !CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif /* CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
+}
+
+#else /* if CONFIG_PPC_64K_PAGES */
+/*
+ * We support 16 fragments per PTE page.
+ */
+#define PTE_FRAG_NR 16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * the real_pte_t hash index.
+ */
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
+
+extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
+extern void page_table_free(struct mm_struct *, unsigned long *, int);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
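[Editor's note] The fragment arithmetic checks out; a compile-time sketch, assuming PTE_INDEX_SIZE = 8 and sizeof(pte_t) = 8 from the 64K-page configuration, not part of the header:

/* Sanity sketch of the 64K-page PTE fragment sizing: 2^8 PTEs of
 * 8 bytes each, doubled for the hash-index half.
 */
_Static_assert(2 * (1 << 8) * 8 == (1 << 12),
	       "2K of PTEs + 2K of hash index = one 4K fragment");
_Static_assert(16 * (1 << 12) == (1 << 16),
	       "PTE_FRAG_NR fragments exactly fill a 64K page");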
#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
@@ -83,55 +180,60 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd_set(pmd, (unsigned long)pte);
}
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
- GFP_KERNEL|__GFP_REPEAT);
+ pmd_set(pmd, (unsigned long)pte_page);
}
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
- kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
+ return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+ return (pte_t *)page_table_alloc(mm, address, 1);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page;
- pte_t *pte;
+ return (pgtable_t)page_table_alloc(mm, address, 0);
+}
- pte = pte_alloc_one_kernel(mm, address);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- pgtable_page_ctor(page);
- return page;
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ page_table_free(mm, (unsigned long *)pte, 1);
}
-static inline void pgtable_free(void *table, unsigned index_size)
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
- if (!index_size)
- free_page((unsigned long)table);
- else {
- BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
- kmem_cache_free(PGT_CACHE(index_size), table);
- }
+ page_table_free(mm, (unsigned long *)ptepage, 0);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}
#define __pmd_free_tlb(tlb, pmd, addr) \
- pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
+ pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index abe8532bd14..e9a9f60e596 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <linux/mm.h>
+#include <asm-generic/tlb.h>
#ifdef CONFIG_PPC_BOOK3E
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
@@ -13,41 +14,11 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
}
#endif /* !CONFIG_PPC_BOOK3E */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif
-#ifdef CONFIG_SMP
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
-extern void pte_free_finish(void);
-#else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
-{
- pgtable_free(table, shift);
-}
-static inline void pte_free_finish(void) { }
-#endif /* !CONFIG_SMP */
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
- unsigned long address)
-{
- tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(ptepage);
- pgtable_free_tlb(tlb, page_address(ptepage), 0);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 6eefdcffa35..12798c9d4b4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,7 @@
*/
#define PTE_INDEX_SIZE 9
#define PMD_INDEX_SIZE 7
-#define PUD_INDEX_SIZE 7
+#define PUD_INDEX_SIZE 9
#define PGD_INDEX_SIZE 9
#ifndef __ASSEMBLY__
@@ -19,7 +19,7 @@
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
/* PMD_SHIFT determines what a second-level page table entry can map */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 90533ddcd70..a56b82fb060 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -4,10 +4,10 @@
#include <asm-generic/pgtable-nopud.h>
-#define PTE_INDEX_SIZE 12
-#define PMD_INDEX_SIZE 12
+#define PTE_INDEX_SIZE 8
+#define PMD_INDEX_SIZE 10
#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 4
+#define PGD_INDEX_SIZE 12
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
@@ -33,7 +33,8 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS 0x1ff
+/* PMDs point to PTE table fragments which are 4K aligned. */
+#define PMD_MASKED_BITS 0xfff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS 0x1ff
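[Editor's note] The widening of PMD_MASKED_BITS from 0x1ff to 0xfff follows from the PTE fragments being 4K aligned; a worked example with an invented address:

/* Worked example (address invented): recover the 4K-aligned PTE
 * fragment address from a PMD whose low 12 bits carry software state.
 */
static inline unsigned long frag_addr_demo(void)
{
	unsigned long pmd_raw = 0xc000000001234000UL | 0x3c5UL;

	return pmd_raw & ~0xfffUL;	/* == 0xc000000001234000UL */
}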
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 2b09cd522d3..eb9261024f5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -10,6 +10,7 @@
#else
#include <asm/pgtable-ppc64-4k.h>
#endif
+#include <asm/barrier.h>
#define FIRST_USER_ADDRESS 0
@@ -20,18 +21,11 @@
PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-
-/* Some sanity checking */
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#endif
-
/*
* Define the address range of the kernel non-linear virtual area
*/
@@ -41,7 +35,7 @@
#else
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
#endif
-#define KERN_VIRT_SIZE PGTABLE_RANGE
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
/*
* The vmalloc space starts at the beginning of that region, and
@@ -117,9 +111,6 @@
#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-
/*
* This is the default implementation of various PTE accessors, it's
* used in all cases except Book3S with 64K pages where we have a
@@ -164,7 +155,7 @@
#define pmd_present(pmd) (pmd_val(pmd) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+extern struct page *pmd_page(pmd_t pmd);
#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
#define pud_none(pud) (!pud_val(pud))
@@ -181,8 +172,7 @@
* Find an entry in a page-table-directory. We combine the address region
* (the high order N bits) and the pgd portion of the address.
*/
-/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
@@ -198,12 +188,14 @@
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long pte, int huge);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
+ unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@@ -214,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
- *ptep = __pte(old & ~clr);
+ *ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@@ -240,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -257,21 +250,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long old;
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old;
-
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@@ -294,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
- pte_update(mm, addr, ptep, ~0UL, 0);
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@@ -354,42 +346,235 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
+#endif /* __ASSEMBLY__ */
+
+/*
+ * THP pages can't be special, so reuse _PAGE_SPECIAL to mark splitting.
+ */
+#define _PAGE_SPLITTING _PAGE_SPECIAL
+
+/*
+ * We need to differentiate between an explicit huge page and a THP huge
+ * page, since a THP huge page also needs to track real subpage details.
+ */
+#define _PAGE_THP_HUGE _PAGE_4K_PFN
+
+/*
+ * Set of bits not changed by pmd_modify().
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
+ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
+ _PAGE_THP_HUGE)
+#ifndef __ASSEMBLY__
/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory. If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+ * The Linux hugepage PMD now includes the pmd entries followed by the
+ * address of the stashed pgtable_t. The stashed pgtable_t contains the
+ * hpte bits: [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]. We use
+ * one byte per HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256
+ * entries, and with 4K HPTEs we need 4096 entries. Both fit in a 4K
+ * pgtable_t.
+ *
+ * The last three bits are intentionally left as zero. These memory
+ * locations are also used as normal page PTE pointers, so if we have any
+ * pointers left around while we collapse a hugepage, we need to make sure
+ * the _PAGE_PRESENT and _PAGE_FILE bits of those are zero when we look at
+ * them.
+ */
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- pte_t *pt = NULL;
-
- pg = pgdir + pgd_index(ea);
- if (!pgd_none(*pg)) {
- pu = pud_offset(pg, ea);
- if (!pud_none(*pu)) {
- pm = pmd_offset(pu, ea);
- if (pmd_present(*pm))
- pt = pte_offset_kernel(pm, ea);
- }
- }
- return pt;
+ return (hpte_slot_array[index] >> 3) & 0x1;
}
-#ifdef CONFIG_HUGETLB_PAGE
-pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- unsigned *shift);
-#else
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- unsigned *shift)
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+ int index)
{
- if (shift)
- *shift = 0;
- return find_linux_pte(pgdir, ea);
+ return hpte_slot_array[index] >> 4;
}
-#endif /* !CONFIG_HUGETLB_PAGE */
-#endif /* __ASSEMBLY__ */
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+ unsigned int index, unsigned int hidx)
+{
+ hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
+}
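[Editor's note] A worked byte example for the layout above; the hidx value 0xb is invented for illustration:

/* Worked example of the [4-bit secondary+hidx | 1-bit valid | 000]
 * byte layout handled by the three accessors above.
 */
static inline void hpte_slot_demo(void)
{
	unsigned char slot[16] = { 0 };

	mark_hpte_slot_valid(slot, 0, 0xb);	/* slot[0] = 0xb << 4 | 1 << 3 = 0xb8 */
	/* hpte_valid(slot, 0)      == (0xb8 >> 3) & 1 == 1  */
	/* hpte_hash_index(slot, 0) == 0xb8 >> 4       == 0xb */
}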
+
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+ /*
+ * The hpte hash index is stored in the pgtable whose address is in the
+ * second half of the PMD.
+ *
+ * Order this load with the test for pmd_trans_huge in the caller.
+ */
+ smp_rmb();
+ return *(char **)(pmdp + PTRS_PER_PMD);
+}
+
+extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd);
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+ /*
+ * leaf pte for huge page, bottom two bits != 00
+ */
+ if (pmd_trans_huge(pmd))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+ return 0;
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ if (pmd_trans_huge(pmd))
+ return pmd_val(pmd) & _PAGE_SPLITTING;
+ return 0;
+}
+extern int has_transparent_hugepage(void);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+static inline pte_t *pmdp_ptep(pmd_t *pmd)
+{
+ return (pte_t *)pmd;
+}
+
+#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ /* Do nothing, mk_pmd() does this part. */
+ return pmd;
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~_PAGE_PRESENT;
+ return pmd;
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_SPLITTING;
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+
+extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t *pmdp,
+ unsigned long clr,
+ unsigned long set);
+
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ unsigned long old;
+
+ if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ return 0;
+ old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+ return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
+ return;
+
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
+}
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl)
+{
+ /*
+ * Archs like ppc64 use the pgtable to store per-pmd
+ * specific information. So when we switch the pmd,
+ * we should also withdraw and deposit the pgtable.
+ */
+ return true;
+}
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 89f158731ce..d98c1ecc326 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -3,20 +3,13 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;
-#ifdef CONFIG_DEBUG_VM
-extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
-#else /* CONFIG_DEBUG_VM */
-static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
-{
-}
-#endif /* !CONFIG_DEBUG_VM */
-
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC64)
@@ -25,18 +18,117 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
# include <asm/pgtable-ppc32.h>
#endif
+/*
+ * We save the slot number & secondary bit in the second half of the
+ * PTE page, using 8 bytes per pte entry.
+ */
+#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
+
#ifndef __ASSEMBLY__
+#include <asm/tlbflush.h>
+
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+#ifdef CONFIG_NUMA_BALANCING
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
+}
+
+#define pte_present_nonuma pte_present_nonuma
+static inline int pte_present_nonuma(pte_t pte)
+{
+ return pte_val(pte) & (_PAGE_PRESENT);
+}
+
+#define pte_numa pte_numa
+static inline int pte_numa(pte_t pte)
+{
+ return (pte_val(pte) &
+ (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+
+#define pte_mknonnuma pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_NUMA;
+ pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
+ return pte;
+}
+
+#define pte_mknuma pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ /*
+ * We should not set _PAGE_NUMA on non-present ptes. Also clear the
+ * present bit so that hash_page will return 1 and we collect this
+ * as a numa fault.
+ */
+ if (pte_present(pte)) {
+ pte_val(pte) |= _PAGE_NUMA;
+ pte_val(pte) &= ~_PAGE_PRESENT;
+ } else
+ VM_BUG_ON(1);
+ return pte;
+}
+
+#define ptep_set_numa ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
+ return;
+}
+
+#define pmd_numa pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+ return pte_numa(pmd_pte(pmd));
+}
+
+#define pmdp_set_numa pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
+ return;
+}
+
+#define pmd_mknonnuma pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
+}
+
+#define pmd_mknuma pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknuma(pmd_pte(pmd)));
+}
+
+#else
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
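[Editor's note] The net effect of the hinting bits can be traced end to end; a hedged sketch, where the helper name and the incoming PTE are hypothetical:

/* Illustrative trace of the NUMA-hinting transitions above. */
static inline void numa_hint_roundtrip(pte_t pte)	/* pte assumed present */
{
	pte = pte_mknuma(pte);
	/* pte_numa(pte) == 1; pte_present(pte) == 1 via _PAGE_NUMA;
	 * pte_present_nonuma(pte) == 0, so hardware faults on access */
	pte = pte_mknonnuma(pte);
	/* normal present PTE again, with _PAGE_ACCESSED also set */
}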
+
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
@@ -170,6 +262,10 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#define pgprot_cached_noncoherent(prot) \
+ (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
+#define pgprot_writecombine pgprot_noncached_wc
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -194,9 +290,6 @@ extern void paging_init(void);
*/
#define kern_addr_valid(addr) (1)
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#include <asm-generic/pgtable.h>
@@ -214,6 +307,35 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr);
+extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr);
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_large(pmd) 0
+#define has_transparent_hugepage() 0
+#endif
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift);
+
+static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
+ unsigned long *pte_sizep)
+{
+ pte_t *ptep;
+ unsigned long ps = *pte_sizep;
+ unsigned int shift;
+
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+ if (!ptep)
+ return NULL;
+ if (shift)
+ *pte_sizep = 1ul << shift;
+ else
+ *pte_sizep = PAGE_SIZE;
+
+ if (ps > *pte_sizep)
+ return NULL;
+
+ return ptep;
+}
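[Editor's note] Note that pte_sizep is in/out: it carries the minimum acceptable mapping size in and the actual size out. A call-side sketch, where the helper name and the 16MB requirement are invented:

/* Illustrative caller: require at least a 16MB mapping. */
static inline pte_t *lookup_at_least_16m(pgd_t *pgdir, unsigned long hva)
{
	unsigned long pte_size = 1UL << 24;	/* require >= 16MB */
	pte_t *ptep = lookup_linux_ptep(pgdir, hva, &pte_size);

	/* NULL if hva is unmapped or mapped smaller than 16MB; on
	 * success pte_size holds the real size (PAGE_SIZE or 1 << shift). */
	return ptep;
}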
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/phyp_dump.h b/arch/powerpc/include/asm/phyp_dump.h
deleted file mode 100644
index fa74c6c3e10..00000000000
--- a/arch/powerpc/include/asm/phyp_dump.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Hypervisor-assisted dump
- *
- * Linas Vepstas, Manish Ahuja 2008
- * Copyright 2008 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_PHYP_DUMP_H
-#define _PPC64_PHYP_DUMP_H
-
-#ifdef CONFIG_PHYP_DUMP
-
-/* The RMR region will be saved for later dumping
- * whenever the kernel crashes. Set this to 256MB. */
-#define PHYP_DUMP_RMR_START 0x0
-#define PHYP_DUMP_RMR_END (1UL<<28)
-
-struct phyp_dump {
- /* Memory that is reserved during very early boot. */
- unsigned long init_reserve_start;
- unsigned long init_reserve_size;
- /* cmd line options during boot */
- unsigned long reserve_bootvar;
- unsigned long phyp_dump_at_boot;
- /* Check status during boot if dump supported, active & present*/
- unsigned long phyp_dump_configured;
- unsigned long phyp_dump_is_active;
- /* store cpu & hpte size */
- unsigned long cpu_state_size;
- unsigned long hpte_region_size;
- /* previous scratch area values */
- unsigned long reserved_scratch_addr;
- unsigned long reserved_scratch_size;
-};
-
-extern struct phyp_dump *phyp_dump_info;
-
-int early_init_dt_scan_phyp_dump(unsigned long node,
- const char *uname, int depth, void *data);
-
-#endif /* CONFIG_PHYP_DUMP */
-#endif /* _PPC64_PHYP_DUMP_H */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
new file mode 100644
index 00000000000..12c32c5f533
--- /dev/null
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -0,0 +1,326 @@
+#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
+#define _ASM_POWERPC_PLPAR_WRAPPERS_H
+
+#include <linux/string.h>
+#include <linux/irqflags.h>
+
+#include <asm/hvcall.h>
+#include <asm/paca.h>
+#include <asm/page.h>
+
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
+static inline long poll_pending(void)
+{
+ return plpar_hcall_norets(H_POLL_PENDING);
+}
+
+static inline u8 get_cede_latency_hint(void)
+{
+ return get_lppaca()->cede_latency_hint;
+}
+
+static inline void set_cede_latency_hint(u8 latency_hint)
+{
+ get_lppaca()->cede_latency_hint = latency_hint;
+}
+
+static inline long cede_processor(void)
+{
+ return plpar_hcall_norets(H_CEDE);
+}
+
+static inline long extended_cede_processor(unsigned long latency_hint)
+{
+ long rc;
+ u8 old_latency_hint = get_cede_latency_hint();
+
+ set_cede_latency_hint(latency_hint);
+
+ rc = cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+
+ set_cede_latency_hint(old_latency_hint);
+
+ return rc;
+}
+
+static inline long vpa_call(unsigned long flags, unsigned long cpu,
+ unsigned long vpa)
+{
+ flags = flags << H_VPA_FUNC_SHIFT;
+
+ return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
+}
+
+static inline long unregister_vpa(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
+}
+
+static inline long register_vpa(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_VPA, cpu, vpa);
+}
+
+static inline long unregister_slb_shadow(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
+}
+
+static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_SLB, cpu, vpa);
+}
+
+static inline long unregister_dtl(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
+}
+
+static inline long register_dtl(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_DTL, cpu, vpa);
+}
+
+static inline long plpar_page_set_loaned(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
+static inline long plpar_page_set_active(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
+extern void vpa_init(int cpu);
+
+static inline long plpar_pte_enter(unsigned long flags,
+ unsigned long hpte_group, unsigned long hpte_v,
+ unsigned long hpte_r, unsigned long *slot)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
+
+ *slot = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * ptes must point to a buffer of at least 8*sizeof(unsigned long) bytes.
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+ memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+ return rc;
+}
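[Editor's note] Caller-side sketch of the sizing rule; the wrapper name is invented and ptex is assumed valid. H_READ with H_READ_4 returns four PTEs at two words each, hence eight unsigned longs:

/* Illustrative caller: four PTEs x two words each = 8 unsigned longs. */
static inline long read_four_ptes_demo(unsigned long ptex)
{
	unsigned long ptes[8];

	return plpar_pte_read_4_raw(0, ptex, ptes);
}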
+
+static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
+ unsigned long avpn)
+{
+ return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
+}
+
+static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
+ unsigned long *tce_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
+
+ *tce_ret = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval)
+{
+ return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
+}
+
+static inline long plpar_tce_put_indirect(unsigned long liobn,
+ unsigned long ioba, unsigned long page, unsigned long count)
+{
+ return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
+}
+
+static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval, unsigned long count)
+{
+ return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
+}
+
+/* Set various resource mode parameters */
+static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
+ unsigned long value1, unsigned long value2)
+{
+ return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
+}
+
+/*
+ * Enable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_reloc_on_exceptions(void)
+{
+ /* mflags = 3: Exceptions at 0xC000000000004000 */
+ return plpar_set_mode(3, 3, 0, 0);
+}
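[Editor's note] A retry sketch matching the note above. H_IS_LONG_BUSY() and get_longbusy_msecs() are existing hvcall.h helpers and mdelay() comes from linux/delay.h, but the loop itself is illustrative, not from this patch:

/* Illustrative retry loop for the partition-wide H_SET_MODE call. */
static inline long enable_reloc_on_exceptions_sync(void)
{
	long rc;

	do {
		rc = enable_reloc_on_exceptions();
		if (H_IS_LONG_BUSY(rc))
			mdelay(get_longbusy_msecs(rc));
	} while (H_IS_LONG_BUSY(rc));

	return rc;
}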
+
+/*
+ * Disable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long disable_reloc_on_exceptions(void)
+{
+ return plpar_set_mode(0, 3, 0, 0);
+}
+
+/*
+ * Take exceptions in big endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_big_endian_exceptions(void)
+{
+ /* mflags = 0: big endian exceptions */
+ return plpar_set_mode(0, 4, 0, 0);
+}
+
+/*
+ * Take exceptions in little endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_little_endian_exceptions(void)
+{
+ /* mflags = 1: little endian exceptions */
+ return plpar_set_mode(1, 4, 0, 0);
+}
+
+static inline long plapr_set_ciabr(unsigned long ciabr)
+{
+ return plpar_set_mode(0, 1, ciabr, 0);
+}
+
+static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
+{
+ return plpar_set_mode(0, 2, dawr0, dawrx0);
+}
+
+#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 00eedc5a4e6..10902c9375d 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -53,8 +53,8 @@
/* Here is the infamous series of OHare based machines
*/
-#define PMAC_TYPE_COMET 0x20 /* Beleived to be PowerBook 2400 */
-#define PMAC_TYPE_HOOPER 0x21 /* Beleived to be PowerBook 3400 */
+#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */
#define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */
#define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */
#define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */
diff --git a/arch/powerpc/include/asm/poll.h b/arch/powerpc/include/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/powerpc/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
deleted file mode 100644
index c4e396b540d..00000000000
--- a/arch/powerpc/include/asm/posix_types.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _ASM_POWERPC_POSIX_TYPES_H
-#define _ASM_POWERPC_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef long __kernel_suseconds_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-typedef unsigned int __kernel_old_uid_t;
-typedef unsigned int __kernel_old_gid_t;
-
-#ifdef __powerpc64__
-typedef unsigned long __kernel_nlink_t;
-typedef int __kernel_ipc_pid_t;
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef unsigned long __kernel_old_dev_t;
-#else
-typedef unsigned short __kernel_nlink_t;
-typedef short __kernel_ipc_pid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef unsigned int __kernel_old_dev_t;
-#endif
-
-#ifdef __powerpc64__
-typedef long long __kernel_loff_t;
-#else
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-#if defined(__KERNEL__)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned long *tmp = (unsigned long *)p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 16:
- tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
- tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
-
- case 8:
- tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
-
- case 4:
- tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
-#endif /* __GNUC__ */
-#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b819e..3132bb9365f 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Freescale Semicondutor, Inc.
+ * Copyright 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -15,11 +15,127 @@
#include <linux/stringify.h>
#include <asm/asm-compat.h>
+#define __REG_R0 0
+#define __REG_R1 1
+#define __REG_R2 2
+#define __REG_R3 3
+#define __REG_R4 4
+#define __REG_R5 5
+#define __REG_R6 6
+#define __REG_R7 7
+#define __REG_R8 8
+#define __REG_R9 9
+#define __REG_R10 10
+#define __REG_R11 11
+#define __REG_R12 12
+#define __REG_R13 13
+#define __REG_R14 14
+#define __REG_R15 15
+#define __REG_R16 16
+#define __REG_R17 17
+#define __REG_R18 18
+#define __REG_R19 19
+#define __REG_R20 20
+#define __REG_R21 21
+#define __REG_R22 22
+#define __REG_R23 23
+#define __REG_R24 24
+#define __REG_R25 25
+#define __REG_R26 26
+#define __REG_R27 27
+#define __REG_R28 28
+#define __REG_R29 29
+#define __REG_R30 30
+#define __REG_R31 31
+
+#define __REGA0_0 0
+#define __REGA0_R1 1
+#define __REGA0_R2 2
+#define __REGA0_R3 3
+#define __REGA0_R4 4
+#define __REGA0_R5 5
+#define __REGA0_R6 6
+#define __REGA0_R7 7
+#define __REGA0_R8 8
+#define __REGA0_R9 9
+#define __REGA0_R10 10
+#define __REGA0_R11 11
+#define __REGA0_R12 12
+#define __REGA0_R13 13
+#define __REGA0_R14 14
+#define __REGA0_R15 15
+#define __REGA0_R16 16
+#define __REGA0_R17 17
+#define __REGA0_R18 18
+#define __REGA0_R19 19
+#define __REGA0_R20 20
+#define __REGA0_R21 21
+#define __REGA0_R22 22
+#define __REGA0_R23 23
+#define __REGA0_R24 24
+#define __REGA0_R25 25
+#define __REGA0_R26 26
+#define __REGA0_R27 27
+#define __REGA0_R28 28
+#define __REGA0_R29 29
+#define __REGA0_R30 30
+#define __REGA0_R31 31
+
+/* opcode and xopcode for instructions */
+#define OP_TRAP 3
+#define OP_TRAP_64 2
+
+#define OP_31_XOP_TRAP 4
+#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_DCBST 54
+#define OP_31_XOP_LWZUX 55
+#define OP_31_XOP_TRAP_64 68
+#define OP_31_XOP_DCBF 86
+#define OP_31_XOP_LBZX 87
+#define OP_31_XOP_STWX 151
+#define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
+#define OP_31_XOP_STBUX 247
+#define OP_31_XOP_LHZX 279
+#define OP_31_XOP_LHZUX 311
+#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LHAX 343
+#define OP_31_XOP_LHAUX 375
+#define OP_31_XOP_STHX 407
+#define OP_31_XOP_STHUX 439
+#define OP_31_XOP_MTSPR 467
+#define OP_31_XOP_DCBI 470
+#define OP_31_XOP_LWBRX 534
+#define OP_31_XOP_TLBSYNC 566
+#define OP_31_XOP_STWBRX 662
+#define OP_31_XOP_LHBRX 790
+#define OP_31_XOP_STHBRX 918
+
+#define OP_LWZ 32
+#define OP_LD 58
+#define OP_LWZU 33
+#define OP_LBZ 34
+#define OP_LBZU 35
+#define OP_STW 36
+#define OP_STWU 37
+#define OP_STD 62
+#define OP_STB 38
+#define OP_STBU 39
+#define OP_LHZ 40
+#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
+#define OP_STH 44
+#define OP_STHU 45
+
/* sorted alphabetically */
+#define PPC_INST_BHRBE 0x7c00025c
+#define PPC_INST_CLRBHRB 0x7c00035c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
+#define PPC_INST_ICBT 0x7c00002c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@@ -27,18 +143,32 @@
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
#define PPC_INST_LWSYNC 0x7c2004ac
+#define PPC_INST_SYNC 0x7c0004ac
+#define PPC_INST_SYNC_MASK 0xfc0007fe
#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
+#define PPC_INST_POPCNTD 0x7c0003f4
+#define PPC_INST_POPCNTW 0x7c0002f4
#define PPC_INST_RFCI 0x4c000066
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
+#define PPC_INST_MFSPR_DSCR 0x7c1102a6
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR 0x7c1103a6
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -53,18 +183,88 @@
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
+#define PPC_INST_XXSWAPD 0xf0000250
+#define PPC_INST_XVCPSGNDP 0xf0000780
+#define PPC_INST_TRECHKPT 0x7c0007dd
+#define PPC_INST_TRECLAIM 0x7c00075d
+#define PPC_INST_TABORT 0x7c00071d
+
+#define PPC_INST_NAP 0x4c000364
+#define PPC_INST_SLEEP 0x4c0003a4
+
+/* A2 specific instructions */
+#define PPC_INST_ERATWE 0x7c0001a6
+#define PPC_INST_ERATRE 0x7c000166
+#define PPC_INST_ERATILX 0x7c000066
+#define PPC_INST_ERATIVAX 0x7c000666
+#define PPC_INST_ERATSX 0x7c000126
+#define PPC_INST_ERATSX_DOT 0x7c000127
+
+/* Misc instructions for BPF compiler */
+#define PPC_INST_LD 0xe8000000
+#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LHBRX 0x7c00062c
+#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_STD 0xf8000000
+#define PPC_INST_STDU 0xf8000001
+#define PPC_INST_MFLR 0x7c0802a6
+#define PPC_INST_MTLR 0x7c0803a6
+#define PPC_INST_CMPWI 0x2c000000
+#define PPC_INST_CMPDI 0x2c200000
+#define PPC_INST_CMPLW 0x7c000040
+#define PPC_INST_CMPLWI 0x28000000
+#define PPC_INST_ADDI 0x38000000
+#define PPC_INST_ADDIS 0x3c000000
+#define PPC_INST_ADD 0x7c000214
+#define PPC_INST_SUB 0x7c000050
+#define PPC_INST_BLR 0x4e800020
+#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_MULLW 0x7c0001d6
+#define PPC_INST_MULHWU 0x7c000016
+#define PPC_INST_MULLI 0x1c000000
+#define PPC_INST_DIVWU 0x7c000396
+#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLDICR 0x78000004
+#define PPC_INST_SLW 0x7c000030
+#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_AND 0x7c000038
+#define PPC_INST_ANDDOT 0x7c000039
+#define PPC_INST_OR 0x7c000378
+#define PPC_INST_XOR 0x7c000278
+#define PPC_INST_ANDI 0x70000000
+#define PPC_INST_ORI 0x60000000
+#define PPC_INST_ORIS 0x64000000
+#define PPC_INST_XORI 0x68000000
+#define PPC_INST_XORIS 0x6c000000
+#define PPC_INST_NEG 0x7c0000d0
+#define PPC_INST_BRANCH 0x48000000
+#define PPC_INST_BRANCH_COND 0x40800000
+#define PPC_INST_LBZCIX 0x7c0006aa
+#define PPC_INST_STBCIX 0x7c0007aa
/* macros to insert fields into opcodes */
-#define __PPC_RA(a) (((a) & 0x1f) << 16)
-#define __PPC_RB(b) (((b) & 0x1f) << 11)
-#define __PPC_RS(s) (((s) & 0x1f) << 21)
-#define __PPC_RT(s) __PPC_RS(s)
+#define ___PPC_RA(a) (((a) & 0x1f) << 16)
+#define ___PPC_RB(b) (((b) & 0x1f) << 11)
+#define ___PPC_RS(s) (((s) & 0x1f) << 21)
+#define ___PPC_RT(t) ___PPC_RS(t)
+#define __PPC_RA(a) ___PPC_RA(__REG_##a)
+#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
+#define __PPC_RB(b) ___PPC_RB(__REG_##b)
+#define __PPC_RS(s) ___PPC_RS(__REG_##s)
+#define __PPC_RT(t) ___PPC_RT(__REG_##t)
#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s) __PPC_XS(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
+#define __PPC_WS(w) (((w) & 0x1f) << 11)
+#define __PPC_SH(s) __PPC_WS(s)
+#define __PPC_MB(s) (((s) & 0x1f) << 6)
+#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_BI(s) (((s) & 0x1f) << 16)
+#define __PPC_CT(t) (((t) & 0x0f) << 21)
+
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
* larx with EH set as an illegal instruction.
@@ -81,29 +281,60 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
- __PPC_RT(t) | __PPC_RA(a) | \
- __PPC_RB(b) | __PPC_EH(eh))
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
- __PPC_RT(t) | __PPC_RA(a) | \
- __PPC_RB(b) | __PPC_EH(eh))
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
- __PPC_RB(b))
+ ___PPC_RB(b))
+#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
+ ___PPC_RB(b))
+#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
+ __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
+ __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \
+ __PPC_RA(a) | __PPC_RS(s))
#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
- __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b))
+ __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
__PPC_WC(w))
#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
- __PPC_RB(a) | __PPC_RS(lp))
+ ___PPC_RB(a) | ___PPC_RS(lp))
#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
- __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
- __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RA0(a) | __PPC_RB(b))
+
+#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
+ __PPC_T_TLB(t) | __PPC_RA0(a) | \
+ __PPC_RB(b))
+#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
+ __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_ERATSX(t, a, b) stringify_in_c(.long PPC_INST_ERATSX | \
+ __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_ERATSX_DOT(t, a, b) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
+ __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
+ __PPC_RT(t) | __PPC_RB(b))
+#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
+ __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
+/* PASemi instructions */
+#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \
+ __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
+#define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
/*
* Define what the VSX XX1 form instructions will look like, then add
@@ -112,10 +343,30 @@
#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \
- VSX_XX1((s), (a), (b)))
+ VSX_XX1((s), a, b))
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
- VSX_XX1((s), (a), (b)))
+ VSX_XX1((s), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
- VSX_XX3((t), (a), (b)))
+ VSX_XX3((t), a, b))
+#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
+ VSX_XX3((t), a, a))
+#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
+ VSX_XX3((t), (a), (b))))
+
+#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
+#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+
+/* BHRB instructions */
+#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
+#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
+ __PPC_RT(r) | \
+ (((n) & 0x3ff) << 11))
+
+/* Transactional memory instructions */
+#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
+#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
+ | __PPC_RA(r))
+#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \
+ | __PPC_RA(r))
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
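
Note on the macro rework above: __PPC_RT(R3) token-pastes its argument onto __REG_, so the single-underscore forms only accept the symbolic names R0-R31, while the triple-underscore ___PPC_* forms still take raw numbers (as the updated PPC_LDARX/PPC_LWARX users show). A minimal user-space sketch, not kernel code, replicating just enough of the header to show the composed encoding:

/*
 * Hypothetical stand-alone harness (not kernel code) replicating the
 * field macros above to show how an lwarx word is built.
 */
#include <stdint.h>
#include <stdio.h>

#define __REG_R3 3
#define __REG_R4 4
#define __REG_R5 5

#define ___PPC_RA(a) (((a) & 0x1f) << 16)
#define ___PPC_RB(b) (((b) & 0x1f) << 11)
#define ___PPC_RS(s) (((s) & 0x1f) << 21)
#define ___PPC_RT(t) ___PPC_RS(t)
#define __PPC_RA(a) ___PPC_RA(__REG_##a)
#define __PPC_RB(b) ___PPC_RB(__REG_##b)
#define __PPC_RT(t) ___PPC_RT(__REG_##t)

#define PPC_INST_LWARX 0x7c000028

int main(void)
{
	/* lwarx r3,r4,r5: RT goes at shift 21, RA at 16, RB at 11 */
	uint32_t insn = PPC_INST_LWARX |
			__PPC_RT(R3) | __PPC_RA(R4) | __PPC_RB(R5);

	/* __PPC_RT(3) would fail to compile (__REG_3 is undefined) --
	 * that rejection of raw numbers is the error checking gained. */
	printf("lwarx r3,r4,r5 = 0x%08x\n", insn); /* 0x7c642828 */
	return 0;
}
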
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004..db1e2b8eff3 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -45,94 +45,20 @@ extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);
extern int rtas_setup_phb(struct pci_controller *phb);
-extern unsigned long pci_probe_only;
-
-/* ---- EEH internal-use-only related routines ---- */
#ifdef CONFIG_EEH
-void pci_addr_cache_insert_device(struct pci_dev *dev);
-void pci_addr_cache_remove_device(struct pci_dev *dev);
-void pci_addr_cache_build(void);
-struct pci_dev *pci_get_device_by_addr(unsigned long addr);
-
-/**
- * eeh_slot_error_detail -- record and EEH error condition to the log
- * @pdn: pci device node
- * @severity: EEH_LOG_TEMP_FAILURE or EEH_LOG_PERM_FAILURE
- *
- * Obtains the EEH error details from the RTAS subsystem,
- * and then logs these details with the RTAS error log system.
- */
-#define EEH_LOG_TEMP_FAILURE 1
-#define EEH_LOG_PERM_FAILURE 2
-void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
-
-/**
- * rtas_pci_enable - enable IO transfers for this slot
- * @pdn: pci device node
- * @function: either EEH_THAW_MMIO or EEH_THAW_DMA
- *
- * Enable I/O transfers to this slot
- */
-#define EEH_THAW_MMIO 2
-#define EEH_THAW_DMA 3
-int rtas_pci_enable(struct pci_dn *pdn, int function);
-
-/**
- * rtas_set_slot_reset -- unfreeze a frozen slot
- * @pdn: pci device node
- *
- * Clear the EEH-frozen condition on a slot. This routine
- * does this by asserting the PCI #RST line for 1/8th of
- * a second; this routine will sleep while the adapter is
- * being reset.
- *
- * Returns a non-zero value if the reset failed.
- */
-int rtas_set_slot_reset (struct pci_dn *);
-int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs);
-
-/**
- * eeh_restore_bars - Restore device configuration info.
- * @pdn: pci device node
- *
- * A reset of a PCI device will clear out its config space.
- * This routines will restore the config space for this
- * device, and is children, to values previously obtained
- * from the firmware.
- */
-void eeh_restore_bars(struct pci_dn *);
-
-/**
- * rtas_configure_bridge -- firmware initialization of pci bridge
- * @pdn: pci device node
- *
- * Ask the firmware to configure all PCI bridges devices
- * located behind the indicated node. Required after a
- * pci device reset. Does essentially the same hing as
- * eeh_restore_bars, but for brdges, and lets firmware
- * do the work.
- */
-void rtas_configure_bridge(struct pci_dn *);
-
+void eeh_addr_cache_insert_dev(struct pci_dev *dev);
+void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
+struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
+void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
+int eeh_pci_enable(struct eeh_pe *pe, int function);
+int eeh_reset_pe(struct eeh_pe *);
+void eeh_save_bars(struct eeh_dev *edev);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
-
-/**
- * eeh_mark_slot -- set mode flags for pertition endpoint
- * @pdn: pci device node
- *
- * mark and clear slots: find "partition endpoint" PE and set or
- * clear the flags for each subnode of the PE.
- */
-void eeh_mark_slot (struct device_node *dn, int mode_flag);
-void eeh_clear_slot (struct device_node *dn, int mode_flag);
-
-/**
- * find_device_pe -- Find the associated "Partiationable Endpoint" PE
- * @pdn: pci device node
- */
-struct device_node * find_device_pe(struct device_node *dn);
+void eeh_pe_state_mark(struct eeh_pe *pe, int state);
+void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
@@ -142,6 +68,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
return pdev ? pci_name(pdev) : "<null>";
}
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+ return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
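
The new eeh_driver_name() helper mirrors eeh_pci_name(): both degrade to "<null>" instead of dereferencing a missing pdev or driver, so call sites can log unconditionally. A hedged sketch of such a call site (the function and message text are hypothetical):

/* Hypothetical diagnostic call site using the two helpers above. */
#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/ppc-pci.h>

static void example_eeh_report(struct pci_dev *pdev)
{
	/* Safe even for pdev == NULL or an unbound device */
	pr_err("EEH: frozen device %s (driver: %s)\n",
	       eeh_pci_name(pdev), eeh_driver_name(pdev));
}
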
diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h
new file mode 100644
index 00000000000..6ce90460553
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx_ocm.h
@@ -0,0 +1,45 @@
+/*
+ * PowerPC 4xx OCM memory allocation support
+ *
+ * (C) Copyright 2009, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@amcc.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_POWERPC_PPC4XX_OCM_H__
+#define __ASM_POWERPC_PPC4XX_OCM_H__
+
+#define PPC4XX_OCM_NON_CACHED 0
+#define PPC4XX_OCM_CACHED 1
+
+#if defined(CONFIG_PPC4xx_OCM)
+
+void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
+ int flags, const char *owner);
+void ppc4xx_ocm_free(const void *virt);
+
+#else
+
+#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL
+#define ppc4xx_ocm_free(addr) ((void)0)
+
+#endif /* CONFIG_PPC4xx_OCM */
+
+#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */
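
A 4xx platform driver would carve buffers out of on-chip memory roughly as follows; this is a hedged sketch (driver name, sizes and error handling are illustrative only), relying solely on the interface declared above:

/* Hypothetical 4xx driver fragment using the OCM allocator above. */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/ppc4xx_ocm.h>

static void *ocm_virt;
static phys_addr_t ocm_phys;

static int example_ocm_setup(void)
{
	/* 4 KiB, 32-byte aligned, uncached; "example-drv" tags the owner */
	ocm_virt = ppc4xx_ocm_alloc(&ocm_phys, 4096, 32,
				    PPC4XX_OCM_NON_CACHED, "example-drv");
	if (!ocm_virt)
		return -ENOMEM;	/* no OCM left, or CONFIG_PPC4xx_OCM=n */
	return 0;
}

static void example_ocm_teardown(void)
{
	ppc4xx_ocm_free(ocm_virt);
}
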
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 98210067c1c..7e461252854 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -4,7 +4,6 @@
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H
-#include <linux/init.h>
#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
@@ -24,13 +23,12 @@
* user_time and system_time fields in the paca.
*/
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
- beq 2f; /* if from kernel mode */ \
MFTB(ra); /* get timebase */ \
ld rb,PACA_STARTTIME_USER(r13); \
std ra,PACA_STARTTIME(r13); \
@@ -38,7 +36,6 @@
ld ra,PACA_USER_TIME(r13); \
add ra,ra,rb; /* add on to user time */ \
std ra,PACA_USER_TIME(r13); \
-2:
#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
MFTB(ra); /* get timebase */ \
@@ -56,10 +53,13 @@ BEGIN_FW_FTR_SECTION; \
/* from user - see if there are any DTL entries to process */ \
ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
- ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \
+ addi r10,r10,LPPACA_DTLIDX; \
+ LDX_BE r10,0,r10; /* get log write index */ \
cmpd cr1,r11,r10; \
beq+ cr1,33f; \
- bl .accumulate_stolen_time; \
+ bl accumulate_stolen_time; \
+ ld r12,_MSR(r1); \
+ andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
33: \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
@@ -68,7 +68,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
* Macros for storing registers into and loading registers from
@@ -97,71 +97,73 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
-#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
-#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b
+#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b
#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \
+ STXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+
+#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \
+ XXSWAPD(n,n)
+#endif
/* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b)
+#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b)
+#define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
-/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b)
-#define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
-#define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
-#define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
-#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
-#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b)
-#define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
-#define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
-#define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
-#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
-#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
-
-#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
-#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
-#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
-#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
-#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
-#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
-#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
-#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW or 31,31,31 # very low priority
@@ -170,58 +172,70 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define HMT_MEDIUM or 2,2,2
#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
#define HMT_HIGH or 3,3,3
+#define HMT_EXTRA_HIGH or 7,7,7 # power7 only
+
+#ifdef CONFIG_PPC64
+#define ULONG_SIZE 8
+#else
+#define ULONG_SIZE 4
+#endif
+#define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
+#define STACKFRAMESIZE 256
+#define __STK_REG(i) (112 + ((i)-14)*8)
+#define STK_REG(i) __STK_REG(__REG_##i)
+
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STK_GOT 24
+#define __STK_PARAM(i) (32 + ((i)-3)*8)
+#else
+#define STK_GOT 40
+#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#endif
+#define STK_PARAM(i) __STK_PARAM(__REG_##i)
+
+#if defined(_CALL_ELF) && _CALL_ELF == 2
#define _GLOBAL(name) \
.section ".text"; \
.align 2 ; \
+ .type name,@function; \
.globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
-#define _INIT_GLOBAL(name) \
- __REF; \
+#define _GLOBAL_TOC(name) \
+ .section ".text"; \
.align 2 ; \
+ .type name,@function; \
.globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
+0: addis r2,r12,(.TOC.-0b)@ha; \
+ addi r2,r2,(.TOC.-0b)@l; \
+ .localentry name,.-name
#define _KPROBE(name) \
.section ".kprobes.text","a"; \
.align 2 ; \
+ .type name,@function; \
.globl name; \
- .globl GLUE(.,name); \
- .section ".opd","aw"; \
-name: \
- .quad GLUE(.,name); \
- .quad .TOC.@tocbase; \
- .quad 0; \
- .previous; \
- .type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
-#define _STATIC(name) \
+#define DOTSYM(a) a
+
+#else
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
.section ".text"; \
.align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
.section ".opd","aw"; \
name: \
.quad GLUE(.,name); \
@@ -231,9 +245,13 @@ name: \
.type GLUE(.,name),@function; \
GLUE(.,name):
-#define _INIT_STATIC(name) \
- __REF; \
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define _KPROBE(name) \
+ .section ".kprobes.text","a"; \
.align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
.section ".opd","aw"; \
name: \
.quad GLUE(.,name); \
@@ -243,6 +261,10 @@ name: \
.type GLUE(.,name),@function; \
GLUE(.,name):
+#define DOTSYM(a) GLUE(.,a)
+
+#endif
+
#else /* 32-bit */
#define _ENTRY(n) \
@@ -255,6 +277,8 @@ n:
.globl n; \
n:
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
#define _KPROBE(n) \
.section ".kprobes.text","a"; \
.globl n; \
@@ -276,6 +300,11 @@ n:
* you want to access various offsets within it). On ppc32 this is
* identical to LOAD_REG_IMMEDIATE.
*
+ * LOAD_REG_ADDR_PIC(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * the kernel doesn't run at the linked or relocated address. Please
+ * note that this macro will clobber the lr register.
+ *
* LOAD_REG_ADDRBASE(rn, name)
* ADDROFF(name)
* LOAD_REG_ADDRBASE loads part of the address of label 'name' into
@@ -286,16 +315,29 @@ n:
* LOAD_REG_ADDRBASE(rX, name)
* ld rY,ADDROFF(name)(rX)
*/
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name) \
+ bl 0f; \
+0: mflr reg; \
+ addis reg,reg,(name - 0b)@ha; \
+ addi reg,reg,(name - 0b)@l;
+
#ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis (reg),(expr)@highest; \
- ori (reg),(reg),(expr)@higher; \
- rldicr (reg),(reg),32,31; \
- oris (reg),(reg),(expr)@h; \
- ori (reg),(reg),(expr)@l;
+ lis reg,(expr)@highest; \
+ ori reg,reg,(expr)@higher; \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(expr)@__AS_ATHIGH; \
+ ori reg,reg,(expr)@l;
#define LOAD_REG_ADDR(reg,name) \
- ld (reg),name@got(r2)
+ ld reg,name@got(r2)
#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
#define ADDROFF(name) 0
@@ -306,12 +348,12 @@ n:
#else /* 32-bit */
#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis (reg),(expr)@ha; \
- addi (reg),(reg),(expr)@l;
+ lis reg,(expr)@ha; \
+ addi reg,reg,(expr)@l;
#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
-#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha
+#define LOAD_REG_ADDRBASE(reg, name) lis reg,name@ha
#define ADDROFF(name) name@l
/* offsets for stack frame layout */
@@ -340,15 +382,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601
#endif
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest) \
-90: mftb dest; \
+90: mfspr dest, SPRN_TBRL; \
BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
-#else
+#elif defined(CONFIG_8xx)
#define MFTB(dest) mftb dest
+#else
+#define MFTB(dest) mfspr dest, SPRN_TBRL
#endif
#ifndef CONFIG_SMP
@@ -362,7 +406,33 @@ BEGIN_FTR_SECTION \
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif
-
+#ifdef CONFIG_PPC64
+#define MTOCRF(FXM, RS) \
+ BEGIN_FTR_SECTION_NESTED(848); \
+ mtcrf (FXM), RS; \
+ FTR_SECTION_ELSE_NESTED(848); \
+ mtocrf (FXM), RS; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+
+/*
+ * PPR restore macros used in entry_64.S
+ * Used for P7 or later processors
+ */
+#define HMT_MEDIUM_LOW_HAS_PPR \
+BEGIN_FTR_SECTION_NESTED(944) \
+ HMT_MEDIUM_LOW; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)
+
+#define SET_DEFAULT_THREAD_PPR(ra, rb) \
+BEGIN_FTR_SECTION_NESTED(945) \
+ lis ra,INIT_PPR@highest; /* default ppr=3 */ \
+ ld rb,PACACURRENT(r13); \
+ sldi ra,ra,32; /* bits 11-13 are used for ppr */ \
+ std ra,TASKTHREADPPR(rb); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+
+#endif
+
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
* the 403GCX and 405GP tlbia IS defined and tlbie is not.
@@ -387,6 +457,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define PPC440EP_ERR42
#endif
+/* The following stops all load and store data streams associated with stream
+ * ID (ie. streams created explicitly). The embedded and server mnemonics for
+ * dcbt are different so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch) \
+.machine push ; \
+.machine "power4" ; \
+ lis scratch,0x60000000@h; \
+ dcbt r0,scratch,0b01010; \
+.machine pop
+
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
* keep the address intact to be compatible with code shared with
@@ -448,6 +529,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#ifdef CONFIG_PPC_BOOK3S_64
#define RFI rfid
#define MTMSRD(r) mtmsrd r
+#define MTMSR_EERI(reg) mtmsrd reg,1
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
@@ -456,6 +538,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define RFI rfi; b . /* Prevent prefetch past rfi */
#endif
#define MTMSRD(r) mtmsr r
+#define MTMSR_EERI(reg) mtmsr reg
#define CLR_TOP32(r)
#endif
@@ -475,40 +558,46 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define cr7 7
-/* General Purpose Registers (GPRs) */
-
-#define r0 0
-#define r1 1
-#define r2 2
-#define r3 3
-#define r4 4
-#define r5 5
-#define r6 6
-#define r7 7
-#define r8 8
-#define r9 9
-#define r10 10
-#define r11 11
-#define r12 12
-#define r13 13
-#define r14 14
-#define r15 15
-#define r16 16
-#define r17 17
-#define r18 18
-#define r19 19
-#define r20 20
-#define r21 21
-#define r22 22
-#define r23 23
-#define r24 24
-#define r25 25
-#define r26 26
-#define r27 27
-#define r28 28
-#define r29 29
-#define r30 30
-#define r31 31
+/*
+ * General Purpose Registers (GPRs)
+ *
+ * The lower case r0-r31 should be used in preference to the upper
+ * case R0-R31 as they provide more error checking in the assembler.
+ * Use R0-R31 only when really necessary.
+ */
+
+#define r0 %r0
+#define r1 %r1
+#define r2 %r2
+#define r3 %r3
+#define r4 %r4
+#define r5 %r5
+#define r6 %r6
+#define r7 %r7
+#define r8 %r8
+#define r9 %r9
+#define r10 %r10
+#define r11 %r11
+#define r12 %r12
+#define r13 %r13
+#define r14 %r14
+#define r15 %r15
+#define r16 %r16
+#define r17 %r17
+#define r18 %r18
+#define r19 %r19
+#define r20 %r20
+#define r21 %r21
+#define r22 %r22
+#define r23 %r23
+#define r24 %r24
+#define r25 %r25
+#define r26 %r26
+#define r27 %r27
+#define r28 %r28
+#define r29 %r29
+#define r30 %r30
+#define r31 %r31
/* Floating Point Registers (FPRs) */
@@ -689,6 +778,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define N_SLINE 68
#define N_SO 100
-#endif /* __ASSEMBLY__ */
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * When read with the wrong endianness, however, the opcode for this
+ * instruction results in a b . + 8
+ *
+ * So essentially we use that trick to execute the following
+ * trampoline in "reverse endian" if we are running with the
+ * MSR_LE bit set the "wrong" way for whatever endianness the
+ * kernel is built for.
+ */
+#ifdef CONFIG_PPC_BOOK3E
+#define FIXUP_ENDIAN
+#else
+#define FIXUP_ENDIAN \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b $+36; /* Skip trampoline if endian is good */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x1c004a39; /* addi r10,r10,28 */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0xa6035a7d; /* mtsrr0 r10 */ \
+ .long 0xa6037b7d; /* mtsrr1 r11 */ \
+ .long 0x2400004c /* rfid */
+#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PPC_ASM_H */
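
The arithmetic behind FIXUP_ENDIAN checks out: tdi 0,0,0x48 assembles to 0x08000048 (primary opcode 2, TO=0, RA=0, SI=0x48), and those same four bytes fetched in the opposite byte order read as 0x48000008, an unconditional b .+8. A stand-alone check, assuming GCC's __builtin_bswap32:

/* User-space sanity check (not kernel code) for the FIXUP_ENDIAN trick. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t tdi_never = 0x08000048;	/* tdi 0,0,0x48: trap never */

	/* Byte-reversed, the same word is opcode 18 (branch) with LI=8 */
	assert(__builtin_bswap32(tdi_never) == 0x48000008);	/* b .+8 */

	/* So a wrong-endian fetch hops over the "b $+36" that follows and
	 * lands in the byte-reversed trampoline instructions. */
	return 0;
}
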
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
new file mode 100644
index 00000000000..3421637cfd7
--- /dev/null
+++ b/arch/powerpc/include/asm/probes.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_POWERPC_PROBES_H
+#define _ASM_POWERPC_PROBES_H
+#ifdef __KERNEL__
+/*
+ * Definitions common to probes files
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2012
+ */
+#include <linux/types.h>
+
+typedef u32 ppc_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+
+/* Trap definitions per ISA */
+#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
+
+#ifdef CONFIG_PPC64
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+#else
+#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#define MSR_SINGLESTEP (MSR_DE)
+#else
+#define MSR_SINGLESTEP (MSR_SE)
+#endif
+
+/* Enable single stepping for the current task */
+static inline void enable_single_step(struct pt_regs *regs)
+{
+ regs->msr |= MSR_SINGLESTEP;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ /*
+ * We turn off Critical Input Exception (CE) to ensure that the single
+ * step will be for the instruction we have the probe on; if we don't,
+ * it is possible we'd get the single step reported for CE.
+ */
+ regs->msr &= ~MSR_CE;
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
+#endif
+}
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PROBES_H */
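
As a sanity check on the predicates: BREAKPOINT_INSTRUCTION is tw 31,0,0, so it must satisfy IS_TW() and none of the immediate forms. A minimal sketch (hypothetical user-space harness duplicating the definitions above):

/* Hypothetical stand-alone check of the trap predicates (not kernel code). */
#include <assert.h>
#include <stdint.h>

typedef uint32_t ppc_opcode_t;
#define BREAKPOINT_INSTRUCTION 0x7fe00008	/* tw 31,0,0 */

#define IS_TW(instr)  (((instr) & 0xfc0007fe) == 0x7c000008)
#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)

int main(void)
{
	ppc_opcode_t brk = BREAKPOINT_INSTRUCTION;

	assert(IS_TW(brk));		/* register form, TO=31: trap always */
	assert(!IS_TWI(brk));		/* not the immediate form */
	assert(IS_TWI(0x0c200000));	/* twi 1,r0,0 does match IS_TWI */
	return 0;
}
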
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187ba02..6d59072e13a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -14,21 +14,43 @@
#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
#else
#define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
#endif
+#ifdef CONFIG_PPC64
+/* Default SMT priority is set to 3. Bits 11-13 are used to save the priority. */
+#define PPR_PRIORITY 3
+#ifdef __ASSEMBLY__
+#define INIT_PPR (PPR_PRIORITY << 50)
+#else
+#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PPC64 */
+
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/hw_breakpoint.h>
/* We do _not_ want to define new machine types at all, those must die
* in favor of using the device-tree
* -- BenH.
*/
-/* PREP sub-platform types see residual.h for these */
+/* PREP sub-platform types. Unused */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
@@ -44,13 +66,6 @@
extern int _chrp_type;
-#ifdef CONFIG_PPC_PREP
-
-/* what kind of prep workstation we are */
-extern int _prep_type;
-
-#endif /* CONFIG_PPC_PREP */
-
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
/*
@@ -73,12 +88,6 @@ struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/* Create a new kernel thread. */
-extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
@@ -99,8 +108,8 @@ extern struct task_struct *last_task_used_spe;
#endif
#ifdef CONFIG_PPC64
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
+/* 64-bit user address space is 46-bits (64TB user VM) */
+#define TASK_SIZE_USER64 (0x0000400000000000UL)
/*
* 32-bit user address space is 4GB - 1 page
@@ -122,7 +131,6 @@ extern struct task_struct *last_task_used_spe;
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
-#ifdef __KERNEL__
#ifdef __powerpc64__
#define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,37 +147,36 @@ extern struct task_struct *last_task_used_spe;
#define STACK_TOP_MAX STACK_TOP
#endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
typedef struct {
unsigned long seg;
} mm_segment_t;
-#define TS_FPROFFSET 0
-#define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
-struct thread_struct {
- unsigned long ksp; /* Kernel stack pointer */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+ u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+ u64 fpscr; /* Floating point status */
+};
-#ifdef CONFIG_PPC64
- unsigned long ksp_vsid;
-#endif
- struct pt_regs *regs; /* Pointer to saved register state */
- mm_segment_t fs; /* for get_fs() validation */
-#ifdef CONFIG_PPC32
- void *pgdir; /* root of page-table tree */
-#endif
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+ vector128 vr[32] __attribute__((aligned(16)));
+ vector128 vscr __attribute__((aligned(16)));
+};
+
+struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The following help to manage the use of Debug Control Registers
* on the BookE platforms.
*/
- unsigned long dbcr0;
- unsigned long dbcr1;
+ uint32_t dbcr0;
+ uint32_t dbcr1;
#ifdef CONFIG_BOOKE
- unsigned long dbcr2;
+ uint32_t dbcr2;
#endif
/*
* The stored value of the DBSR register will be the value at the
@@ -177,7 +184,7 @@ struct thread_struct {
* user (will never be written to) and has value while helping to
* describe the reason for the last debug trap. Torez
*/
- unsigned long dbsr;
+ uint32_t dbsr;
/*
* The following will contain addresses used by debug applications
* to help trace and trap on particular address locations.
@@ -197,13 +204,28 @@ struct thread_struct {
unsigned long dvc2;
#endif
#endif
- /* FP and VSX 0-31 register set */
- double fpr[32][TS_FPRWIDTH];
- struct {
+};
+
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
- unsigned int pad;
- unsigned int val; /* Floating point status */
- } fpscr;
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
+ struct pt_regs *regs; /* Pointer to saved register state */
+ mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+ /* BookE base exception scratch space; align on cacheline */
+ unsigned long normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+ void *pgdir; /* root of page-table tree */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
+#endif
+ /* Debug Registers */
+ struct debug_reg debug;
+ struct thread_fp_state fp_state;
+ struct thread_fp_state *fp_save_area;
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
@@ -218,12 +240,11 @@ struct thread_struct {
struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
- unsigned long dabr; /* Data address breakpoint register */
+ struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
+ unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
- /* Complete AltiVec register set */
- vector128 vr[32] __attribute__((aligned(16)));
- /* AltiVec status */
- vector128 vscr __attribute__((aligned(16)));
+ struct thread_vr_state vr_state;
+ struct thread_vr_state *vr_save_area;
unsigned long vrsave;
int used_vr; /* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
@@ -235,11 +256,60 @@ struct thread_struct {
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
unsigned long spefscr; /* SPE & eFP status */
+ unsigned long spefscr_last; /* SPEFSCR value on last prctl
+ call or trap return */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_tfhar; /* Transaction fail handler addr */
+ u64 tm_texasr; /* Transaction exception & summary */
+ u64 tm_tfiar; /* Transaction fail instr address reg */
+ unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
+ struct pt_regs ckpt_regs; /* Checkpointed registers */
+
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
+ /*
+ * Transactional FP and VSX 0-31 register set.
+ * NOTE: the sense of these is the opposite of the integer ckpt_regs!
+ *
+ * When a transaction is active/signalled/scheduled etc., *regs is the
+ * most recent set of (speculated) GPRs, with ckpt_regs being the older
+ * checkpointed regs to which we roll back if transaction aborts.
+ *
+ * However, fp_state is the checkpointed 'base state' of the FP regs, and
+ * transact_fp holds the new set of transactional values.
+ * VRs work the same way.
+ */
+ struct thread_fp_state transact_fp;
+ struct thread_vr_state transact_vr;
+ unsigned long transact_vrsave;
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+ struct kvm_vcpu *kvm_vcpu;
+#endif
+#ifdef CONFIG_PPC64
+ unsigned long dscr;
+ int dscr_inherit;
+ unsigned long ppr; /* used to save/restore SMT priority */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long tar;
+ unsigned long ebbrr;
+ unsigned long ebbhr;
+ unsigned long bescr;
+ unsigned long siar;
+ unsigned long sdar;
+ unsigned long sier;
+ unsigned long mmcr2;
+ unsigned mmcr0;
+ unsigned used_ebb;
+#endif
};
#define ARCH_MIN_TASKALIGN 16
@@ -249,7 +319,9 @@ struct thread_struct {
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
#ifdef CONFIG_SPE
-#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#define SPEFSCR_INIT \
+ .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+ .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif
@@ -266,12 +338,10 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
- .fpr = {{0}}, \
- .fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
+ .ppr = INIT_PPR, \
}
#endif
@@ -307,6 +377,13 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+extern void fp_enable(void);
+extern void vec_enable(void);
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
@@ -352,29 +429,47 @@ static inline void prefetchw(const void *x)
#define spin_lock_prefetch(x) prefetchw(x)
-#ifdef CONFIG_PPC64
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-#endif
#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- unsigned long sp;
-
if (is_32)
- sp = regs->gpr[1] & 0x0ffffffffUL;
- else
- sp = regs->gpr[1];
-
+ return sp & 0x0ffffffffUL;
return sp;
}
#else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
- return regs->gpr[1];
+ return sp;
}
#endif
+extern unsigned long cpuidle_disable;
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(int check_irq);
+extern void power7_sleep(void);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to);
+extern void cvt_df(double *from, float *to);
+extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
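
One detail of the fp_state rework deserves spelling out: with CONFIG_VSX each architected FP register occupies a 16-byte VSX slot, and TS_FPROFFSET flips with build endianness so that TS_FPR(i) always selects the doubleword holding the scalar FP value. A hedged user-space sketch of the indexing (the kernel's TS_FPR implicitly indexes fp_state inside thread_struct; this variant takes the state explicitly):

/* Stand-alone sketch (not kernel code) of the FP/VSX doubleword indexing. */
#include <stdint.h>
#include <stdio.h>

#define TS_FPRWIDTH 2			/* CONFIG_VSX layout */
#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0			/* FP scalar is the high doubleword */
#else
#define TS_FPROFFSET 1			/* ... the low one on little-endian */
#endif

struct thread_fp_state {
	uint64_t fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	uint64_t fpscr;
};

/* Hypothetical explicit-state variant of the kernel's TS_FPR(i) */
#define TS_FPR(fp, i) ((fp).fpr[i][TS_FPROFFSET])

int main(void)
{
	struct thread_fp_state fp = { .fpscr = 0 };

	TS_FPR(fp, 1) = 0x3ff0000000000000ULL;	/* 1.0 as an IEEE double */
	printf("fpr1 = 0x%016llx\n", (unsigned long long)TS_FPR(fp, 1));
	return 0;
}
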
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index ae26f2efd08..74b79f07f04 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -1,4 +1,3 @@
-#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
#ifndef _POWERPC_PROM_H
#define _POWERPC_PROM_H
#ifdef __KERNEL__
@@ -18,86 +17,155 @@
*/
#include <linux/types.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
-#define HAVE_ARCH_DEVTREE_FIXUPS
+/* These includes should be removed once implicit includes are cleaned up. */
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
-#ifdef CONFIG_PPC32
/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self-contained in a page, so that it can be kept around by the kernel;
+ * each property name appears only once in this page (cheap compression).
+ *
+ * The mem_rsvmap contains a map of reserved ranges of physical memory;
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * everyone's job. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0.
*/
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8* bus, u8* devfn);
-extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
-extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
-extern void pci_create_OF_bus_map(void);
-#endif
+struct boot_param_header {
+ __be32 magic; /* magic word OF_DT_HEADER */
+ __be32 totalsize; /* total size of DT block */
+ __be32 off_dt_struct; /* offset to structure */
+ __be32 off_dt_strings; /* offset to strings */
+ __be32 off_mem_rsvmap; /* offset to memory reserve map */
+ __be32 version; /* format version */
+ __be32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ __be32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ __be32 dt_struct_size; /* size of the DT structure block */
+};
/*
* OF address retrieval & translation
*/
-/* Translate a DMA address from device space to CPU space */
-extern u64 of_translate_dma_address(struct device_node *dev,
- const u32 *in_addr);
-
-#ifdef CONFIG_PCI
-extern unsigned long pci_address_to_pio(phys_addr_t address);
-#define pci_address_to_pio pci_address_to_pio
-#endif /* CONFIG_PCI */
-
/* Parse the ibm,dma-window property of an OF node into the busno, phys and
* size parameters.
*/
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size);
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+ unsigned long *busno, unsigned long *phys,
+ unsigned long *size);
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
-/* cache lookup */
-struct device_node *of_find_next_cache_node(struct device_node *np);
+extern void of_instantiate_rtc(void);
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_ibm_chip_id(struct device_node *np);
-#ifdef CONFIG_NUMA
-extern int of_node_to_nid(struct device_node *device);
-#else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
-#endif
-#define of_node_to_nid of_node_to_nid
+/* The of_drconf_cell struct defines the layout of the LMB array
+ * specified in the device tree property
+ * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
+ */
+struct of_drconf_cell {
+ u64 base_addr;
+ u32 drc_index;
+ u32 reserved;
+ u32 aa_index;
+ u32 flags;
+};
+
+#define DRCONF_MEM_ASSIGNED 0x00000008
+#define DRCONF_MEM_AI_INVALID 0x00000040
+#define DRCONF_MEM_RESERVED 0x00000080
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
+/*
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node. For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections that contain
+ * structures that contain various information.
*/
-struct pci_dev;
-struct of_irq;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-extern void of_instantiate_rtc(void);
+/* New method - extensible architecture description vector. */
+
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE 0x80 /* ignore this vector */
+#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP 0x80 /* floating point */
+#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported
+ * These bits are also used in firmware_has_feature() to validate
+ * the capabilities reported for vector 5 in the device tree so we
+ * encode the vector index in the define and use the OV5_FEAT()
+ * and OV5_INDX() macros to extract the desired information.
+ */
+#define OV5_FEAT(x) ((x) & 0xff)
+#define OV5_INDX(x) ((x) >> 8)
+#define OV5_LPAR 0x0280 /* logical partitioning supported */
+#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY 0x0220
+#define OV5_LARGE_PAGES 0x0210 /* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */
+#define OV5_MSI 0x0201 /* PCIe/MSI support */
+#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x0440 /* Page Coalescing */
+#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
+#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
+#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x0F01 /* 1, 2, or 4 Sub-Processors supported */
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX 0x02 /* Linux is our OS */
-/* These includes are put at the bottom because they may contain things
- * that are overridden by this file. Ideally they shouldn't be included
- * by this file, but there are a bunch of .c files that currently depend
- * on it. Eventually they will be cleaned up. */
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
+/*
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ */
+extern unsigned char ibm_architecture_vec[];
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
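
All boot_param_header fields are __be32, so consumers must byte-swap on little-endian hosts. A sketch of the minimal validation a loader would perform, assuming the conventional flattened-device-tree magic 0xd00dfeed for OF_DT_HEADER (that define sits outside this hunk) and a hand-rolled be32 reader:

/* Stand-alone sketch (not kernel code) of validating a DT blob header. */
#include <stdint.h>

#define OF_DT_HEADER  0xd00dfeed	/* assumed: standard FDT magic */
#define OF_DT_VERSION 0x10

static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Returns 0 if the blob matches the boot_param_header expectations. */
static int check_dt_blob(const uint8_t *blob)
{
	uint32_t magic     = read_be32(blob);		/* ->magic */
	uint32_t last_comp = read_be32(blob + 24);	/* ->last_comp_version */

	if (magic != OF_DT_HEADER)
		return -1;
	if (last_comp > OF_DT_VERSION)	/* blob newer than we understand */
		return -1;
	return 0;
}

int main(void)
{
	static const uint8_t not_a_dt[32] = { 0 };
	return check_dt_blob(not_a_dt) == -1 ? 0 : 1;
}
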
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 7f065e178ec..a1bc7e75842 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -21,10 +21,9 @@
#if !defined(_ASM_POWERPC_PS3_H)
#define _ASM_POWERPC_PS3_H
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
-#include "cell-pmu.h"
+#include <asm/cell-pmu.h>
union ps3_firmware_version {
u64 raw;
@@ -245,7 +244,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/ps3fb.h b/arch/powerpc/include/asm/ps3fb.h
deleted file mode 100644
index e7233a84968..00000000000
--- a/arch/powerpc/include/asm/ps3fb.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2006 Sony Computer Entertainment Inc.
- * Copyright 2006, 2007 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _ASM_POWERPC_PS3FB_H_
-#define _ASM_POWERPC_PS3FB_H_
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* ioctl */
-#define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */
-#define PS3FB_IOCTL_GETMODE _IOR('r', 2, int) /* get video mode */
-#define PS3FB_IOCTL_SCREENINFO _IOR('r', 3, int) /* get screen info */
-#define PS3FB_IOCTL_ON _IO('r', 4) /* use IOCTL_FSEL */
-#define PS3FB_IOCTL_OFF _IO('r', 5) /* return to normal-flip */
-#define PS3FB_IOCTL_FSEL _IOW('r', 6, int) /* blit and flip request */
-
-#ifndef FBIO_WAITFORVSYNC
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) /* wait for vsync */
-#endif
-
-struct ps3fb_ioctl_res {
- __u32 xres; /* frame buffer x_size */
- __u32 yres; /* frame buffer y_size */
- __u32 xoff; /* margine x */
- __u32 yoff; /* margine y */
- __u32 num_frames; /* num of frame buffers */
-};
-
-#endif /* _ASM_POWERPC_PS3FB_H_ */
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
index 082d515930a..576ad88104c 100644
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -40,7 +40,7 @@
#define _PAGE_U1 0x010000
#define _PAGE_U0 0x020000
#define _PAGE_ACCESSED 0x040000
-#define _PAGE_LENDIAN 0x080000
+#define _PAGE_ENDIAN 0x080000
#define _PAGE_GUARDED 0x100000
#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
@@ -72,6 +72,9 @@
#define PTE_RPN_SHIFT (24)
#endif
+#define PTE_WIMGE_SHIFT (19)
+#define PTE_BAP_SHIFT (2)
+
/* On 32-bit, we never clear the top part of the PTE */
#ifdef CONFIG_PPC32
#define _PTE_NONE_MASK 0xffffffff00000000ULL
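
The point of the new PTE_WIMGE_SHIFT is that the storage-attribute
bits defined above (endian, guarded, coherent, cache-inhibited,
write-through at bits 19-23) become a contiguous WIMGE field once
shifted down. A standalone sketch under that assumption; the PTE value
is invented:

#include <stdio.h>

#define _PAGE_ENDIAN	0x080000
#define _PAGE_GUARDED	0x100000
#define _PAGE_COHERENT	0x200000
#define _PAGE_NO_CACHE	0x400000
#define PTE_WIMGE_SHIFT	(19)

int main(void)
{
	/* Invented PTE with I (cache-inhibit) and G (guarded) set */
	unsigned long pte = _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* E..W land in bits 0..4 of the extracted field */
	unsigned long wimge = (pte >> PTE_WIMGE_SHIFT) & 0x1f;

	printf("WIMGE field: 0x%02lx\n", wimge);	/* prints 0x0a */
	return 0;
}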
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 76bb195e4f2..8d1569c2904 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -86,7 +86,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
#endif
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
+/* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* on platforms where such control is possible.
*/
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES)
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
@@ -174,7 +174,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/*
* Don't just check for any non zero bits in __PAGE_USER, since for book3e
* and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
- * _PAGE_USER. Need to explictly match _PAGE_BAP_UR bit in that case too.
+ * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/
#define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER)
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index c4490f9c67c..d836d945068 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -22,8 +22,8 @@
#define _PAGE_HASHPTE _PAGE_HPTE_SUB
/* Note the full page bits must be in the same location as for normal
- * 4k pages as the same asssembly will be used to insert 64K pages
- * wether the kernel has CONFIG_PPC_64K_PAGES or not
+ * 4k pages as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
*/
#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
@@ -47,7 +47,7 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), ((e) & _PAGE_COMBO) ? \
+ (e), (pte_val(e) & _PAGE_COMBO) ? \
(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
@@ -58,14 +58,16 @@
/* Trick: we set __end to va + 64k, which happens to work for
* a 16M page as well as we want only one iteration
*/
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- unsigned long __end = va + PAGE_SIZE; \
- unsigned __split = (psize == MMU_PAGE_4K || \
- psize == MMU_PAGE_64K_AP); \
- shift = mmu_psize_defs[psize].shift; \
- for (index = 0; va < __end; index++, va += (1L << shift)) { \
- if (!__split || __rpte_sub_valid(rpte, index)) do { \
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
+ do { \
+ unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
+ unsigned __split = (psize == MMU_PAGE_4K || \
+ psize == MMU_PAGE_64K_AP); \
+ shift = mmu_psize_defs[psize].shift; \
+ for (index = 0; vpn < __end; index++, \
+ vpn += (1L << (shift - VPN_SHIFT))) { \
+ if (!__split || __rpte_sub_valid(rpte, index)) \
+ do {
#define pte_iterate_hashed_end() } while(0); } } while(0)
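
Call sites bracket per-subpage work between the two macros. A hedged
sketch of the shape only — the surrounding variables are placeholders,
and the flush call stands in for whatever a real caller does per valid
subpage:

	unsigned long vpn = start_vpn;	/* placeholder input */
	unsigned long index, shift;

	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
		/* runs once per valid hashed subpage; 'index' names the
		 * subpage and 'vpn' advances by the subpage size */
		flush_hash_page(vpn, rpte, psize, ssize, local);	/* e.g. */
	} pte_iterate_hashed_end();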
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 0419eeb5327..2505d8eab15 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -19,7 +19,7 @@
#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
#define _PAGE_DIRTY 0x0080 /* C: page changed */
@@ -27,6 +27,12 @@
#define _PAGE_RW 0x0200 /* software: user write access allowed */
#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+/*
+ * Used for tracking numa faults
+ */
+#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */
+
+
/* No separate kernel read-only */
#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 0175a676b34..279b80f3bb2 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_POWERPC_PTRACE_H
-#define _ASM_POWERPC_PTRACE_H
-
/*
* Copyright (C) 2001 PPC64 Team, IBM Corp
*
@@ -23,45 +20,31 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
-#include <linux/types.h>
+#include <uapi/asm/ptrace.h>
-#ifndef __ASSEMBLY__
-struct pt_regs {
- unsigned long gpr[32];
- unsigned long nip;
- unsigned long msr;
- unsigned long orig_gpr3; /* Used for restarting system calls */
- unsigned long ctr;
- unsigned long link;
- unsigned long xer;
- unsigned long ccr;
#ifdef __powerpc64__
- unsigned long softe; /* Soft enabled/disabled */
-#else
- unsigned long mq; /* 601 only (not used at present) */
- /* Used on APUS to hold IPL value. */
-#endif
- unsigned long trap; /* Reason for being here */
- /* N.B. for critical exceptions on 4xx, the dar and dsisr
- fields are overloaded to hold srr0 and srr1. */
- unsigned long dar; /* Fault registers */
- unsigned long dsisr; /* on 4xx/Book-E used for ESR */
- unsigned long result; /* Result of a system call */
-};
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#ifdef __powerpc64__
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE 512
+#define KERNEL_REDZONE_SIZE 288
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
- STACK_FRAME_OVERHEAD + 288)
+ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
/* Size of dummy stack frame allocated when calling signal handler. */
@@ -70,6 +53,8 @@ struct pt_regs {
#else /* __powerpc64__ */
+#define USER_REDZONE_SIZE 0
+#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
@@ -83,17 +68,32 @@ struct pt_regs {
#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#define user_stack_pointer(regs) ((regs)->gpr[1])
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-#define regs_return_value(regs) ((regs)->gpr[3])
+#define GET_IP(regs) ((regs)->nip)
+#define GET_USP(regs) ((regs)->gpr[1])
+#define GET_FP(regs) (0)
+#define SET_FP(regs, val)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
+#define profile_pc profile_pc
#endif
+#include <asm-generic/ptrace.h>
+
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@@ -106,10 +106,13 @@ extern unsigned long profile_pc(struct pt_regs *regs);
} while(0)
struct task_struct;
-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
+extern int ptrace_get_reg(struct task_struct *task, int regno,
+ unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
unsigned long data);
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
/*
* We use the least-significant bit of the trap field to indicate
* whether we have saved the full set of registers, or only a
@@ -125,8 +128,10 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
#endif /* ! __powerpc64__ */
#define TRAP(regs) ((regs)->trap & ~0xF)
#ifdef __powerpc64__
+#define NV_REG_POISON 0xdeadbeefdeadbeefUL
#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
#else
+#define NV_REG_POISON 0xdeadbeef
#define CHECK_FULL_REGS(regs) \
do { \
if ((regs)->trap & 1) \
@@ -203,225 +208,12 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
-/*
- * Offsets used by 'ptrace' system call interface.
- * These can't be changed without breaking binary compatibility
- * with MkLinux, etc.
- */
-#define PT_R0 0
-#define PT_R1 1
-#define PT_R2 2
-#define PT_R3 3
-#define PT_R4 4
-#define PT_R5 5
-#define PT_R6 6
-#define PT_R7 7
-#define PT_R8 8
-#define PT_R9 9
-#define PT_R10 10
-#define PT_R11 11
-#define PT_R12 12
-#define PT_R13 13
-#define PT_R14 14
-#define PT_R15 15
-#define PT_R16 16
-#define PT_R17 17
-#define PT_R18 18
-#define PT_R19 19
-#define PT_R20 20
-#define PT_R21 21
-#define PT_R22 22
-#define PT_R23 23
-#define PT_R24 24
-#define PT_R25 25
-#define PT_R26 26
-#define PT_R27 27
-#define PT_R28 28
-#define PT_R29 29
-#define PT_R30 30
-#define PT_R31 31
-
-#define PT_NIP 32
-#define PT_MSR 33
-#define PT_ORIG_R3 34
-#define PT_CTR 35
-#define PT_LNK 36
-#define PT_XER 37
-#define PT_CCR 38
#ifndef __powerpc64__
-#define PT_MQ 39
-#else
-#define PT_SOFTE 39
-#endif
-#define PT_TRAP 40
-#define PT_DAR 41
-#define PT_DSISR 42
-#define PT_RESULT 43
-#define PT_REGS_COUNT 44
-
-#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
-
-#ifndef __powerpc64__
-
-#define PT_FPR31 (PT_FPR0 + 2*31)
-#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
-
#else /* __powerpc64__ */
-
-#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
-
-#ifdef __KERNEL__
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
-#endif
-
-#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
-#define PT_VSCR (PT_VR0 + 32*2 + 1)
-#define PT_VRSAVE (PT_VR0 + 33*2)
-
-#ifdef __KERNEL__
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
-#endif
-
-/*
- * Only store first 32 VSRs here. The second 32 VSRs in VR0-31
- */
-#define PT_VSR0 150 /* each VSR reg occupies 2 slots in 64-bit */
-#define PT_VSR31 (PT_VSR0 + 2*31)
-#ifdef __KERNEL__
#define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
-#endif
#endif /* __powerpc64__ */
-
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadword. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the same
- * structures. This also simplifies the implementation of a bi-arch
- * (combined (32- and 64-bit) gdb.
- */
-#define PTRACE_GETVRREGS 18
-#define PTRACE_SETVRREGS 19
-
-/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
- * spefscr, in one go */
-#define PTRACE_GETEVRREGS 20
-#define PTRACE_SETEVRREGS 21
-
-/* Get the first 32 128bit VSX registers */
-#define PTRACE_GETVSRREGS 27
-#define PTRACE_SETVSRREGS 28
-
-/*
- * Get or set a debug register. The first 16 are DABR registers and the
- * second 16 are IABR registers.
- */
-#define PTRACE_GET_DEBUGREG 25
-#define PTRACE_SET_DEBUGREG 26
-
-/* (new) PTRACE requests using the same numbers as x86 and the same
- * argument ordering. Additionally, they support more registers too
- */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETFPREGS 14
-#define PTRACE_SETFPREGS 15
-#define PTRACE_GETREGS64 22
-#define PTRACE_SETREGS64 23
-
-/* (old) PTRACE requests with inverted arguments */
-#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
-#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
-#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
-#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
-
-/* Calls to trace a 64bit program from a 32bit program */
-#define PPC_PTRACE_PEEKTEXT_3264 0x95
-#define PPC_PTRACE_PEEKDATA_3264 0x94
-#define PPC_PTRACE_POKETEXT_3264 0x93
-#define PPC_PTRACE_POKEDATA_3264 0x92
-#define PPC_PTRACE_PEEKUSR_3264 0x91
-#define PPC_PTRACE_POKEUSR_3264 0x90
-
-#define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */
-
-#define PPC_PTRACE_GETHWDBGINFO 0x89
-#define PPC_PTRACE_SETHWDEBUG 0x88
-#define PPC_PTRACE_DELHWDEBUG 0x87
-
-#ifndef __ASSEMBLY__
-
-struct ppc_debug_info {
- __u32 version; /* Only version 1 exists to date */
- __u32 num_instruction_bps;
- __u32 num_data_bps;
- __u32 num_condition_regs;
- __u32 data_bp_alignment;
- __u32 sizeof_condition; /* size of the DVC register */
- __u64 features;
-};
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * features will have bits indication whether there is support for:
- */
-#define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x0000000000000001
-#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002
-#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
-#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
-
-#ifndef __ASSEMBLY__
-
-struct ppc_hw_breakpoint {
- __u32 version; /* currently, version must be 1 */
- __u32 trigger_type; /* only some combinations allowed */
- __u32 addr_mode; /* address match mode */
- __u32 condition_mode; /* break/watchpoint condition flags */
- __u64 addr; /* break/watchpoint address */
- __u64 addr2; /* range end or mask */
- __u64 condition_value; /* contents of the DVC register */
-};
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Trigger Type
- */
-#define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x00000001
-#define PPC_BREAKPOINT_TRIGGER_READ 0x00000002
-#define PPC_BREAKPOINT_TRIGGER_WRITE 0x00000004
-#define PPC_BREAKPOINT_TRIGGER_RW \
- (PPC_BREAKPOINT_TRIGGER_READ | PPC_BREAKPOINT_TRIGGER_WRITE)
-
-/*
- * Address Mode
- */
-#define PPC_BREAKPOINT_MODE_EXACT 0x00000000
-#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x00000001
-#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x00000002
-#define PPC_BREAKPOINT_MODE_MASK 0x00000003
-
-/*
- * Condition Mode
- */
-#define PPC_BREAKPOINT_CONDITION_MODE 0x00000003
-#define PPC_BREAKPOINT_CONDITION_NONE 0x00000000
-#define PPC_BREAKPOINT_CONDITION_AND 0x00000001
-#define PPC_BREAKPOINT_CONDITION_EXACT PPC_BREAKPOINT_CONDITION_AND
-#define PPC_BREAKPOINT_CONDITION_OR 0x00000002
-#define PPC_BREAKPOINT_CONDITION_AND_OR 0x00000003
-#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000
-#define PPC_BREAKPOINT_CONDITION_BE_SHIFT 16
-#define PPC_BREAKPOINT_CONDITION_BE(n) \
- (1<<((n)+PPC_BREAKPOINT_CONDITION_BE_SHIFT))
-
#endif /* _ASM_POWERPC_PTRACE_H */
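
The helpers above encode the powerpc syscall error convention: failure
is signalled by the summary-overflow bit of CR0 (0x10000000 in the
saved CCR) with a positive errno in r3, rather than by a negative
return range. A standalone sketch of the same logic over invented
register images:

#include <stdio.h>

struct fake_regs {
	unsigned long gpr3;	/* r3: result, or positive errno */
	unsigned long ccr;	/* saved condition register */
};

static int is_syscall_success(const struct fake_regs *regs)
{
	return !(regs->ccr & 0x10000000);	/* CR0.SO clear => success */
}

static long regs_return_value(const struct fake_regs *regs)
{
	return is_syscall_success(regs) ? (long)regs->gpr3
					: -(long)regs->gpr3;
}

int main(void)
{
	struct fake_regs ok  = { .gpr3 = 42, .ccr = 0 };
	struct fake_regs bad = { .gpr3 = 14, .ccr = 0x10000000 }; /* EFAULT */

	printf("%ld %ld\n", regs_return_value(&ok), regs_return_value(&bad));
	return 0;
}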
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 0947b36e534..32b9bfa0c9b 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -196,7 +196,7 @@ static inline int qe_alive_during_sleep(void)
/* Structure that defines QE firmware binary files.
*
- * See Documentation/powerpc/qe-firmware.txt for a description of these
+ * See Documentation/powerpc/qe_firmware.txt for a description of these
* fields.
*/
struct qe_firmware {
@@ -499,6 +499,7 @@ enum comm_dir {
/* I-RAM */
#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */
#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */
+#define QE_IRAM_READY 0x80000000 /* Ready */
/* UPC */
#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index cf519663a79..25784cc959a 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -101,32 +101,35 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
@@ -135,7 +138,7 @@ static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* _ASM_POWERPC_QE_IC_H */
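
The change from desc->handler_data and desc->chip to the
irq_desc_get_handler_data()/irq_desc_get_chip() accessors follows the
genirq API, which stopped exposing struct irq_desc internals. For
context, the data these handlers fetch is installed at setup time
roughly as below — a sketch, with the parent interrupt number a
placeholder (in-tree this happens in the QE IC init code):

	/* Stash the qe_ic pointer where the cascade handlers above
	 * will find it via irq_desc_get_handler_data(), then chain
	 * the handler onto the upstream (MPIC) interrupt. */
	irq_set_handler_data(parent_virq, qe_ic);
	irq_set_chained_handler(parent_virq, qe_ic_cascade_low_mpic);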
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ff0005eec7d..bffd89d2730 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -29,6 +29,10 @@
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_LG 33 /* Trans Mem state (2 bits) */
+#define MSR_TM_LG 32 /* Trans Mem Available */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_VSX_LG 23 /* Enable VSX */
#define MSR_POW_LG 18 /* Enable Power Management */
@@ -98,18 +102,38 @@
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+#define MSR_TM __MASK(MSR_TM_LG) /* Transactional Mem Available */
+#define MSR_TS_N 0 /* Non-transactional */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
#if defined(CONFIG_PPC_BOOK3S_64)
+#define MSR_64BIT MSR_SF
+
/* Server variant */
-#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
-#define MSR_KERNEL MSR_ | MSR_SF
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_SF
+#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_ __MSR
+#else
+#define MSR_ (__MSR | MSR_LE)
+#endif
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
/* Default MSR for kernel mode. */
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
#endif
+#ifndef MSR_64BIT
+#define MSR_64BIT 0
+#endif
+
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
@@ -170,8 +194,29 @@
#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
/* Special Purpose Registers (SPRNs)*/
+
+#ifdef CONFIG_40x
+#define SPRN_PID 0x3B1 /* Process ID */
+#else
+#define SPRN_PID 0x030 /* Process ID */
+#ifdef CONFIG_BOOKE
+#define SPRN_PID0 SPRN_PID /* Process ID Register 0 */
+#endif
+#endif
+
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
+#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_AMR 0x1d /* Authority Mask Register */
+#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
+#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
+#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
+#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
+#define SPRN_TEXASRU 0x83 /* TEXASR Upper 32 bits */
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -179,14 +224,29 @@
#define CTRL_CT1 0x40000000 /* thread 1 */
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
+#define SPRN_DAWR 0xB4
+#define SPRN_RPR 0xBA /* Relative Priority Register */
+#define SPRN_CIABR 0xBB
+#define CIABR_PRIV 0x3
+#define CIABR_PRIV_USER 1
+#define CIABR_PRIV_SUPER 2
+#define CIABR_PRIV_HYPER 3
+#define SPRN_DAWRX 0xBC
+#define DAWRX_USER __MASK(0)
+#define DAWRX_KERNEL __MASK(1)
+#define DAWRX_HYP __MASK(2)
+#define DAWRX_WTI __MASK(3)
+#define DAWRX_WT __MASK(4)
+#define DAWRX_DR __MASK(5)
+#define DAWRX_DW __MASK(6)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
-#define DABR_TRANSLATION (1UL << 2)
-#define DABR_DATA_WRITE (1UL << 1)
-#define DABR_DATA_READ (1UL << 0)
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
-#define DABRX_USER (1UL << 0)
-#define DABRX_KERNEL (1UL << 1)
+#define DABRX_USER __MASK(0)
+#define DABRX_KERNEL __MASK(1)
+#define DABRX_HYP __MASK(2)
+#define DABRX_BTI __MASK(3)
+#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
@@ -195,13 +255,105 @@
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
+#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
#define SPRN_SPURR 0x134 /* Scaled PURR */
+#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
+#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
+#define SPRN_HDSISR 0x132
+#define SPRN_HDAR 0x133
+#define SPRN_HDEC 0x136 /* Hypervisor Decrementer */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
+#define SPRN_RMOR 0x138 /* Real mode offset register */
+#define SPRN_HRMOR 0x139 /* Hypervisor Real mode offset register */
+#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_IC 0x350 /* Virtual Instruction Count */
+#define SPRN_VTB 0x351 /* Virtual Time Base */
+#define SPRN_LDBAR 0x352 /* LD Base Address Register */
+#define SPRN_PMICR 0x354 /* Power Management Idle Control Reg */
+#define SPRN_PMSR 0x355 /* Power Management Status Reg */
+#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
+#define SPRN_PMCR 0x374 /* Power Management Control Register */
+
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_BHRB_LG 4 /* Enable Branch History Rolling Buffer*/
+#define FSCR_PM_LG 3 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
+#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
+#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
+#define LPCR_VPM0 (1ul << (63-0))
+#define LPCR_VPM1 (1ul << (63-1))
+#define LPCR_ISL (1ul << (63-2))
+#define LPCR_VC_SH (63-2)
+#define LPCR_DPFD_SH (63-11)
+#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
+#define LPCR_VRMASD (0x1ful << (63-16))
+#define LPCR_VRMA_L (1ul << (63-12))
+#define LPCR_VRMA_LP0 (1ul << (63-15))
+#define LPCR_VRMA_LP1 (1ul << (63-16))
+#define LPCR_VRMASD_SH (63-16)
+#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
+#define LPCR_RMLS_SH (63-37)
+#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
+#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
+#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
+#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
+#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
+#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
+#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
+#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
+#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
+#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
+#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
+#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_MER_SH 11
+#define LPCR_TC 0x00000200 /* Translation control */
+#define LPCR_LPES 0x0000000c
+#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
+#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
+#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
+#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
+#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#endif
+#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
+#define SPRN_HMER 0x150 /* Hypervisor Maintenance Exception Register */
+#define SPRN_HMEER 0x151 /* Hypervisor Maintenance Exception Enable Reg */
+#define SPRN_PCR 0x152 /* Processor compatibility register */
+#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
+#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
+#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
+#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
+#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
+#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
+#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
+#define SPRN_TLBRPNR 0x156 /* P7 TLB control register */
+#define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */
#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
@@ -218,6 +370,7 @@
#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+#define SPRN_PPR 0x380 /* SMT Thread status Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Register */
@@ -241,10 +394,13 @@
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
@@ -281,8 +437,15 @@
#define HID0_BTCD (1<<1) /* Branch target cache disable */
#define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */
#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
+/* POWER8 HID0 bits */
+#define HID0_POWER8_4LPARMODE __MASK(61)
+#define HID0_POWER8_2LPARMODE __MASK(57)
+#define HID0_POWER8_1TO2LPAR __MASK(52)
+#define HID0_POWER8_1TO4LPAR __MASK(51)
+#define HID0_POWER8_DYNLPARDIS __MASK(48)
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
+#ifdef CONFIG_6xx
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
@@ -292,12 +455,22 @@
#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
+#endif
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
+#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
+#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
+#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
+#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
+#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
@@ -402,6 +575,8 @@
#ifndef SPRN_PIR
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
+#define SPRN_TIR 0x1BE /* Thread Identification Register */
+#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -415,25 +590,42 @@
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_USPRG3 0x103 /* SPRG3 userspace read */
#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
+#define SPRN_USPRG4 0x104 /* SPRG4 userspace read */
#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
+#define SPRN_USPRG5 0x105 /* SPRG5 userspace read */
#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
+#define SPRN_USPRG6 0x106 /* SPRG6 userspace read */
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
+#define SPRN_USPRG7 0x107 /* SPRG7 userspace read */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
+#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
+#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKERESET 0x00380000 /* System reset */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
+#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
+#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
+ * may not be recoverable */
+#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
+#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGILL 0x00080000 /* Illegal instruction */
#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
+
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
+#define HSRR1_DENORM 0x00100000 /* Denorm exception */
#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
@@ -488,19 +680,28 @@
#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
-#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
-#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_PMXE ASM_CONST(0x04000000) /* perf mon exception enable */
+#define MMCR0_FCECE ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
+#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
+#define MMCR0_PMCC 0x000c0000UL /* PMC control */
+#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
-#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
-#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define MMCR0_C56RUN ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO ASM_CONST(0x00000080)
#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
+#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
+#define SPRN_MMCR2 769
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -515,6 +716,19 @@
#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
#define POWER6_MMCRA_THRM 0x00000020UL
#define POWER6_MMCRA_OTHER 0x0000000EUL
+
+#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
+#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
+
+#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */
+#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */
+#define SPRN_MMCRC 851 /* Core monitor mode control register */
+#define SPRN_EBBHR 804 /* Event based branch handler register */
+#define SPRN_EBBRR 805 /* Event based branch return register */
+#define SPRN_BESCR 806 /* Branch event status and control register */
+#define BESCR_GE 0x8000000000000000ULL /* Global Enable */
+#define SPRN_WORT 895 /* Workload optimization register - thread */
+
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -525,6 +739,21 @@
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
+#define SPRN_SIER 784
+#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_TACR 888
+#define SPRN_TCSCR 889
+#define SPRN_CSIGR 890
+#define SPRN_SPMC1 892
+#define SPRN_SPMC2 893
+
+/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+#define MMCR2_USER_MASK 0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
+#define SIER_USER_MASK 0x7fffffUL
#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL
@@ -661,20 +890,23 @@
* SPRG usage:
*
* All 64-bit:
- * - SPRG1 stores PACA pointer
+ * - SPRG1 stores PACA pointer, except on 64-bit server in
+ * HV mode, in which case it is HSPRG0
*
* 64-bit server:
- * - SPRG0 unused (reserved for HV on Power4)
+ * - SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
- * - SPRG3 unused (user visible)
+ * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
+ * - HSPRG0 stores PACA in HV mode
+ * - HSPRG1 scratch for "HV" exceptions
*
* 64-bit embedded
* - SPRG0 generic exception scratch
* - SPRG2 TLB exception stack
- * - SPRG3 unused (user visible)
+ * - SPRG3 critical exception scratch (user visible, sorry!)
* - SPRG4 unused (user visible)
 * - SPRG6 TLB miss scratch (user visible, sorry!)
- * - SPRG7 critical exception scratch
+ * - SPRG7 CPU and NUMA node for VDSO getcpu (user visible)
* - SPRG8 machine check exception scratch
* - SPRG9 debug exception scratch
*
@@ -729,15 +961,59 @@
#ifdef CONFIG_PPC_BOOK3S_64
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
+#define SPRN_SPRG_HPACA SPRN_HSPRG0
+#define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1
+#define SPRN_SPRG_VDSO_READ SPRN_USPRG3
+#define SPRN_SPRG_VDSO_WRITE SPRN_SPRG3
+
+#define GET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_PACA; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HPACA; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_PACA,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HPACA,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define GET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_SCRATCH0; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HSCRATCH0; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#define SET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_SCRATCH0,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HSCRATCH0,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
+#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX
+
#endif
#ifdef CONFIG_PPC_BOOK3E_64
#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
-#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7
+#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3
#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
+#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH
+#define SPRN_SPRG_VDSO_READ SPRN_USPRG7
+#define SPRN_SPRG_VDSO_WRITE SPRN_SPRG7
+
+#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
+#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
+
#endif
#ifdef CONFIG_PPC_BOOK3S_32
@@ -768,8 +1044,8 @@
#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
-#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R
-#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W
+#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
#ifdef CONFIG_E200
@@ -779,8 +1055,6 @@
#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
#endif
-#define SPRN_SPRG_RVCPU SPRN_SPRG1
-#define SPRN_SPRG_WVCPU SPRN_SPRG1
#endif
#ifdef CONFIG_8xx
@@ -788,6 +1062,8 @@
#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
#endif
+
+
/*
 * An mtfsf instruction with the L bit set. On CPUs that support this, a
 * full 64 bits of FPSCR is restored and on other CPUs the L bit is ignored.
@@ -806,7 +1082,7 @@
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
-#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
+#define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr))
/*
* IBM has further subdivided the standard PowerPC 16-bit version and
@@ -828,6 +1104,7 @@
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
#define PVR_476 0x11a52000
+#define PVR_476FPE 0x7ff50000
#define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000
@@ -850,6 +1127,12 @@
#define PVR_7450 0x80000000
#define PVR_8540 0x80200000
#define PVR_8560 0x80200000
+#define PVR_VER_E500V1 0x8020
+#define PVR_VER_E500V2 0x8021
+#define PVR_VER_E500MC 0x8023
+#define PVR_VER_E5500 0x8024
+#define PVR_VER_E6500 0x8040
+
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
@@ -868,48 +1151,63 @@
#define PVR_476_ISS 0x00052000
/* 64-bit processors */
-/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
-#define PV_NORTHSTAR 0x0033
-#define PV_PULSAR 0x0034
-#define PV_POWER4 0x0035
-#define PV_ICESTAR 0x0036
-#define PV_SSTAR 0x0037
-#define PV_POWER4p 0x0038
-#define PV_970 0x0039
-#define PV_POWER5 0x003A
-#define PV_POWER5p 0x003B
-#define PV_970FX 0x003C
-#define PV_630 0x0040
-#define PV_630p 0x0041
-#define PV_970MP 0x0044
-#define PV_970GX 0x0045
-#define PV_BE 0x0070
-#define PV_PA6T 0x0090
+#define PVR_NORTHSTAR 0x0033
+#define PVR_PULSAR 0x0034
+#define PVR_POWER4 0x0035
+#define PVR_ICESTAR 0x0036
+#define PVR_SSTAR 0x0037
+#define PVR_POWER4p 0x0038
+#define PVR_970 0x0039
+#define PVR_POWER5 0x003A
+#define PVR_POWER5p 0x003B
+#define PVR_970FX 0x003C
+#define PVR_POWER6 0x003E
+#define PVR_POWER7 0x003F
+#define PVR_630 0x0040
+#define PVR_630p 0x0041
+#define PVR_970MP 0x0044
+#define PVR_970GX 0x0045
+#define PVR_POWER7p 0x004A
+#define PVR_POWER8E 0x004B
+#define PVR_POWER8 0x004D
+#define PVR_BE 0x0070
+#define PVR_PA6T 0x0090
+
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204 0x0f000001
+#define PVR_ARCH_205 0x0f000002
+#define PVR_ARCH_206 0x0f000003
+#define PVR_ARCH_206p 0x0f100003
+#define PVR_ARCH_207 0x0f000004
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+ asm volatile("mfmsr %0" : "=r" (rval) : \
+ : "memory"); rval;})
#ifdef CONFIG_PPC_BOOK3S_64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
: : "r" (v) : "memory")
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
-#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
+#define mtmsr(v) asm volatile("mtmsr %0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
#endif
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
-#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
: "memory")
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
asm volatile( \
- "90: mftb %0;\n" \
+ "90: mfspr %0, %2;\n" \
"97: cmpwi %0,0;\n" \
" beq- 90b;\n" \
"99:\n" \
@@ -923,18 +1221,30 @@
" .llong 0\n" \
" .llong 0\n" \
".previous" \
- : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
+ : "=r" (rval) \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ rval;})
#else
#define mftb() ({unsigned long rval; \
- asm volatile("mftb %0" : "=r" (rval)); rval;})
+ asm volatile("mfspr %0, %1" : \
+ "=r" (rval) : "i" (SPRN_TBRL)); rval;})
#endif /* !CONFIG_PPC_CELL */
#else /* __powerpc64__ */
+#if defined(CONFIG_8xx)
#define mftbl() ({unsigned long rval; \
asm volatile("mftbl %0" : "=r" (rval)); rval;})
#define mftbu() ({unsigned long rval; \
asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRL)); rval;})
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRU)); rval;})
+#endif
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
@@ -948,30 +1258,12 @@
#define proc_trap() asm volatile("trap")
-#ifdef CONFIG_PPC64
-
-extern void ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-#define ppc64_runlatch_off() \
- do { \
- if (cpu_has_feature(CPU_FTR_CTRL) && \
- test_thread_flag(TIF_RUNLATCH)) \
- __ppc64_runlatch_off(); \
- } while (0)
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
-#else
-#define ppc64_runlatch_on()
-#define ppc64_runlatch_off()
-
-#endif /* CONFIG_PPC64 */
-
-#define __get_SP() ({unsigned long sp; \
- asm volatile("mr %0,1": "=r" (sp)); sp;})
-
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
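
Most of the definitions above are consumed through the
mfmsr()/mfspr()/mtspr() wrappers; note the (unsigned long) cast added
to mtmsr()/mtspr() so a 32-bit constant argument is widened before it
reaches the asm. A kernel-context sketch combining the accessors with
the new TM state tests — the DSCR manipulation is arbitrary, for
illustration only:

	/* Read-modify-write an SPR via the wrappers above */
	unsigned long dscr = mfspr(SPRN_DSCR);
	mtspr(SPRN_DSCR, dscr | 1);	/* arbitrary example value */

	/* MSR[TS] holds the transaction state on TM-capable CPUs */
	if (MSR_TM_ACTIVE(mfmsr())) {
		/* transactional or suspended: take the TM-aware path */
	}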
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
new file mode 100644
index 00000000000..3ba9c6f096f
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -0,0 +1,156 @@
+/*
+ * Register definitions specific to the A2 core
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_REG_A2_H__
+#define __ASM_POWERPC_REG_A2_H__
+
+#define SPRN_TENSR 0x1b5
+#define SPRN_TENS 0x1b6 /* Thread ENable Set */
+#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
+
+#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */
+#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */
+#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */
+#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */
+#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */
+#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */
+#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */
+
+#define SPRN_IAR 0x372
+
+#define SPRN_IUCR0 0x3f3
+#define IUCR0_ICBI_ACK 0x1000
+
+#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */
+
+#define A2_IERAT_SIZE 16
+#define A2_DERAT_SIZE 32
+
+/* A2 MMUCR0 bits */
+#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */
+#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */
+#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */
+#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */
+#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */
+#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */
+#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */
+#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */
+#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */
+#define MMUCR0_TID_MASK 0x000000ff /* TID field */
+
+/* A2 MMUCR1 bits */
+#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */
+#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */
+#define MMUCR1_REE 0x20000000 /* Reference Exception Enable */
+#define MMUCR1_CEE 0x10000000 /* Change exception enable */
+#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */
+#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all except isync */
+#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */
+#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */
+#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */
+#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */
+#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */
+#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */
+#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */
+
+/* A2 MMUCR2 bits */
+#define MMUCR2_PSSEL_SHIFT 4
+
+/* A2 MMUCR3 bits */
+#define MMUCR3_THID 0x0000000f /* Thread ID */
+
+/* *** ERAT TLB bits definitions */
+#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000)
+#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00)
+#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000)
+#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400)
+#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800)
+#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00)
+#define TLB0_V ASM_CONST(0x0000000000000200)
+#define TLB0_X ASM_CONST(0x0000000000000100)
+#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0)
+#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010)
+#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030)
+#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050)
+#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070)
+#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0)
+#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f)
+#define TLB0_THDID_0 ASM_CONST(0x0000000000000001)
+#define TLB0_THDID_1 ASM_CONST(0x0000000000000002)
+#define TLB0_THDID_2 ASM_CONST(0x0000000000000004)
+#define TLB0_THDID_3 ASM_CONST(0x0000000000000008)
+#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f)
+
+#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000)
+#define TLB1_U0 ASM_CONST(0x0008000000000000)
+#define TLB1_U1 ASM_CONST(0x0004000000000000)
+#define TLB1_U2 ASM_CONST(0x0002000000000000)
+#define TLB1_U3 ASM_CONST(0x0001000000000000)
+#define TLB1_R ASM_CONST(0x0000800000000000)
+#define TLB1_C ASM_CONST(0x0000400000000000)
+#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000)
+#define TLB1_W ASM_CONST(0x0000000000000800)
+#define TLB1_I ASM_CONST(0x0000000000000400)
+#define TLB1_M ASM_CONST(0x0000000000000200)
+#define TLB1_G ASM_CONST(0x0000000000000100)
+#define TLB1_E ASM_CONST(0x0000000000000080)
+#define TLB1_VF ASM_CONST(0x0000000000000040)
+#define TLB1_UX ASM_CONST(0x0000000000000020)
+#define TLB1_SX ASM_CONST(0x0000000000000010)
+#define TLB1_UW ASM_CONST(0x0000000000000008)
+#define TLB1_SW ASM_CONST(0x0000000000000004)
+#define TLB1_UR ASM_CONST(0x0000000000000002)
+#define TLB1_SR ASM_CONST(0x0000000000000001)
+
+/* A2 erativax attributes definitions */
+#define ERATIVAX_RS_IS_ALL 0x000
+#define ERATIVAX_RS_IS_TID 0x040
+#define ERATIVAX_RS_IS_CLASS 0x080
+#define ERATIVAX_RS_IS_FULLMATCH 0x0c0
+#define ERATIVAX_CLASS_00 0x000
+#define ERATIVAX_CLASS_01 0x010
+#define ERATIVAX_CLASS_10 0x020
+#define ERATIVAX_CLASS_11 0x030
+#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1)
+#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1)
+#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1)
+#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1)
+#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1)
+
+/* A2 eratilx attributes definitions */
+#define ERATILX_T_ALL 0
+#define ERATILX_T_TID 1
+#define ERATILX_T_TGS 2
+#define ERATILX_T_FULLMATCH 3
+#define ERATILX_T_CLASS0 4
+#define ERATILX_T_CLASS1 5
+#define ERATILX_T_CLASS2 6
+#define ERATILX_T_CLASS3 7
+
+/* XUCR0 bits */
+#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */
+#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */
+#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */
+#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */
+
+/* A2 CCR0 register */
+#define A2_CCR0_PME_DISABLED 0x00000000
+#define A2_CCR0_PME_SLEEP 0x40000000
+#define A2_CCR0_PME_RVW 0x80000000
+#define A2_CCR0_PME_DISABLED2 0xc0000000
+
+/* A2 CCR2 register */
+#define A2_CCR2_ERAT_ONLY_MODE 0x00000001
+#define A2_CCR2_ENABLE_ICSWX 0x00000002
+#define A2_CCR2_ENABLE_PC 0x20000000
+#define A2_CCR2_ENABLE_TRACE 0x40000000
+
+#endif /* __ASM_POWERPC_REG_A2_H__ */
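
One detail worth calling out in the new header: the TLB0_THDID_* bits
are one-hot, so a TLB entry can be exposed to any subset of the A2
core's four threads. A standalone sketch; the entry word and the local
ASM_CONST stub are invented for illustration:

#include <stdio.h>

#define ASM_CONST(x)	x##UL	/* stand-in for the kernel macro */
#define TLB0_THDID_0	ASM_CONST(0x0000000000000001)
#define TLB0_THDID_2	ASM_CONST(0x0000000000000004)

int main(void)
{
	/* Invented entry visible to threads 0 and 2 only */
	unsigned long tlb0 = TLB0_THDID_0 | TLB0_THDID_2;

	for (int t = 0; t < 4; t++)
		printf("thread %d: %s\n", t,
		       (tlb0 & (1UL << t)) ? "visible" : "hidden");
	return 0;
}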
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 667a498eaee..464f1089b53 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -2,7 +2,7 @@
* Contains register definitions common to the Book E PowerPC
* specification. Notice that while the IBM-40x series of CPUs
* are not true Book E PowerPCs, they borrowed a number of features
- * before Book E was finalized, and are included here as well. Unfortunatly,
+ * before Book E was finalized, and are included here as well. Unfortunately,
* they sometimes used different locations than true Book E CPUs did.
*
* This program is free software; you can redistribute it and/or
@@ -27,10 +27,12 @@
#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
#if defined(CONFIG_PPC_BOOK3E_64)
-#define MSR_ MSR_ME | MSR_CE
-#define MSR_KERNEL MSR_ | MSR_CM
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE
-#define MSR_USER64 MSR_USER32 | MSR_CM | MSR_DE
+#define MSR_64BIT MSR_CM
+
+#define MSR_ (MSR_ME | MSR_CE)
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
#elif defined (CONFIG_40x)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
@@ -54,17 +56,31 @@
#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_DBCR4 0x233 /* Debug Control Register 4 */
+#define SPRN_MSRP 0x137 /* MSR Protect Register */
#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_LPID 0x152 /* Logical Partition ID */
#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
+#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
+#define SPRN_GSPRG0 0x170 /* Guest SPRG0 */
+#define SPRN_GSPRG1 0x171 /* Guest SPRG1 */
+#define SPRN_GSPRG2 0x172 /* Guest SPRG2 */
+#define SPRN_GSPRG3 0x173 /* Guest SPRG3 */
#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0 0x17A /* Guest SRR0 */
+#define SPRN_GSRR1 0x17B /* Guest SRR1 */
+#define SPRN_GEPR 0x17C /* Guest EPR */
+#define SPRN_GDEAR 0x17D /* Guest DEAR */
+#define SPRN_GPIR 0x17E /* Guest PIR */
+#define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */
#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -81,6 +97,18 @@
#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
+#define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */
+#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
+#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
+#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */
+#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
+#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
+#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
+#define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */
+#define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */
+#define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */
+#define SPRN_GIVPR 0x1BF /* Guest IVPR */
#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
@@ -110,7 +138,7 @@
#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
-#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
+#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */
@@ -143,6 +171,7 @@
#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
#define SPRN_SVR 0x3FF /* System Version Register */
/*
@@ -150,8 +179,6 @@
* or IBM 40x.
*/
#ifdef CONFIG_BOOKE
-#define SPRN_PID 0x030 /* Process ID */
-#define SPRN_PID0 SPRN_PID/* Process ID Register 0 */
#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */
#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */
#define SPRN_DEAR 0x03D /* Data Error Address Register */
@@ -168,7 +195,6 @@
#define SPRN_TCR 0x154 /* Timer Control Register */
#endif /* Book E */
#ifdef CONFIG_40x
-#define SPRN_PID 0x3B1 /* Process ID */
#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
#define SPRN_DEAR 0x3D5 /* Data Error Address Register */
@@ -184,10 +210,22 @@
#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
#endif
+#ifdef CONFIG_PPC_ICSWX
+#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */
+#endif
+
/* Bit definitions for CCR1. */
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT 8
+#define PWRMGTCR0_PW20_ENT 0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
+#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
+
/* Bit definitions for the MCSR. */
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
@@ -232,6 +270,10 @@
#define MCSR_LDG 0x00002000UL /* Guarded Load */
#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP 0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
#endif
#ifdef CONFIG_E200
@@ -246,6 +288,20 @@
store or cache line push */
#endif
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
+#define HID1_RFXE 0x00020000 /* Read fault exception enable */
+#define HID1_R1DPE 0x00008000 /* R1 data bus parity enable */
+#define HID1_R2DPE 0x00004000 /* R2 data bus parity enable */
+#define HID1_ASTME 0x00002000 /* Address bus streaming mode enable */
+#define HID1_ABE 0x00001000 /* Address broadcast enable */
+#define HID1_MPXTT 0x00000400 /* MPX re-map transfer type */
+#define HID1_ATS 0x00000080 /* Atomic status */
+#define HID1_MID_MASK 0x0000000f /* MID input pins */
+#endif
+
/* Bit definitions for the DBSR. */
/*
* DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
@@ -301,6 +357,7 @@
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_SPV 0x00000080 /* Signal Processing operation */
/* Bit definitions related to the DBCR0. */
#if defined(CONFIG_40x)
@@ -334,7 +391,7 @@
#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_iac_range(task) ((task)->thread.dbcr0)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0)
#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */
#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */
#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */
@@ -348,7 +405,7 @@
#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */
#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */
-#define dbcr_dac(task) ((task)->thread.dbcr1)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
#define DBCR_DAC1R DBCR1_DAC1R
#define DBCR_DAC1W DBCR1_DAC1W
#define DBCR_DAC2R DBCR1_DAC2R
@@ -394,7 +451,7 @@
#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_dac(task) ((task)->thread.dbcr0)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
#define DBCR_DAC1R DBCR0_DAC1R
#define DBCR_DAC1W DBCR0_DAC1W
#define DBCR_DAC2R DBCR0_DAC2R
@@ -428,7 +485,7 @@
#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
-#define dbcr_iac_range(task) ((task)->thread.dbcr1)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */
#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */
#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */
@@ -493,6 +550,13 @@
#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+#ifdef CONFIG_E500
+#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \
+ (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr) (((tcr) & 0xC0000000) >> 30)
+#endif
+
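TCR_GET_WP() reassembles the watchdog period field, which e500 splits across TCR[WP] and an extension field. A hedged usage sketch; mfspr() and SPRN_TCR come from the regular register headers, while the wrapper itself is hypothetical:

/* Sketch: read the effective watchdog period from the TCR. */
static inline unsigned int watchdog_period(void)
{
	return TCR_GET_WP(mfspr(SPRN_TCR));
}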
/* Bit definitions for the TSR. */
#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
@@ -519,6 +583,7 @@
/* Bit definitions for L1CSR0. */
#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* Data Cache Unable to Lock */
#define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */
#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
#define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */
@@ -530,6 +595,9 @@
#define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */
#define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */
+/* Bit definitions for L1CSR2. */
+#define L1CSR2_DCWS 0x40000000 /* Data Cache write shadow */
+
/* Bit definitions for L2CSR0. */
#define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */
#define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity/ECC Enable */
@@ -568,6 +636,17 @@
#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
* for hypervisor */
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT 31
+#define EPC_EAS 0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT 30
+#define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT 29
+#define EPC_ELPID 0x00ff0000
+#define EPC_ELPID_SHIFT 16
+#define EPC_EPID 0x00003fff
+#define EPC_EPID_SHIFT 0
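The EPC_* masks describe the layout shared by EPLC and EPSC, the external-PID load/store context registers. As an illustrative sketch only (the helper is made up), a value selecting user-mode accesses under a given translation PID could be built as:

/* Sketch: EPLC/EPSC value for problem-state accesses with the given
 * PID; the address-space, guest and LPID fields are left clear. */
static inline unsigned int epc_user_value(unsigned int pid)
{
	return EPC_EPR | ((pid << EPC_EPID_SHIFT) & EPC_EPID);
}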
/*
* The IBM-403 is an even more odd special case, as it is much
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 77bb71cfd99..0e3ddf5177f 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -17,12 +17,16 @@
/* Freescale Book E Performance Monitor APU Registers */
#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
-#define PMRN_PMC2 0x012 /* Performance Monitor Counter 1 */
-#define PMRN_PMC3 0x013 /* Performance Monitor Counter 1 */
+#define PMRN_PMC2 0x012 /* Performance Monitor Counter 2 */
+#define PMRN_PMC3 0x013 /* Performance Monitor Counter 3 */
+#define PMRN_PMC4 0x014 /* Performance Monitor Counter 4 */
+#define PMRN_PMC5 0x015 /* Performance Monitor Counter 5 */
#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
+#define PMRN_PMLCA4 0x094 /* PM Local Control A4 */
+#define PMRN_PMLCA5 0x095 /* PM Local Control A5 */
#define PMLCA_FC 0x80000000 /* Freeze Counter */
#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
@@ -30,14 +34,18 @@
#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
#define PMLCA_CE 0x04000000 /* Condition Enable */
+#define PMLCA_FGCS1 0x00000002 /* Freeze in guest state */
+#define PMLCA_FGCS0 0x00000001 /* Freeze in hypervisor state */
-#define PMLCA_EVENT_MASK 0x00ff0000 /* Event field */
+#define PMLCA_EVENT_MASK 0x01ff0000 /* Event field */
#define PMLCA_EVENT_SHIFT 16
#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
+#define PMRN_PMLCB4 0x114 /* PM Local Control B4 */
+#define PMRN_PMLCB5 0x115 /* PM Local Control B5 */
#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */
#define PMLCB_THRESHMUL_SHIFT 8
@@ -55,16 +63,22 @@
#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4 0x004 /* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5 0x005 /* User Performance Monitor Counter 5 */
#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
+#define PMRN_UPMLCA4 0x084 /* User PM Local Control A4 */
+#define PMRN_UPMLCA5 0x085 /* User PM Local Control A5 */
#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
+#define PMRN_UPMLCB4 0x104 /* User PM Local Control B4 */
+#define PMRN_UPMLCB5 0x105 /* User PM Local Control B5 */
#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
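Together with the widened PMLCA_EVENT_MASK above, these definitions cover all six counters. A hedged sketch of programming an event, assuming the mtpmr() accessor that accompanies these PMR definitions (the helper name is an invention):

/* Sketch: select an event on counter 0 while keeping it frozen. */
static inline void pmlca0_select_event(unsigned int event)
{
	unsigned int val = PMLCA_FC;	/* stay frozen until explicitly started */

	val |= (event << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK;
	mtpmr(PMRN_PMLCA0, val);
}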
diff --git a/arch/powerpc/include/asm/resource.h b/arch/powerpc/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921..00000000000
--- a/arch/powerpc/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index 0018bf80cb2..b1d2deceeed 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,5 +14,10 @@
#define ASM_PPC_RIO_H
extern void platform_rio_init(void);
+#ifdef CONFIG_FSL_RIO
+extern int fsl_rio_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; }
+#endif
#endif /* ASM_PPC_RIO_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9a1193e30f2..b390f55b0df 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -44,12 +44,12 @@
*
*/
-typedef u32 rtas_arg_t;
+typedef __be32 rtas_arg_t;
struct rtas_args {
- u32 token;
- u32 nargs;
- u32 nret;
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
rtas_arg_t args[16];
rtas_arg_t *rets; /* Pointer to return values in args[]. */
};
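With the fields now declared __be32, callers must byte-swap explicitly rather than store native values. A minimal sketch of filling the block for a one-argument, one-return call; the function itself is illustrative, not part of the patch:

/* Sketch: populate rtas_args with explicit endian conversion. */
static void rtas_args_fill(struct rtas_args *args, int token, u32 arg0)
{
	args->token = cpu_to_be32(token);
	args->nargs = cpu_to_be32(1);
	args->nret  = cpu_to_be32(1);
	args->args[0] = cpu_to_be32(arg0);
	args->rets = &args->args[1];	/* return values follow the arguments */
}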
@@ -74,7 +74,6 @@ struct rtas_suspend_me_data {
/* RTAS event classes */
#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
-#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */
#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
#define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */
#define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff
@@ -144,23 +143,162 @@ struct rtas_suspend_me_data {
#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+/* Platform Resource Reassignment Notification */
+#define RTAS_TYPE_PRRN 0xA0
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
struct rtas_error_log {
- unsigned long version:8; /* Architectural version */
- unsigned long severity:3; /* Severity level of error */
- unsigned long disposition:2; /* Degree of recovery */
- unsigned long extended:1; /* extended log present? */
- unsigned long /* reserved */ :2; /* Reserved for future use */
- unsigned long initiator:4; /* Initiator of event */
- unsigned long target:4; /* Target of failed operation */
- unsigned long type:8; /* General event or error*/
- unsigned long extended_log_length:32; /* length in bytes */
- unsigned char buffer[1];
+ /* Byte 0 */
+ uint8_t byte0; /* Architectural version */
+
+ /* Byte 1 */
+ uint8_t byte1;
+ /* XXXXXXXX
+ * XXX 3: Severity level of error
+ * XX 2: Degree of recovery
+ * X 1: Extended log present?
+ * XX 2: Reserved
+ */
+
+ /* Byte 2 */
+ uint8_t byte2;
+ /* XXXXXXXX
+ * XXXX 4: Initiator of event
+ * XXXX 4: Target of failed operation
+ */
+ uint8_t byte3; /* General event or error*/
+ __be32 extended_log_length; /* length in bytes */
+ unsigned char buffer[1]; /* Start of extended log */
+ /* Variable length. */
};
+static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0xE0) >> 5;
+}
+
+static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0x18) >> 3;
+}
+
+static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog)
+{
+ return (elog->byte1 & 0x04) >> 2;
+}
+
+#define rtas_error_type(x) ((x)->byte3)
+
+static inline
+uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
+{
+ return be32_to_cpu(elog->extended_log_length);
+}
+
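The accessors above replace direct bitfield reads now that the log header is packed into discrete bytes. A hedged sketch of a consumer, assuming the usual printk helpers (the reporting function is hypothetical):

/* Sketch: summarize an error log using only the new accessors. */
static void rtas_log_summary(const struct rtas_error_log *elog)
{
	pr_info("RTAS event: type 0x%02x sev %u disp %u ext %u len %u\n",
		rtas_error_type(elog),
		rtas_error_severity(elog),
		rtas_error_disposition(elog),
		rtas_error_extended(elog),
		rtas_error_extended_log_length(elog));
}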
+#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14
+
+#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * at the "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+ /* Byte 0 */
+ uint8_t byte0;
+ /* XXXXXXXX
+ * X 1: Log valid
+ * X 1: Unrecoverable error
+ * X 1: Recoverable (correctable or successfully retried)
+ * X 1: Bypassed unrecoverable error (degraded operation)
+ * X 1: Predictive error
+ * X 1: "New" log (always 1 for data returned from RTAS)
+ * X 1: Big Endian
+ * X 1: Reserved
+ */
+
+ /* Byte 1 */
+ uint8_t byte1; /* reserved */
+
+ /* Byte 2 */
+ uint8_t byte2;
+ /* XXXXXXXX
+ * X 1: Set to 1 (indicating log is in PowerPC format)
+ * XXX 3: Reserved
+ * XXXX 4: Log format used for bytes 12-2047
+ */
+
+ /* Byte 3 */
+ uint8_t byte3; /* reserved */
+ /* Byte 4-11 */
+ uint8_t reserved[8]; /* reserved */
+ /* Byte 12-15 */
+ __be32 company_id; /* Company ID of the company */
+ /* that defines the format for */
+ /* the vendor specific log type */
+ /* Byte 16-end of log */
+ uint8_t vendor_log[1]; /* Start of vendor specific log */
+ /* Variable length. */
+};
+
+static
+inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
+{
+ return ext_log->byte2 & 0x0F;
+}
+
+static
+inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
+{
+ return be32_to_cpu(ext_log->company_id);
+}
+
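A consumer is expected to check both the company ID and the log format before trusting vendor_log. Illustrative sketch with a hypothetical helper:

/* Sketch: return the vendor log only for IBM-format event logs. */
static uint8_t *ibm_vendor_log(struct rtas_ext_event_log_v6 *ext)
{
	if (rtas_ext_event_company_id(ext) != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;
	if (rtas_ext_event_log_format(ext) != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG)
		return NULL;
	return ext->vendor_log;
}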
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+ __be16 id; /* 0x00 2-byte ASCII section ID */
+ __be16 length; /* 0x02 Section length in bytes */
+ uint8_t version; /* 0x04 Section version */
+ uint8_t subtype; /* 0x05 Section subtype */
+ __be16 creator_component; /* 0x06 Creator component ID */
+ uint8_t data[]; /* 0x08 Start of section data */
+};
+
+static
+inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
+{
+ return be16_to_cpu(sect->id);
+}
+
+static
+inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
+{
+ return be16_to_cpu(sect->length);
+}
+
+struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ uint16_t section_id);
+
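get_pseries_errorlog() walks the version-6 sections and returns the header whose two-byte ASCII ID matches. A hedged usage sketch; the caller and its printk output are assumptions:

/* Sketch: pick the EPOW section out of an RTAS error log, if present. */
static void report_epow(struct rtas_error_log *log)
{
	struct pseries_errorlog *sect;

	sect = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
	if (sect)
		pr_info("EPOW section v%u, %u bytes\n", sect->version,
			pseries_errorlog_length(sect));
}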
/*
* This can be set by the rtas_flash module so that it can get called
* as the absolutely last thing before the kernel terminates.
@@ -187,6 +325,8 @@ extern void rtas_progress(char *s, unsigned short hex);
extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(struct rtas_args *);
struct rtc_time;
@@ -202,14 +342,27 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+#ifdef CONFIG_PPC_PSERIES
+extern int pseries_devicetree_update(s32 scope);
+extern void post_mobility_fixup(void);
+#endif
+
+#ifdef CONFIG_PPC_RTAS_DAEMON
+extern void rtas_cancel_event_scan(void);
+#else
+static inline void rtas_cancel_event_scan(void) { }
+#endif
+
/* Error types logged. */
#define ERR_FLAG_ALREADY_LOGGED 0x0
#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
#define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */
-#define ERR_TYPE_KERNEL_PANIC 0x4 /* from panic() */
+#define ERR_TYPE_KERNEL_PANIC 0x4 /* from die()/panic() */
+#define ERR_TYPE_KERNEL_PANIC_GZ 0x8 /* ditto, compressed */
/* All the types and not flags */
-#define ERR_TYPE_MASK (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC)
+#define ERR_TYPE_MASK \
+ (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC | ERR_TYPE_KERNEL_PANIC_GZ)
#define RTAS_DEBUG KERN_DEBUG "RTAS: "
@@ -259,8 +412,27 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
-extern void __cpuinit rtas_give_timebase(void);
-extern void __cpuinit rtas_take_timebase(void);
+extern void rtas_give_timebase(void);
+extern void rtas_take_timebase(void);
+
+#ifdef CONFIG_PPC_RTAS
+static inline int page_is_rtas_user_buf(unsigned long pfn)
+{
+ unsigned long paddr = (pfn << PAGE_SHIFT);
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+ return 1;
+ return 0;
+}
+
+/* Not the best place to put pSeries_coalesce_init, will be fixed when we
+ * move some of the rtas suspend-me stuff to pseries */
+extern void pSeries_coalesce_init(void);
+#else
+static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0; }
+static inline void pSeries_coalesce_init(void) { }
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
new file mode 100644
index 00000000000..54e9b963876
--- /dev/null
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_RUNLATCH_H
+#define _ASM_POWERPC_RUNLATCH_H
+
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable/disable; this is called
+ * in the idle loop and we don't want to interfere
+ * with soft-disable/enable & interrupt replay.
+ */
+#define ppc64_runlatch_off() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_off(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+
+#define ppc64_runlatch_on() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ !test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_on(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_RUNLATCH_H */
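The intended call pattern brackets the wait in the idle loop. A minimal sketch, assuming the usual need_resched()/cpu_relax() helpers; the function itself is illustrative:

/* Sketch: drop the run latch while idle, restore it on exit. */
static void idle_wait(void)
{
	ppc64_runlatch_off();
	while (!need_resched())
		cpu_relax();
	ppc64_runlatch_on();
}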
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
deleted file mode 100644
index 8447d89fbe7..00000000000
--- a/arch/powerpc/include/asm/rwsem.h
+++ /dev/null
@@ -1,183 +0,0 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_PPC64
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-struct rw_semaphore {
- long count;
- spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ \
- RWSEM_UNLOCKED_VALUE, \
- __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) \
-}
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
-
-#define init_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
- } while (0)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- while ((tmp = sem->count) >= 0) {
- if (tmp == cmpxchg(&sem->count, tmp,
- tmp + RWSEM_ACTIVE_READ_BIAS)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
- long tmp;
-
- tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- __down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
- if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count) < 0))
- rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
- atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
- (atomic_long_t *)&sem->count);
- if (tmp < 0)
- rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
- return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return sem->count != 0;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
new file mode 100644
index 00000000000..f5cde45b116
--- /dev/null
+++ b/arch/powerpc/include/asm/scom.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ * and David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_SCOM_H
+#define _ASM_POWERPC_SCOM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_SCOM
+
+/*
+ * The SCOM bus is a sideband bus used for accessing various internal
+ * registers of the processor or the chipset. The implementation details
+ * differ between processors and platforms, and the access method as
+ * well.
+ *
+ * This API allows one to "map" ranges of SCOM register numbers associated
+ * with a given SCOM controller. The latter must be represented by a
+ * device node, though some implementations might support NULL if there
+ * is no possible ambiguity.
+ *
+ * Then, scom_read/scom_write can be used to access registers inside
+ * that range. The argument passed is a register number relative to
+ * the beginning of the range mapped.
+ */
+
+typedef void *scom_map_t;
+
+/* Value for an invalid SCOM map */
+#define SCOM_MAP_INVALID (NULL)
+
+/* The scom_controller data structure is what the platform passes
+ * to the core code in scom_init; it provides the actual implementation
+ * of all the SCOM functions.
+ */
+struct scom_controller {
+ scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
+ void (*unmap)(scom_map_t map);
+
+ int (*read)(scom_map_t map, u64 reg, u64 *value);
+ int (*write)(scom_map_t map, u64 reg, u64 value);
+};
+
+extern const struct scom_controller *scom_controller;
+
+/**
+ * scom_init - Initialize the SCOM backend, called by the platform
+ * @controller: The platform SCOM controller
+ */
+static inline void scom_init(const struct scom_controller *controller)
+{
+ scom_controller = controller;
+}
+
+/**
+ * scom_map_ok - Test if a SCOM mapping is successful
+ * @map: The result of scom_map to test
+ */
+static inline int scom_map_ok(scom_map_t map)
+{
+ return map != SCOM_MAP_INVALID;
+}
+
+/**
+ * scom_map - Map a block of SCOM registers
+ * @ctrl_dev: Device node of the SCOM controller
+ * some implementations allow NULL here
+ * @reg: first SCOM register to map
+ * @count: Number of SCOM registers to map
+ */
+
+static inline scom_map_t scom_map(struct device_node *ctrl_dev,
+ u64 reg, u64 count)
+{
+ return scom_controller->map(ctrl_dev, reg, count);
+}
+
+/**
+ * scom_find_parent - Find the SCOM controller for a device
+ * @dev: OF node of the device
+ *
+ * This is not meant for general usage, but in combination with
+ * scom_map() allows mapping registers not represented by the
+ * device's own scom-reg property. Useful for applying HW workarounds
+ * to things not properly represented in the device-tree, for example.
+ */
+struct device_node *scom_find_parent(struct device_node *dev);
+
+
+/**
+ * scom_map_device - Map a device's block of SCOM registers
+ * @dev: OF node of the device
+ * @index: Register bank index (index in "scom-reg" property)
+ *
+ * This function will use the device-tree binding for SCOM which
+ * is to follow "scom-parent" properties until it finds a node with
+ * a "scom-controller" property to find the controller. It will then
+ * use the "scom-reg" property which is made of reg/count pairs,
+ * each of them having a size defined by the controller's #scom-cells
+ * property.
+ */
+extern scom_map_t scom_map_device(struct device_node *dev, int index);
+
+
+/**
+ * scom_unmap - Unmap a block of SCOM registers
+ * @map: Result of scom_map is to be unmapped
+ */
+static inline void scom_unmap(scom_map_t map)
+{
+ if (scom_map_ok(map))
+ scom_controller->unmap(map);
+}
+
+/**
+ * scom_read - Read a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ * @value: Updated with the value read
+ *
+ * Returns 0 (success) or a negative error code
+ */
+static inline int scom_read(scom_map_t map, u64 reg, u64 *value)
+{
+ int rc;
+
+ rc = scom_controller->read(map, reg, value);
+ if (rc)
+ *value = 0xfffffffffffffffful;
+ return rc;
+}
+
+/**
+ * scom_write - Write to a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ * @value: Value to write
+ *
+ * Returns 0 (success) or a negative error code
+ */
+static inline int scom_write(scom_map_t map, u64 reg, u64 value)
+{
+ return scom_controller->write(map, reg, value);
+}
+
+
+#endif /* CONFIG_PPC_SCOM */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SCOM_H */
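Putting the pieces together, a typical consumer maps a device's first scom-reg bank, reads a register relative to that mapping, and unmaps. A hedged sketch; the helper name and the error code choice are assumptions:

/* Sketch: read SCOM register 0 of a device's first register bank. */
static int scom_read_bank0_reg0(struct device_node *dev, u64 *val)
{
	scom_map_t map = scom_map_device(dev, 0);
	int rc;

	if (!scom_map_ok(map))
		return -ENODEV;
	rc = scom_read(map, 0, val);
	scom_unmap(map);
	return rc;
}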
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
deleted file mode 100644
index 00c1d9133cf..00000000000
--- a/arch/powerpc/include/asm/seccomp.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _ASM_POWERPC_SECCOMP_H
-#define _ASM_POWERPC_SECCOMP_H
-
-#include <linux/unistd.h>
-
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
-#define __NR_seccomp_sigreturn_32 __NR_sigreturn
-
-#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c71..a5e930aca80 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,11 @@
#ifdef __powerpc64__
-extern char _end[];
+extern char __start_interrupts[];
+extern char __end_interrupts[];
+
+extern char __prom_init_toc_start[];
+extern char __prom_init_toc_end[];
static inline int in_kernel_text(unsigned long addr)
{
@@ -18,12 +22,35 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
return start < (unsigned long)__init_end &&
(unsigned long)_stext < end;
}
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+ extern char kvm_tmp[];
+ return start < (unsigned long)kvm_tmp &&
+ (unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+ return 0;
+#endif
+}
+
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
#undef dereference_function_descriptor
static inline void *dereference_function_descriptor(void *ptr)
{
@@ -34,6 +61,7 @@ static inline void *dereference_function_descriptor(void *ptr)
ptr = p;
return ptr;
}
+#endif
#endif
diff --git a/arch/powerpc/include/asm/sembuf.h b/arch/powerpc/include/asm/sembuf.h
deleted file mode 100644
index 99a41938ae3..00000000000
--- a/arch/powerpc/include/asm/sembuf.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _ASM_POWERPC_SEMBUF_H
-#define _ASM_POWERPC_SEMBUF_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * The semid64_ds structure for PPC architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
-#ifndef __powerpc64__
- unsigned long __unused1;
-#endif
- __kernel_time_t sem_otime; /* last semop time */
-#ifndef __powerpc64__
- unsigned long __unused2;
-#endif
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_POWERPC_SEMBUF_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index dae19342f0b..11ba86e1763 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -1,6 +1,34 @@
#ifndef _ASM_POWERPC_SETUP_H
#define _ASM_POWERPC_SETUP_H
-#include <asm-generic/setup.h>
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+
+extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern int init_bootmem_done; /* set once bootmem is available */
+extern unsigned long long memory_limit;
+extern unsigned long klimit;
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
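PTRRELOC() lets very early code dereference kernel globals before the kernel runs at its linked address, by applying the current relocation offset to the pointer. Illustrative sketch only; the flag and helper are hypothetical:

/* Sketch: set a kernel global before relocation has been applied. */
static void mark_early_boot_flag(int *flag)
{
	*PTRRELOC(flag) = 1;	/* address adjusted by the reloc offset */
}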
+void check_for_initrd(void);
+void do_init_bootmem(void);
+void setup_panic(void);
+#define ARCH_PANIC_TIMEOUT 180
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
+
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 3a7a67a0d00..d89beaba26f 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -125,7 +125,7 @@
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))
-#define __FPU_FPSCR (current->thread.fpscr.val)
+#define __FPU_FPSCR (current->thread.fp_state.fpscr)
/* We only actually write to the destination register
* if exceptions signalled (if any) will not trap.
diff --git a/arch/powerpc/include/asm/shmbuf.h b/arch/powerpc/include/asm/shmbuf.h
deleted file mode 100644
index 8efa39698b6..00000000000
--- a/arch/powerpc/include/asm/shmbuf.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _ASM_POWERPC_SHMBUF_H
-#define _ASM_POWERPC_SHMBUF_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * The shmid64_ds structure for PPC architecture.
- *
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
-#ifndef __powerpc64__
- unsigned long __unused1;
-#endif
- __kernel_time_t shm_atime; /* last attach time */
-#ifndef __powerpc64__
- unsigned long __unused2;
-#endif
- __kernel_time_t shm_dtime; /* last detach time */
-#ifndef __powerpc64__
- unsigned long __unused3;
-#endif
- __kernel_time_t shm_ctime; /* last change time */
-#ifndef __powerpc64__
- unsigned long __unused4;
-#endif
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused5;
- unsigned long __unused6;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_POWERPC_SHMBUF_H */
diff --git a/arch/powerpc/include/asm/sigcontext.h b/arch/powerpc/include/asm/sigcontext.h
deleted file mode 100644
index 9c1f24fd5d1..00000000000
--- a/arch/powerpc/include/asm/sigcontext.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ASM_POWERPC_SIGCONTEXT_H
-#define _ASM_POWERPC_SIGCONTEXT_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/compiler.h>
-#include <asm/ptrace.h>
-#ifdef __powerpc64__
-#include <asm/elf.h>
-#endif
-
-struct sigcontext {
- unsigned long _unused[4];
- int signal;
-#ifdef __powerpc64__
- int _pad0;
-#endif
- unsigned long handler;
- unsigned long oldmask;
- struct pt_regs __user *regs;
-#ifdef __powerpc64__
- elf_gregset_t gp_regs;
- elf_fpregset_t fp_regs;
-/*
- * To maintain compatibility with current implementations the sigcontext is
- * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
- * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
- * allows the array of vector registers to be quadword aligned independent of
- * the alignment of the containing sigcontext or ucontext. It is the
- * responsibility of the code setting the sigcontext to set this pointer to
- * either NULL (if this processor does not support the VMX feature) or the
- * address of the first quadword within the allocated (vmx_reserve) area.
- *
- * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
- * an array of 34 quadword entries (elf_vrregset_t). The entries with
- * indexes 0-31 contain the corresponding vector registers. The entry with
- * index 32 contains the vscr as the last word (offset 12) within the
- * quadword. This allows the vscr to be stored as either a quadword (since
- * it must be copied via a vector register to/from storage) or as a word.
- * The entry with index 33 contains the vrsave as the first word (offset 0)
- * within the quadword.
- *
- * Part of the VSX data is stored here also by extending vmx_restore
- * by an additional 32 double words. Architecturally the layout of
- * the VSR registers and how they overlap on top of the legacy FPR and
- * VR registers is shown below:
- *
- * VSR doubleword 0 VSR doubleword 1
- * ----------------------------------------------------------------
- * VSR[0] | FPR[0] | |
- * ----------------------------------------------------------------
- * VSR[1] | FPR[1] | |
- * ----------------------------------------------------------------
- * | ... | |
- * | ... | |
- * ----------------------------------------------------------------
- * VSR[30] | FPR[30] | |
- * ----------------------------------------------------------------
- * VSR[31] | FPR[31] | |
- * ----------------------------------------------------------------
- * VSR[32] | VR[0] |
- * ----------------------------------------------------------------
- * VSR[33] | VR[1] |
- * ----------------------------------------------------------------
- * | ... |
- * | ... |
- * ----------------------------------------------------------------
- * VSR[62] | VR[30] |
- * ----------------------------------------------------------------
- * VSR[63] | VR[31] |
- * ----------------------------------------------------------------
- *
- * FPR/VSR 0-31 doubleword 0 is stored in fp_regs, and VMX/VSR 32-63
- * is stored at the start of vmx_reserve. vmx_reserve is extended for
- * backwards compatility to store VSR 0-31 doubleword 1 after the VMX
- * registers and vscr/vrsave.
- */
- elf_vrreg_t __user *v_regs;
- long vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
-#endif
-};
-
-#endif /* _ASM_POWERPC_SIGCONTEXT_H */
diff --git a/arch/powerpc/include/asm/siginfo.h b/arch/powerpc/include/asm/siginfo.h
deleted file mode 100644
index 49495b0534e..00000000000
--- a/arch/powerpc/include/asm/siginfo.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef _ASM_POWERPC_SIGINFO_H
-#define _ASM_POWERPC_SIGINFO_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifdef __powerpc64__
-# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-# define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#undef NSIGTRAP
-#define NSIGTRAP 4
-
-#endif /* _ASM_POWERPC_SIGINFO_H */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 3eb13be11d8..9322c28aebd 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -1,149 +1,10 @@
#ifndef _ASM_POWERPC_SIGNAL_H
#define _ASM_POWERPC_SIGNAL_H
-#include <linux/types.h>
+#define __ARCH_HAS_SA_RESTORER
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
-#define _NSIG 64
-#ifdef __powerpc64__
-#define _NSIG_BPW 64
-#else
-#define _NSIG_BPW 32
-#endif
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001U
-#define SA_NOCLDWAIT 0x00000002U
-#define SA_SIGINFO 0x00000004U
-#define SA_ONSTACK 0x08000000U
-#define SA_RESTART 0x10000000U
-#define SA_NODEFER 0x40000000U
-#define SA_RESETHAND 0x80000000U
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000U
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
-struct pt_regs;
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-#endif /* __KERNEL__ */
-
-#ifndef __powerpc64__
-/*
- * These are parameters to dbg_sigreturn syscall. They enable or
- * disable certain debugging things that can be done from signal
- * handlers. The dbg_sigreturn syscall *must* be called from a
- * SA_SIGINFO signal so the ucontext can be passed to it. It takes an
- * array of struct sig_dbg_op, which has the debug operations to
- * perform before returning from the signal.
- */
-struct sig_dbg_op {
- int dbg_type;
- unsigned long dbg_value;
-};
-
-/* Enable or disable single-stepping. The value sets the state. */
-#define SIG_DBG_SINGLE_STEPPING 1
-
-/* Enable or disable branch tracing. The value sets the state. */
-#define SIG_DBG_BRANCH_TRACING 2
-#endif /* ! __powerpc64__ */
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 66e237bbe15..5a6614a7f0b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
+#include <linux/irqreturn.h>
#ifndef __ASSEMBLY__
@@ -29,22 +30,44 @@
#include <asm/percpu.h>
extern int boot_cpuid;
+extern int spinning_secondaries;
extern void cpu_die(void);
+extern int cpu_to_chip_id(int cpu);
#ifdef CONFIG_SMP
-extern void smp_send_debugger_break(int cpu);
-extern void smp_message_recv(int);
+struct smp_ops_t {
+ void (*message_pass)(int cpu, int msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ void (*cause_ipi)(int cpu, unsigned long data);
+#endif
+ int (*probe)(void);
+ int (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*bringup_done)(void);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+};
+
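A platform fills in only the hooks it needs; unset members stay NULL. As an illustrative sketch under CONFIG_SMP, assuming the MPIC and generic helpers declared later in this header (the variable name is made up):

/* Sketch: a minimal platform smp_ops built from generic helpers. */
static struct smp_ops_t example_smp_ops = {
	.probe		= smp_mpic_probe,
	.kick_cpu	= smp_generic_kick_cpu,
	.setup_cpu	= smp_mpic_setup_cpu,
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
};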
+extern void smp_send_debugger_break(void);
+extern void start_secondary_resume(void);
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
+void generic_set_cpu_dead(unsigned int cpu);
+void generic_set_cpu_up(unsigned int cpu);
+int generic_check_cpu_restart(unsigned int cpu);
#endif
#ifdef CONFIG_PPC64
@@ -89,17 +112,18 @@ extern int cpu_to_core_id(int cpu);
* in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
-#define PPC_MSG_CALL_FUNC_SINGLE 2
+#define PPC_MSG_TICK_BROADCAST 2
#define PPC_MSG_DEBUGGER_BREAK 3
-/*
- * irq controllers that have dedicated ipis per message and don't
- * need additional code in the action handler may use this
- */
+/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];
-void smp_init_iSeries(void);
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern irqreturn_t smp_ipi_demux(void);
+
void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
@@ -112,6 +136,12 @@ extern void __cpu_die(unsigned int cpu);
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
+static inline void inhibit_secondary_onlining(void) {}
+static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
@@ -148,7 +178,9 @@ extern int smt_enabled_at_boot;
extern int smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
-extern void smp_generic_kick_cpu(int nr);
+extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);
@@ -168,6 +200,7 @@ extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern void __early_start(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index e3bdada8c54..6e909f3e6a4 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -132,7 +132,7 @@
*
* At this point, the OF driver seems to have a limitation on transfer
* sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
- * wether this is just an OF limit due to some temporary buffer size
+ * whether this is just an OF limit due to some temporary buffer size
* or if this is an SMU imposed limit. This driver has the same limitation
* for now as I use a 0x10 bytes temporary buffer as well
*
@@ -236,7 +236,7 @@
* 3 (optional): enable nmi? [0x00 or 0x01]
*
* Returns:
- * If parameter 2 is 0x00 and parameter 3 is not specified, returns wether
+ * If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
* NMI is enabled. Otherwise unknown.
*/
#define SMU_CMD_MISC_df_NMI_OPTION 0x04
@@ -547,7 +547,7 @@ struct smu_sdbp_header {
* (currently, afaik, this concerns only the FVT partition
* (0x12)
*/
-#define SMU_U16_MIX(x) le16_to_cpu(x);
+#define SMU_U16_MIX(x) le16_to_cpu(x)
#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
deleted file mode 100644
index 866f7606da6..00000000000
--- a/arch/powerpc/include/asm/socket.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef _ASM_POWERPC_SOCKET_H
-#define _ASM_POWERPC_SOCKET_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_RCVLOWAT 16
-#define SO_SNDLOWAT 17
-#define SO_RCVTIMEO 18
-#define SO_SNDTIMEO 19
-#define SO_PASSCRED 20
-#define SO_PEERCRED 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/sockios.h b/arch/powerpc/include/asm/sockios.h
deleted file mode 100644
index 55cef7675a3..00000000000
--- a/arch/powerpc/include/asm/sockios.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_POWERPC_SOCKIOS_H
-#define _ASM_POWERPC_SOCKIOS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_POWERPC_SOCKIOS_H */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 54a47ea2c3a..f6fc0ee813d 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,13 +10,13 @@
*/
#define SECTION_SIZE_BITS 24
-#define MAX_PHYSADDR_BITS 44
-#define MAX_PHYSMEM_BITS 44
+#define MAX_PHYSADDR_BITS 46
+#define MAX_PHYSMEM_BITS 46
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_MEMORY_HOTPLUG
-extern void create_section_mapping(unsigned long start, unsigned long end);
+extern int create_section_mapping(unsigned long start, unsigned long end);
extern int remove_section_mapping(unsigned long start, unsigned long end);
#ifdef CONFIG_NUMA
extern int hot_add_scn_to_nid(unsigned long scn_addr);
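create_section_mapping() changing from void to int lets the memory-hotplug path report a failed mapping instead of silently continuing. An illustrative stand-in (the bodies and the -1 error value are assumptions, not the kernel's real code):

    /* failure is now visible to the caller */
    static int create_section_mapping(unsigned long start, unsigned long end)
    {
            if (end <= start)
                    return -1;      /* the kernel would return -ENOMEM or similar */
            return 0;               /* mapping created */
    }

    static int add_memory_section(unsigned long start, unsigned long end)
    {
            int rc = create_section_mapping(start, end);
            if (rc)
                    return rc;      /* propagate instead of proceeding unmapped */
            return 0;
    }

    int main(void) { return add_memory_section(0x1000, 0x2000); }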
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f9611bd69ed..35aa339410b 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -23,18 +23,21 @@
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
#define LOCK_TOKEN 1
#endif
@@ -51,6 +54,16 @@
#define SYNC_IO
#endif
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(*lock);
+}
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
@@ -95,12 +108,12 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* value.
*/
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR || ISERIES */
+#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
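Note the shape of the new spinlock helpers above: arch_spin_value_unlocked() takes the lock by value, so callers can test a snapshot without touching the live word, and arch_spin_is_locked() is just its negation applied to a dereference. A userspace sketch with the same structure (the 0x800000yy token value comes from the comment in the hunk):

    #include <stdio.h>

    typedef struct { unsigned int slock; } arch_spinlock_t;

    static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
    {
            return lock.slock == 0;                  /* operates on a copy */
    }

    static inline int arch_spin_is_locked(arch_spinlock_t *lock)
    {
            return !arch_spin_value_unlocked(*lock); /* snapshot, then test */
    }

    int main(void)
    {
            arch_spinlock_t l = { .slock = 0x80000001u }; /* held by CPU 1 */
            printf("locked=%d\n", arch_spin_is_locked(&l));
            return 0;
    }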
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 0ab8d869e3d..37b7ca39ec9 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -25,7 +25,8 @@
#ifdef __KERNEL__
#include <linux/workqueue.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
#define LS_SIZE (256 * 1024)
#define LS_ADDR_MASK (LS_SIZE - 1)
@@ -165,7 +166,7 @@ struct spu {
/* beat only */
u64 shadow_int_mask_RW[3];
- struct sys_device sysdev;
+ struct device dev;
int has_mem_affinity;
struct list_head aff_list;
@@ -203,14 +204,6 @@ void spu_irq_setaffinity(struct spu *spu, int cpu);
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
void *code, int code_size);
-#ifdef CONFIG_KEXEC
-void crash_register_spus(struct list_head *list);
-#else
-static inline void crash_register_spus(struct list_head *list)
-{
-}
-#endif
-
extern void spu_invalidate_slbs(struct spu *spu);
extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
int spu_64k_pages_available(void);
@@ -242,14 +235,15 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
/* syscalls implemented in spufs */
struct file;
+struct coredump_params;
struct spufs_calls {
long (*create_thread)(const char __user *name,
- unsigned int flags, mode_t mode,
+ unsigned int flags, umode_t mode,
struct file *neighbor);
long (*spu_run)(struct file *filp, __u32 __user *unpc,
__u32 __user *ustatus);
int (*coredump_extra_notes_size)(void);
- int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
+ int (*coredump_extra_notes_write)(struct coredump_params *cprm);
void (*notify_spus_active)(void);
struct module *owner;
};
@@ -277,11 +271,11 @@ struct spufs_calls {
int register_spu_syscalls(struct spufs_calls *calls);
void unregister_spu_syscalls(struct spufs_calls *calls);
-int spu_add_sysdev_attr(struct sysdev_attribute *attr);
-void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+int spu_add_dev_attr(struct device_attribute *attr);
+void spu_remove_dev_attr(struct device_attribute *attr);
-int spu_add_sysdev_attr_group(struct attribute_group *attrs);
-void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+int spu_add_dev_attr_group(struct attribute_group *attrs);
+void spu_remove_dev_attr_group(struct attribute_group *attrs);
int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
unsigned long dsisr, unsigned *flt);
diff --git a/arch/powerpc/include/asm/spu_info.h b/arch/powerpc/include/asm/spu_info.h
index 1286c823f0d..7146b78e40f 100644
--- a/arch/powerpc/include/asm/spu_info.h
+++ b/arch/powerpc/include/asm/spu_info.h
@@ -19,37 +19,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
#ifndef _SPU_INFO_H
#define _SPU_INFO_H
-#include <linux/types.h>
-
-#ifdef __KERNEL__
#include <asm/spu.h>
-#else
-struct mfc_cq_sr {
- __u64 mfc_cq_data0_RW;
- __u64 mfc_cq_data1_RW;
- __u64 mfc_cq_data2_RW;
- __u64 mfc_cq_data3_RW;
-};
-#endif /* __KERNEL__ */
-
-struct spu_dma_info {
- __u64 dma_info_type;
- __u64 dma_info_mask;
- __u64 dma_info_status;
- __u64 dma_info_stall_and_notify;
- __u64 dma_info_atomic_command_status;
- struct mfc_cq_sr dma_info_command_data[16];
-};
-
-struct spu_proxydma_info {
- __u64 proxydma_info_type;
- __u64 proxydma_info_mask;
- __u64 proxydma_info_status;
- struct mfc_cq_sr proxydma_info_command_data[8];
-};
+#include <uapi/asm/spu_info.h>
#endif
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
index 25020a34ce7..d8f5c60f61c 100644
--- a/arch/powerpc/include/asm/spu_priv1.h
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -223,7 +223,7 @@ spu_disable_spu (struct spu_context *ctx)
}
/*
- * The declarations folowing are put here for convenience
+ * The declarations following are put here for convenience
* and only intended to be used by the platform setup code.
*/
diff --git a/arch/powerpc/include/asm/stat.h b/arch/powerpc/include/asm/stat.h
deleted file mode 100644
index e4edc510b53..00000000000
--- a/arch/powerpc/include/asm/stat.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef _ASM_POWERPC_STAT_H
-#define _ASM_POWERPC_STAT_H
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/types.h>
-
-#define STAT_HAVE_NSEC 1
-
-#ifndef __powerpc64__
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_mtime;
- unsigned long st_ctime;
-};
-#endif /* !__powerpc64__ */
-
-struct stat {
- unsigned long st_dev;
- ino_t st_ino;
-#ifdef __powerpc64__
- nlink_t st_nlink;
- mode_t st_mode;
-#else
- mode_t st_mode;
- nlink_t st_nlink;
-#endif
- uid_t st_uid;
- gid_t st_gid;
- unsigned long st_rdev;
- off_t st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-#ifdef __powerpc64__
- unsigned long __unused6;
-#endif
-};
-
-/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
-struct stat64 {
- unsigned long long st_dev; /* Device. */
- unsigned long long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long long st_rdev; /* Device number, if device. */
- unsigned short __pad2;
- long long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- long long st_blocks; /* Number 512-byte blocks allocated. */
- int st_atime; /* Time of last access. */
- unsigned int st_atime_nsec;
- int st_mtime; /* Time of last modification. */
- unsigned int st_mtime_nsec;
- int st_ctime; /* Time of last status change. */
- unsigned int st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-#endif /* _ASM_POWERPC_STAT_H */
diff --git a/arch/powerpc/include/asm/statfs.h b/arch/powerpc/include/asm/statfs.h
deleted file mode 100644
index 5244834583a..00000000000
--- a/arch/powerpc/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_POWERPC_STATFS_H
-#define _ASM_POWERPC_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
deleted file mode 100644
index c6efc3466aa..00000000000
--- a/arch/powerpc/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_POWERPC_SUSPEND_H
-#define __ASM_POWERPC_SUSPEND_H
-
-static inline int arch_prepare_suspend(void) { return 0; }
-
-#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index c581e3ef73e..96f59de6185 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -1,23 +1,13 @@
-#ifndef _ASM_POWERPC_SWAB_H
-#define _ASM_POWERPC_SWAB_H
-
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_SWAB_H
+#define _ASM_POWERPC_SWAB_H
-#include <linux/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-#ifndef __powerpc64__
-#define __SWAB_64_THRU_32__
-#endif /* __powerpc64__ */
-
-#ifdef __KERNEL__
+#include <uapi/asm/swab.h>
static __inline__ __u16 ld_le16(const volatile __u16 *addr)
{
@@ -26,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
return val;
}
-#define __arch_swab16p ld_le16
static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
{
__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
-static inline void __arch_swab16s(__u16 *addr)
-{
- st_le16(addr, *addr);
-}
-#define __arch_swab16s __arch_swab16s
-
static __inline__ __u32 ld_le32(const volatile __u32 *addr)
{
__u32 val;
@@ -46,45 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
return val;
}
-#define __arch_swab32p ld_le32
static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
{
__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
-static inline void __arch_swab32s(__u32 *addr)
-{
- st_le32(addr, *addr);
-}
-#define __arch_swab32s __arch_swab32s
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
-{
- __u16 result;
-
- __asm__("rlwimi %0,%1,8,16,23"
- : "=r" (result)
- : "r" (value), "0" (value >> 8));
- return result;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
-{
- __u32 result;
-
- __asm__("rlwimi %0,%1,24,16,23\n\t"
- "rlwimi %0,%1,8,8,15\n\t"
- "rlwimi %0,%1,24,0,7"
- : "=r" (result)
- : "r" (value), "0" (value >> 24));
- return result;
-}
-#define __arch_swab32 __arch_swab32
-
-#endif /* __KERNEL__ */
-
-#endif /* __GNUC__ */
-
#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 8979d4cd3d7..de99d6e2943 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -22,4 +22,10 @@ int __init swiotlb_setup_bus_notifier(void);
extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
+#ifdef CONFIG_SWIOTLB
+void swiotlb_detect_4g(void);
+#else
+static inline void swiotlb_detect_4g(void) {}
+#endif
+
#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
new file mode 100644
index 00000000000..58abeda64cb
--- /dev/null
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SWITCH_TO_H
+#define _ASM_POWERPC_SWITCH_TO_H
+
+struct thread_struct;
+struct task_struct;
+struct pt_regs;
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+ struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_early_sprs(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ prev->dscr = mfspr(SPRN_DSCR);
+}
+#else
+static inline void save_early_sprs(struct thread_struct *prev) {}
+#endif
+
+extern void enable_kernel_fp(void);
+extern void enable_kernel_altivec(void);
+extern int emulate_altivec(struct pt_regs *);
+extern void __giveup_vsx(struct task_struct *);
+extern void giveup_vsx(struct task_struct *);
+extern void enable_kernel_spe(void);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
+#ifdef CONFIG_PPC_FPU
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+#else
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+static inline void giveup_fpu(struct task_struct *t) { }
+#endif
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+extern void giveup_altivec(struct task_struct *);
+extern void giveup_altivec_notask(void);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+static inline void giveup_altivec(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_VSX
+extern void flush_vsx_to_thread(struct task_struct *);
+#else
+static inline void flush_vsx_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+static inline void clear_task_ebb(struct task_struct *t)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* EBB perf events are not inherited, so clear all EBB state. */
+ t->thread.ebbrr = 0;
+ t->thread.ebbhr = 0;
+ t->thread.bescr = 0;
+ t->thread.mmcr2 = 0;
+ t->thread.mmcr0 = 0;
+ t->thread.siar = 0;
+ t->thread.sdar = 0;
+ t->thread.sier = 0;
+ t->thread.used_ebb = 0;
+#endif
+}
+
+#endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c..e682a7143ed 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
+extern void do_final_fixups(void);
static inline void eieio(void)
{
@@ -41,11 +42,15 @@ static inline void isync(void)
START_LWSYNC_SECTION(97); \
isync; \
MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
-#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
-#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
#define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
#endif
#endif /* __KERNEL__ */
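The two new PPC_ATOMIC_* macros are meant to bracket the ll/sc loop of a full-barrier atomic. A simplified sketch of the intended placement, modelled on the powerpc atomics (PPC-only inline asm; the stand-in expansions below assume the lwsync/sync choice the hunk makes):

    #define PPC_ATOMIC_ENTRY_BARRIER "\n\tlwsync\n"  /* stand-in expansion */
    #define PPC_ATOMIC_EXIT_BARRIER  "\n\tsync\n"    /* stand-in expansion */

    static inline int atomic_add_return_sketch(int a, int *v)
    {
            int t;
            __asm__ __volatile__(
            PPC_ATOMIC_ENTRY_BARRIER        /* order accesses before the op */
    "1:     lwarx   %0,0,%2\n"              /* load-reserve current value */
    "       add     %0,%1,%0\n"
    "       stwcx.  %0,0,%2\n"              /* store-conditional */
    "       bne-    1b\n"                   /* retry if reservation was lost */
            PPC_ATOMIC_EXIT_BARRIER         /* full fence on the way out */
            : "=&r" (t)
            : "r" (a), "r" (v)
            : "cc", "memory");
            return t;
    }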
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 23913e902fc..b54b2add07b 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -15,6 +15,11 @@
#include <linux/sched.h>
+/* ftrace syscall tracing requires exporting the sys_call_table */
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern const unsigned long *sys_call_table;
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 4084e567d28..23be8f1e7e6 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -5,11 +5,8 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
-#include <asm/signal.h>
-struct pt_regs;
struct rtas_args;
-struct sigaction;
asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
@@ -17,32 +14,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
-asmlinkage int sys_execve(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3, unsigned long a4,
- unsigned long a5, struct pt_regs *regs);
-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long usp,
- int __user *parent_tidp, void __user *child_threadptr,
- int __user *child_tidp, int p6, struct pt_regs *regs);
-asmlinkage int sys_fork(unsigned long p1, unsigned long p2,
- unsigned long p3, unsigned long p4, unsigned long p5,
- unsigned long p6, struct pt_regs *regs);
-asmlinkage int sys_vfork(unsigned long p1, unsigned long p2,
- unsigned long p3, unsigned long p4, unsigned long p5,
- unsigned long p6, struct pt_regs *regs);
-asmlinkage long sys_pipe(int __user *fildes);
-asmlinkage long sys_pipe2(int __user *fildes, int flags);
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact, size_t sigsetsize);
asmlinkage long ppc64_personality(unsigned long personality);
asmlinkage int ppc_rtas(struct rtas_args __user *uargs);
-asmlinkage time_t sys64_time(time_t __user * tloc);
-
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
- size_t sigsetsize);
-asmlinkage long sys_sigaltstack(const stack_t __user *uss,
- stack_t __user *uoss, unsigned long r5, unsigned long r6,
- unsigned long r7, unsigned long r8, struct pt_regs *regs);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index aa0f1ebb4aa..babbeca6850 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -10,8 +10,8 @@ SYSCALL_SPU(read)
SYSCALL_SPU(write)
COMPAT_SYS_SPU(open)
SYSCALL_SPU(close)
-COMPAT_SYS_SPU(waitpid)
-COMPAT_SYS_SPU(creat)
+SYSCALL_SPU(waitpid)
+SYSCALL_SPU(creat)
SYSCALL_SPU(link)
SYSCALL_SPU(unlink)
COMPAT_SYS(execve)
@@ -22,7 +22,7 @@ SYSCALL_SPU(chmod)
SYSCALL_SPU(lchown)
SYSCALL(ni_syscall)
OLDSYS(stat)
-SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+COMPAT_SYS_SPU(lseek)
SYSCALL_SPU(getpid)
COMPAT_SYS(mount)
SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
@@ -36,13 +36,13 @@ SYSCALL(pause)
COMPAT_SYS(utime)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(access)
-COMPAT_SYS_SPU(nice)
+SYSCALL_SPU(access)
+SYSCALL_SPU(nice)
SYSCALL(ni_syscall)
SYSCALL_SPU(sync)
-COMPAT_SYS_SPU(kill)
+SYSCALL_SPU(kill)
SYSCALL_SPU(rename)
-COMPAT_SYS_SPU(mkdir)
+SYSCALL_SPU(mkdir)
SYSCALL_SPU(rmdir)
SYSCALL_SPU(dup)
SYSCALL_SPU(pipe)
@@ -60,10 +60,10 @@ SYSCALL(ni_syscall)
COMPAT_SYS_SPU(ioctl)
COMPAT_SYS_SPU(fcntl)
SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(setpgid)
+SYSCALL_SPU(setpgid)
SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
-COMPAT_SYS_SPU(umask)
+SYSX(sys_ni_syscall,sys_olduname,sys_olduname)
+SYSCALL_SPU(umask)
SYSCALL_SPU(chroot)
COMPAT_SYS(ustat)
SYSCALL_SPU(dup2)
@@ -72,23 +72,24 @@ SYSCALL_SPU(getpgrp)
SYSCALL_SPU(setsid)
SYS32ONLY(sigaction)
SYSCALL_SPU(sgetmask)
-COMPAT_SYS_SPU(ssetmask)
+SYSCALL_SPU(ssetmask)
SYSCALL_SPU(setreuid)
SYSCALL_SPU(setregid)
+#define compat_sys_sigsuspend sys_sigsuspend
SYS32ONLY(sigsuspend)
COMPAT_SYS(sigpending)
-COMPAT_SYS_SPU(sethostname)
+SYSCALL_SPU(sethostname)
COMPAT_SYS_SPU(setrlimit)
COMPAT_SYS(old_getrlimit)
COMPAT_SYS_SPU(getrusage)
COMPAT_SYS_SPU(gettimeofday)
COMPAT_SYS_SPU(settimeofday)
-COMPAT_SYS_SPU(getgroups)
-COMPAT_SYS_SPU(setgroups)
+SYSCALL_SPU(getgroups)
+SYSCALL_SPU(setgroups)
SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
SYSCALL_SPU(symlink)
OLDSYS(lstat)
-COMPAT_SYS_SPU(readlink)
+SYSCALL_SPU(readlink)
SYSCALL(uselib)
SYSCALL(swapon)
SYSCALL(reboot)
@@ -99,14 +100,14 @@ COMPAT_SYS_SPU(truncate)
COMPAT_SYS_SPU(ftruncate)
SYSCALL_SPU(fchmod)
SYSCALL_SPU(fchown)
-COMPAT_SYS_SPU(getpriority)
-COMPAT_SYS_SPU(setpriority)
+SYSCALL_SPU(getpriority)
+SYSCALL_SPU(setpriority)
SYSCALL(ni_syscall)
COMPAT_SYS(statfs)
COMPAT_SYS(fstatfs)
SYSCALL(ni_syscall)
COMPAT_SYS_SPU(socketcall)
-COMPAT_SYS_SPU(syslog)
+SYSCALL_SPU(syslog)
COMPAT_SYS_SPU(setitimer)
COMPAT_SYS_SPU(getitimer)
COMPAT_SYS_SPU(newstat)
@@ -124,7 +125,7 @@ COMPAT_SYS(ipc)
SYSCALL_SPU(fsync)
SYS32ONLY(sigreturn)
PPC_SYS(clone)
-COMPAT_SYS_SPU(setdomainname)
+SYSCALL_SPU(setdomainname)
SYSCALL_SPU(newuname)
SYSCALL(ni_syscall)
COMPAT_SYS_SPU(adjtimex)
@@ -135,10 +136,10 @@ SYSCALL(init_module)
SYSCALL(delete_module)
SYSCALL(ni_syscall)
SYSCALL(quotactl)
-COMPAT_SYS_SPU(getpgid)
+SYSCALL_SPU(getpgid)
SYSCALL_SPU(fchdir)
SYSCALL_SPU(bdflush)
-COMPAT_SYS(sysfs)
+SYSCALL_SPU(sysfs)
SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
SYSCALL(ni_syscall)
SYSCALL_SPU(setfsuid)
@@ -150,20 +151,20 @@ SYSCALL_SPU(flock)
SYSCALL_SPU(msync)
COMPAT_SYS_SPU(readv)
COMPAT_SYS_SPU(writev)
-COMPAT_SYS_SPU(getsid)
+SYSCALL_SPU(getsid)
SYSCALL_SPU(fdatasync)
COMPAT_SYS(sysctl)
SYSCALL_SPU(mlock)
SYSCALL_SPU(munlock)
SYSCALL_SPU(mlockall)
SYSCALL_SPU(munlockall)
-COMPAT_SYS_SPU(sched_setparam)
-COMPAT_SYS_SPU(sched_getparam)
-COMPAT_SYS_SPU(sched_setscheduler)
-COMPAT_SYS_SPU(sched_getscheduler)
+SYSCALL_SPU(sched_setparam)
+SYSCALL_SPU(sched_getparam)
+SYSCALL_SPU(sched_setscheduler)
+SYSCALL_SPU(sched_getscheduler)
SYSCALL_SPU(sched_yield)
-COMPAT_SYS_SPU(sched_get_priority_max)
-COMPAT_SYS_SPU(sched_get_priority_min)
+SYSCALL_SPU(sched_get_priority_max)
+SYSCALL_SPU(sched_get_priority_min)
COMPAT_SYS_SPU(sched_rr_get_interval)
COMPAT_SYS_SPU(nanosleep)
SYSCALL_SPU(mremap)
@@ -171,10 +172,10 @@ SYSCALL_SPU(setresuid)
SYSCALL_SPU(getresuid)
SYSCALL(ni_syscall)
SYSCALL_SPU(poll)
-COMPAT_SYS(nfsservctl)
+SYSCALL(ni_syscall)
SYSCALL_SPU(setresgid)
SYSCALL_SPU(getresgid)
-COMPAT_SYS_SPU(prctl)
+SYSCALL_SPU(prctl)
COMPAT_SYS(rt_sigreturn)
COMPAT_SYS(rt_sigaction)
COMPAT_SYS(rt_sigprocmask)
@@ -238,7 +239,7 @@ SYSCALL_SPU(io_cancel)
SYSCALL(set_tid_address)
SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
SYSCALL(exit_group)
-SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+COMPAT_SYS(lookup_dcookie)
SYSCALL_SPU(epoll_create)
SYSCALL_SPU(epoll_ctl)
SYSCALL_SPU(epoll_wait)
@@ -253,11 +254,11 @@ COMPAT_SYS_SPU(clock_gettime)
COMPAT_SYS_SPU(clock_getres)
COMPAT_SYS_SPU(clock_nanosleep)
SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
-COMPAT_SYS_SPU(tgkill)
+SYSCALL_SPU(tgkill)
COMPAT_SYS_SPU(utimes)
COMPAT_SYS_SPU(statfs64)
COMPAT_SYS_SPU(fstatfs64)
-SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
PPC_SYS_SPU(rtas)
OLDSYS(debug_setcontext)
SYSCALL(ni_syscall)
@@ -272,12 +273,12 @@ COMPAT_SYS(mq_timedreceive)
COMPAT_SYS(mq_notify)
COMPAT_SYS(mq_getsetattr)
COMPAT_SYS(kexec_load)
-COMPAT_SYS(add_key)
-COMPAT_SYS(request_key)
+SYSCALL(add_key)
+SYSCALL(request_key)
COMPAT_SYS(keyctl)
COMPAT_SYS(waitid)
-COMPAT_SYS(ioprio_set)
-COMPAT_SYS(ioprio_get)
+SYSCALL(ioprio_set)
+SYSCALL(ioprio_get)
SYSCALL(inotify_init)
SYSCALL(inotify_add_watch)
SYSCALL(inotify_rm_watch)
@@ -294,7 +295,7 @@ SYSCALL_SPU(mkdirat)
SYSCALL_SPU(mknodat)
SYSCALL_SPU(fchownat)
COMPAT_SYS_SPU(futimesat)
-SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64)
+SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)
SYSCALL_SPU(unlinkat)
SYSCALL_SPU(renameat)
SYSCALL_SPU(linkat)
@@ -348,3 +349,16 @@ COMPAT_SYS_SPU(sendmsg)
COMPAT_SYS_SPU(recvmsg)
COMPAT_SYS_SPU(recvmmsg)
SYSCALL_SPU(accept4)
+SYSCALL_SPU(name_to_handle_at)
+COMPAT_SYS_SPU(open_by_handle_at)
+COMPAT_SYS_SPU(clock_adjtime)
+SYSCALL_SPU(syncfs)
+COMPAT_SYS_SPU(sendmmsg)
+SYSCALL_SPU(setns)
+COMPAT_SYS(process_vm_readv)
+COMPAT_SYS(process_vm_writev)
+SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
+SYSCALL_SPU(sched_setattr)
+SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
deleted file mode 100644
index 5e474ddd227..00000000000
--- a/arch/powerpc/include/asm/system.h
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifndef _ASM_POWERPC_SYSTEM_H
-#define _ASM_POWERPC_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-#include <asm/hw_irq.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * *mb() variants without smp_ prefix must order all types of memory
- * operations with one another. sync is the only instruction sufficient
- * to do this.
- *
- * For the smp_ barriers, ordering is for cacheable memory operations
- * only. We have to use the sync instruction for smp_mb(), since lwsync
- * doesn't order loads with respect to previous stores. Lwsync can be
- * used for smp_rmb() and smp_wmb().
- *
- * However, on CPUs that don't support lwsync, lwsync actually maps to a
- * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-
-#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
-#ifdef CONFIG_SMP
-
-#ifdef __SUBARCH_HAS_LWSYNC
-# define SMPWMB LWSYNC
-#else
-# define SMPWMB eieio
-#endif
-
-#define smp_mb() mb()
-#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
-#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
-
-/*
- * This is a barrier which prevents following instructions from being
- * started until the value of the argument x is known. For example, if
- * x is a variable loaded from memory, this prevents following
- * instructions from being executed until the load has been performed.
- */
-#define data_barrier(x) \
- asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
-
-struct task_struct;
-struct pt_regs;
-
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
- if (unlikely(__ ## __NAME)) \
- return __ ## __NAME(regs); \
- return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-extern void do_send_trap(struct pt_regs *regs, unsigned long address,
- unsigned long error_code, int signal_code, int brkpt);
-#else
-extern void do_dabr(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
-#endif
-extern void print_backtrace(unsigned long *);
-extern void show_regs(struct pt_regs * regs);
-extern void flush_instruction_cache(void);
-extern void hard_reset_now(void);
-extern void poweroff_now(void);
-
-#ifdef CONFIG_6xx
-extern long _get_L2CR(void);
-extern long _get_L3CR(void);
-extern void _set_L2CR(unsigned long);
-extern void _set_L3CR(unsigned long);
-#else
-#define _get_L2CR() 0L
-#define _get_L3CR() 0L
-#define _set_L2CR(val) do { } while(0)
-#define _set_L3CR(val) do { } while(0)
-#endif
-
-extern void via_cuda_init(void);
-extern void read_rtc_time(void);
-extern void pmac_find_display(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_altivec(void);
-extern void giveup_altivec(struct task_struct *);
-extern void load_up_altivec(struct task_struct *);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
-extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to);
-extern void cvt_df(double *from, float *to);
-
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_VSX
-extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_SPE
-extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-extern int call_rtas(const char *, int, int, unsigned long *, ...);
-extern void cacheable_memzero(void *p, unsigned int nb);
-extern void *cacheable_memcpy(void *, const void *, unsigned int);
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern int die(const char *, struct pt_regs *, long);
-extern void _exception(int, struct pt_regs *, int, unsigned long);
-extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-#endif /* CONFIG_BOOKE_WDT */
-
-struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
-
-extern struct task_struct *__switch_to(struct task_struct *,
- struct task_struct *);
-#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct *_switch(struct thread_struct *prev,
- struct thread_struct *next);
-
-extern unsigned int rtas_data;
-extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern int init_bootmem_done; /* set once bootmem is available */
-extern phys_addr_t memory_limit;
-extern unsigned long klimit;
-
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-
-extern int powersave_nap; /* set if nap mode can be used in idle loop */
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32_local(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
- PPC_RELEASE_BARRIER
-"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stdcx. %3,0,%2 \n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__xchg_u64_local(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stdcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-#endif
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
-#ifdef CONFIG_PPC64
- case 8:
- return __xchg_u64(ptr, x);
-#endif
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32_local(ptr, x);
-#ifdef CONFIG_PPC64
- case 8:
- return __xchg_u64_local(ptr, x);
-#endif
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define xchg_local(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_local((ptr), \
- (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-/*
- * Compare and exchange - if *p == old, set it to new,
- * and return the old value of *p.
- */
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __always_inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n"
- PPC405_ERR77(0,%2)
-" stwcx. %4,0,%2\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
- unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n"
- PPC405_ERR77(0,%2)
-" stwcx. %4,0,%2\n\
- bne- 1b"
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- PPC_RELEASE_BARRIER
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
- unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-#endif
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64(ptr, old, new);
-#endif
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32_local(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64_local(ptr, old, new);
-#endif
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-
-#define cmpxchg_local(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-#ifdef CONFIG_PPC64
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-#else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-/* Used in very early kernel initialization. */
-extern unsigned long reloc_offset(void);
-extern unsigned long add_reloc_offset(unsigned long);
-extern void reloc_got2(unsigned long);
-
-#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-
-extern struct dentry *powerpc_debugfs_root;
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index f663634cccc..743f36b38e5 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -26,10 +26,14 @@
/*
* Tces come in two formats, one for the virtual bus and a different
- * format for PCI
+ * format for PCI. PCI TCEs can have hardware or software maintained
+ * coherency.
*/
-#define TCE_VB 0
-#define TCE_PCI 1
+#define TCE_VB 0
+#define TCE_PCI 1
+#define TCE_PCI_SWINV_CREATE 2
+#define TCE_PCI_SWINV_FREE 4
+#define TCE_PCI_SWINV_PAIR 8
/* TCE page size is 4096 bytes (1 << 12) */
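Since the new TCE_PCI_SWINV_* values are distinct powers of two, a table's flags word can carry any combination of them alongside TCE_PCI. A small sketch of testing them (the flags variable and prints are illustrative only):

    #include <stdio.h>

    #define TCE_PCI              1
    #define TCE_PCI_SWINV_CREATE 2
    #define TCE_PCI_SWINV_FREE   4
    #define TCE_PCI_SWINV_PAIR   8

    int main(void)
    {
            unsigned long flags = TCE_PCI | TCE_PCI_SWINV_CREATE
                                          | TCE_PCI_SWINV_FREE;

            if (flags & TCE_PCI_SWINV_CREATE)  /* software-invalidate on map */
                    printf("invalidate on create\n");
            if (flags & TCE_PCI_SWINV_FREE)    /* ...and on unmap */
                    printf("invalidate on free\n");
            return 0;
    }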
diff --git a/arch/powerpc/include/asm/termbits.h b/arch/powerpc/include/asm/termbits.h
deleted file mode 100644
index 549d700e18f..00000000000
--- a/arch/powerpc/include/asm/termbits.h
+++ /dev/null
@@ -1,210 +0,0 @@
-#ifndef _ASM_POWERPC_TERMBITS_H
-#define _ASM_POWERPC_TERMBITS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-/*
- * termios type and macro definitions. Be careful about adding stuff
- * to this file since it's used in GNU libc and there are strict rules
- * concerning namespace pollution.
- */
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_cc[NCCS]; /* control characters */
- cc_t c_line; /* line discipline (== c_cc[19]) */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* For PowerPC the termios and ktermios are the same */
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_cc[NCCS]; /* control characters */
- cc_t c_line; /* line discipline (== c_cc[19]) */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VMIN 5
-#define VEOL 6
-#define VTIME 7
-#define VEOL2 8
-#define VSWTC 9
-#define VWERASE 10
-#define VREPRINT 11
-#define VSUSP 12
-#define VSTART 13
-#define VSTOP 14
-#define VLNEXT 15
-#define VDISCARD 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IXON 0001000
-#define IXOFF 0002000
-#define IXANY 0004000
-#define IUCLC 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define ONLCR 0000002
-#define OLCUC 0000004
-
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-
-#define OFILL 00000100
-#define OFDEL 00000200
-#define NLDLY 00001400
-#define NL0 00000000
-#define NL1 00000400
-#define NL2 00001000
-#define NL3 00001400
-#define TABDLY 00006000
-#define TAB0 00000000
-#define TAB1 00002000
-#define TAB2 00004000
-#define TAB3 00006000
-#define XTABS 00006000 /* required by POSIX to == TAB3 */
-#define CRDLY 00030000
-#define CR0 00000000
-#define CR1 00010000
-#define CR2 00020000
-#define CR3 00030000
-#define FFDLY 00040000
-#define FF0 00000000
-#define FF1 00040000
-#define BSDLY 00100000
-#define BS0 00000000
-#define BS1 00100000
-#define VTDLY 00200000
-#define VT0 00000000
-#define VT1 00200000
-
-/* c_cflag bit meaning */
-#define CBAUD 0000377
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CBAUDEX 0000000
-#define B57600 00020
-#define B115200 00021
-#define B230400 00022
-#define B460800 00023
-#define B500000 00024
-#define B576000 00025
-#define B921600 00026
-#define B1000000 00027
-#define B1152000 00030
-#define B1500000 00031
-#define B2000000 00032
-#define B2500000 00033
-#define B3000000 00034
-#define B3500000 00035
-#define B4000000 00036
-#define BOTHER 00037
-
-#define CIBAUD 077600000
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-#define CSIZE 00001400
-#define CS5 00000000
-#define CS6 00000400
-#define CS7 00001000
-#define CS8 00001400
-
-#define CSTOPB 00002000
-#define CREAD 00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL 00040000
-
-#define CLOCAL 00100000
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-/* c_lflag bits */
-#define ISIG 0x00000080
-#define ICANON 0x00000100
-#define XCASE 0x00004000
-#define ECHO 0x00000008
-#define ECHOE 0x00000002
-#define ECHOK 0x00000004
-#define ECHONL 0x00000010
-#define NOFLSH 0x80000000
-#define TOSTOP 0x00400000
-#define ECHOCTL 0x00000040
-#define ECHOPRT 0x00000020
-#define ECHOKE 0x00000001
-#define FLUSHO 0x00800000
-#define PENDIN 0x20000000
-#define IEXTEN 0x00000400
-#define EXTPROC 0x10000000
-
-/* Values for the ACTION argument to `tcflow'. */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* _ASM_POWERPC_TERMBITS_H */
diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h
index a24f48704a3..b8353e2032d 100644
--- a/arch/powerpc/include/asm/termios.h
+++ b/arch/powerpc/include/asm/termios.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_POWERPC_TERMIOS_H
-#define _ASM_POWERPC_TERMIOS_H
-
/*
* Liberally adapted from alpha/termios.h. In particular, the c_cc[]
* fields have been reordered so that termio & termios share the
@@ -12,74 +9,14 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_TERMIOS_H
+#define _ASM_POWERPC_TERMIOS_H
-#include <asm/ioctls.h>
-#include <asm/termbits.h>
-
-struct sgttyb {
- char sg_ispeed;
- char sg_ospeed;
- char sg_erase;
- char sg_kill;
- short sg_flags;
-};
-
-struct tchars {
- char t_intrc;
- char t_quitc;
- char t_startc;
- char t_stopc;
- char t_eofc;
- char t_brkc;
-};
-
-struct ltchars {
- char t_suspc;
- char t_dsuspc;
- char t_rprntc;
- char t_flushc;
- char t_werasc;
- char t_lnextc;
-};
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 10
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* c_cc characters */
-#define _VINTR 0
-#define _VQUIT 1
-#define _VERASE 2
-#define _VKILL 3
-#define _VEOF 4
-#define _VMIN 5
-#define _VEOL 6
-#define _VTIME 7
-#define _VEOL2 8
-#define _VSWTC 9
+#include <uapi/asm/termios.h>
-#ifdef __KERNEL__
/* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */
#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
-#endif
-
-#ifdef __KERNEL__
#include <asm-generic/termios-base.h>
-#endif /* __KERNEL__ */
-
#endif /* _ASM_POWERPC_TERMIOS_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 65eb85976a0..b034ecdb7c7 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -22,6 +22,12 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
+#ifdef CONFIG_PPC64
+#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#else
+#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
@@ -62,21 +68,8 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* thread information allocation */
-
-#if THREAD_SHIFT >= PAGE_SHIFT
-
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
-#else /* THREAD_SHIFT < PAGE_SHIFT */
-
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
-extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
-extern void free_thread_info(struct thread_info *ti);
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -89,8 +82,6 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __ASSEMBLY__ */
-#define PREEMPT_ACTIVE 0x10000000
-
/*
* thread information flag bit numbers
*/
@@ -100,17 +91,22 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
-#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
+#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
-#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
+#define TIF_UPROBE 14 /* breakpointed or single-stepping */
+#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
+ for stack store? */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+#if defined(CONFIG_PPC64)
+#define TIF_ELF2ABI 18 /* function descriptors must die! */
+#endif
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -118,20 +114,24 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
-#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
-#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW)
+#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
-#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
-#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
@@ -139,10 +139,14 @@ static inline struct thread_info *current_thread_info(void)
#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
+#define TLF_LAZY_MMU 3 /* tlb_batch is active */
+#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
#define _TLF_NAPPING (1 << TLF_NAPPING)
#define _TLF_SLEEPING (1 << TLF_SLEEPING)
#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
+#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
+#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
@@ -150,7 +154,29 @@ static inline void set_restore_sigmask(void)
{
struct thread_info *ti = current_thread_info();
ti->local_flags |= _TLF_RESTORE_SIGMASK;
- set_bit(TIF_SIGPENDING, &ti->flags);
+ WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
+ return false;
+ ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
+ return true;
+}
+
+static inline bool test_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ return (ti->local_flags & flags) != 0;
}
#ifdef CONFIG_PPC64
@@ -159,6 +185,12 @@ static inline void set_restore_sigmask(void)
#define is_32bit_task() (1)
#endif
+#if defined(CONFIG_PPC64)
+#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
+#else
+#define is_elf2_task() (0)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
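The new CURRENT_THREAD_INFO asm macro exploits the fact that struct thread_info sits at the base of the THREAD_SIZE-aligned kernel stack: clearing the low THREAD_SHIFT bits of any stack pointer yields its address. A userspace illustration of the same arithmetic (THREAD_SHIFT and the sample pointer are made-up values):

    #include <stdio.h>

    #define THREAD_SHIFT 14
    #define THREAD_SIZE  (1UL << THREAD_SHIFT)

    static unsigned long current_thread_info_addr(unsigned long sp)
    {
            /* equivalent of: clrrdi dest, sp, THREAD_SHIFT */
            return sp & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
            unsigned long sp = 0xc000000012345678UL;
            printf("thread_info at %#lx\n", current_thread_info_addr(sp));
            return 0;
    }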
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index fe6f7c2c9c6..1d428e6007c 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,20 +18,17 @@
#include <linux/percpu.h>
#include <asm/processor.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
-#endif
/* time.c */
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
struct rtc_time;
extern void to_tm(int tim, struct rtc_time * tm);
extern void GregorianDay(struct rtc_time *tm);
+extern void tick_broadcast_ipi_handler(void);
extern void generic_calibrate_decr(void);
@@ -167,15 +164,6 @@ static inline void set_dec(int val)
#ifndef CONFIG_BOOKE
--val;
#endif
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES) &&
- get_lppaca()->shared_proc) {
- get_lppaca()->virtual_decr = val;
- if (get_dec() > val)
- HvCall_setVirtualDecr();
- return;
- }
-#endif
mtspr(SPRN_DEC, val);
#endif /* not 40x or 8xx_CPU6 */
}
@@ -210,14 +198,9 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
-#define account_process_vtime(tsk) account_process_tick(tsk, 0)
-#else
-#define account_process_vtime(tsk) do { } while (0)
-#endif
-
extern void secondary_cpu_time_init(void);
-extern void iSeries_time_init_early(void);
+
+DECLARE_PER_CPU(u64, decrementers_next_tb);
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index c55e14f7ef4..2cf846edb3f 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
+#ifdef CONFIG_8xx
"97: mftb %0\n"
+#else
+ "97: mfspr %0, %2\n"
+#endif
"99:\n"
".section __ftr_fixup,\"a\"\n"
".align 2\n"
@@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)
" .long 0\n"
" .long 0\n"
".previous"
+#ifdef CONFIG_8xx
: "=r" (ret) : "i" (CPU_FTR_601));
+#else
+ : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
+#endif
return ret;
#endif
}
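The hunk above reads the timebase with mfspr on non-8xx parts, since the 8xx only implements mftb. Stripped of the feature-fixup machinery, the non-8xx path reduces to something like this sketch (SPRN_TBRL is the user-readable timebase-lower SPR, nominally 268):

static inline unsigned long read_tbl_sketch(void)
{
	unsigned long tbl;

	/* equivalent of "mfspr %0, SPRN_TBRL" without the fixup section */
	asm volatile("mfspr %0, 268" : "=r" (tbl));
	return tbl;
}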
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index d50a380b2b6..2def01ed0cb 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -79,6 +79,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
#elif defined(CONFIG_PPC_STD_MMU_64)
+#define MMU_NO_CONTEXT 0
+
/*
* TLB flushing for 64-bit hash-MMU CPUs
*/
@@ -93,7 +95,7 @@ struct ppc64_tlb_batch {
unsigned long index;
struct mm_struct *mm;
real_pte_t pte[PPC64_TLB_BATCH_NR];
- unsigned long vaddr[PPC64_TLB_BATCH_NR];
+ unsigned long vpn[PPC64_TLB_BATCH_NR];
unsigned int psize;
int ssize;
};
@@ -101,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, unsigned long pte, int huge);
-
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
@@ -125,7 +124,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define arch_flush_lazy_mmu_mode() do {} while (0)
-extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);
@@ -166,7 +165,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
-
+extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr);
#else
#error Unsupported MMU type
#endif
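The tlbflush.h hunks above rename the batched virtual addresses to VPNs and keep the batch-drain entry points; for context, a hedged sketch of the batching pattern they support (the loop body is elided and hypothetical):

	arch_enter_lazy_mmu_mode();	/* arm the per-CPU ppc64_tlb_batch */
	/* ... clear PTEs; each invalidation queues a vpn into the batch ... */
	arch_leave_lazy_mmu_mode();	/* drains the batch via __flush_tlb_pending() */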
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
new file mode 100644
index 00000000000..c22d704b6d4
--- /dev/null
+++ b/arch/powerpc/include/asm/tm.h
@@ -0,0 +1,27 @@
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#include <uapi/asm/tm.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void do_load_up_transact_fpu(struct thread_struct *thread);
+extern void do_load_up_transact_altivec(struct thread_struct *thread);
+#endif
+
+extern void tm_enable(void);
+extern void tm_reclaim(struct thread_struct *thread,
+ unsigned long orig_msr, uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
+extern void tm_recheckpoint(struct thread_struct *thread,
+ unsigned long orig_msr);
+extern void tm_abort(uint8_t cause);
+extern void tm_save_sprs(struct thread_struct *thread);
+extern void tm_restore_sprs(struct thread_struct *thread);
+
+#endif /* __ASSEMBLY__ */
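A hedged sketch of how context-switch code might drive these entry points (the prev/next naming and MSR values are assumed for illustration; TM_CAUSE_RESCHED is expected to come from uapi/asm/tm.h):

	tm_enable();			/* make the TM SPRs accessible */
	tm_reclaim(&prev->thread, prev_msr, TM_CAUSE_RESCHED);
	tm_save_sprs(&prev->thread);
	/* ... later, when switching the transactional task back in ... */
	tm_restore_sprs(&next->thread);
	tm_recheckpoint(&next->thread, next_msr);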
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa65c3..5f1048eaa5b 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -3,38 +3,19 @@
#ifdef __KERNEL__
-struct sys_device;
+struct device;
struct device_node;
#ifdef CONFIG_NUMA
/*
- * Before going off node we want the VM to try and reclaim from the local
- * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
- * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
- * 20, we never reclaim and go off node straight away.
- *
- * To fix this we choose a smaller value of RECLAIM_DISTANCE.
- */
-#define RECLAIM_DISTANCE 10
-
-/*
- * Before going off node we want the VM to try and reclaim from the local
- * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
- * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
- * 20, we never reclaim and go off node straight away.
- *
- * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
*/
#define RECLAIM_DISTANCE 10
#include <asm/mmzone.h>
-static inline int cpu_to_node(int cpu)
-{
- return numa_cpu_lookup_table[cpu];
-}
-
#define parent_node(node) (node)
#define cpumask_of_node(node) ((node) == -1 ? \
@@ -55,69 +36,57 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
-/* sched_domains SD_NODE_INIT for PPC64 machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 1, \
- .newidle_idx = 0, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
- \
- .flags = 1*SD_LOAD_BALANCE \
- | 1*SD_BALANCE_NEWIDLE \
- | 1*SD_BALANCE_EXEC \
- | 1*SD_BALANCE_FORK \
- | 0*SD_BALANCE_WAKE \
- | 0*SD_WAKE_AFFINE \
- | 0*SD_PREFER_LOCAL \
- | 0*SD_SHARE_CPUPOWER \
- | 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
- | 1*SD_SERIALIZE \
- | 0*SD_PREFER_SIBLING \
- , \
- .last_balance = jiffies, \
- .balance_interval = 1, \
-}
-
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
extern void __init dump_numa_cpu_topology(void);
-extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
-extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
+extern int sysfs_add_device_to_node(struct device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct device *dev, int nid);
#else
static inline void dump_numa_cpu_topology(void) {}
-static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+static inline int sysfs_add_device_to_node(struct device *dev, int nid)
{
return 0;
}
-static inline void sysfs_remove_device_from_node(struct sys_device *dev,
+static inline void sysfs_remove_device_from_node(struct device *dev,
int nid)
{
}
-
#endif /* CONFIG_NUMA */
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+extern int prrn_is_enabled(void);
+#else
+static inline int start_topology_update(void)
+{
+ return 0;
+}
+static inline int stop_topology_update(void)
+{
+ return 0;
+}
+static inline int prrn_is_enabled(void)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
#include <asm-generic/topology.h>
#ifdef CONFIG_SMP
#include <asm/cputable.h>
-#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
#ifdef CONFIG_PPC64
#include <asm/smp.h>
+#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index cbe2297d68b..5712f06905a 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -8,7 +8,7 @@
struct pt_regs;
-TRACE_EVENT(irq_entry,
+DECLARE_EVENT_CLASS(ppc64_interrupt_class,
TP_PROTO(struct pt_regs *regs),
@@ -25,55 +25,32 @@ TRACE_EVENT(irq_entry,
TP_printk("pt_regs=%p", __entry->regs)
);
-TRACE_EVENT(irq_exit,
+DEFINE_EVENT(ppc64_interrupt_class, irq_entry,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
-
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
-
- TP_fast_assign(
- __entry->regs = regs;
- ),
-
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
-TRACE_EVENT(timer_interrupt_entry,
+DEFINE_EVENT(ppc64_interrupt_class, irq_exit,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
-
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
-
- TP_fast_assign(
- __entry->regs = regs;
- ),
-
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
-TRACE_EVENT(timer_interrupt_exit,
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_entry,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
+ TP_ARGS(regs)
+);
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
- TP_fast_assign(
- __entry->regs = regs;
- ),
+ TP_PROTO(struct pt_regs *regs),
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
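With the shared event class in place, further events of the same shape become three-line definitions; for example (the event name here is hypothetical):

DEFINE_EVENT(ppc64_interrupt_class, doorbell_entry,

	TP_PROTO(struct pt_regs *regs),

	TP_ARGS(regs)
);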
#ifdef CONFIG_PPC_PSERIES
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index a5aea0ca34e..bfb6ded38ff 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -1,19 +1,3 @@
-#ifndef _ASM_POWERPC_TYPES_H
-#define _ASM_POWERPC_TYPES_H
-
-/*
- * This is here because we used to use l64 for 64bit powerpc
- * and we don't want to impact user mode with our change to ll64
- * in the kernel.
- */
-#if defined(__powerpc64__) && !defined(__KERNEL__)
-# include <asm-generic/int-l64.h>
-#else
-# include <asm-generic/int-ll64.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
/*
* This file is never included by application software unless
* explicitly requested (e.g., via linux/types.h) in which case the
@@ -26,31 +10,15 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
-#ifdef __powerpc64__
-typedef unsigned int umode_t;
-#else
-typedef unsigned short umode_t;
-#endif
-
-typedef struct {
- __u32 u[4];
-} __attribute__((aligned(16))) __vector128;
-
-#endif /* __ASSEMBLY__ */
+#include <uapi/asm/types.h>
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef __vector128 vector128;
-#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
-typedef u64 dma64_addr_t;
-
typedef struct {
unsigned long entry;
unsigned long toc;
@@ -59,6 +27,4 @@ typedef struct {
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bd0fb849515..9485b43a7c0 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -40,6 +40,8 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
+#define user_addr_max() (get_fs().seg)
+
#ifdef __powerpc64__
/*
* This check is sufficient because there is a large enough
@@ -96,11 +98,6 @@ struct exception_table_entry {
* PowerPC, we can just do these as direct assignments. (Of course, the
* exception handling means that it's no longer "just"...)
*
- * The "user64" versions of the user access functions are versions that
- * allow access of 64-bit data. The "get_user" functions do not
- * properly handle 64-bit data because the value gets down cast to a long.
- * The "put_user" functions already handle 64-bit data properly but we add
- * "user64" versions for completeness
*/
#define get_user(x, ptr) \
__get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -112,12 +109,6 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#ifndef __powerpc64__
-#define __get_user64(x, ptr) \
- __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
-#define __put_user64(x, ptr) __put_user(x, ptr)
-#endif
-
#define __get_user_inatomic(x, ptr) \
__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
@@ -187,7 +178,7 @@ do { \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_sleep(); \
+ might_fault(); \
__chk_user_ptr(ptr); \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -197,7 +188,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -277,7 +268,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -291,7 +282,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -303,7 +294,7 @@ do { \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -428,14 +419,14 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
static inline unsigned long __copy_from_user(void *to,
const void __user *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_from_user_inatomic(to, from, size);
}
static inline unsigned long __copy_to_user(void __user *to,
const void *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_to_user_inatomic(to, from, size);
}
@@ -443,7 +434,7 @@ extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- might_sleep();
+ might_fault();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
@@ -453,42 +444,9 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
return size;
}
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dst, const char __user *src,
- long count)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, src, 1)))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
- unsigned long top = current->thread.fs.seg;
-
- if ((unsigned long)str > top)
- return 0;
- return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
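As a reminder of the calling convention these macros preserve, a minimal (hypothetical) caller looks like the following; might_fault() now annotates that the access may sleep while resolving a page fault:

static long read_user_u32(u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))	/* returns -EFAULT on a bad pointer */
		return -EFAULT;
	*out = val;
	return 0;
}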
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
index 46b09ba6bea..6927ac26516 100644
--- a/arch/powerpc/include/asm/ucc.h
+++ b/arch/powerpc/include/asm/ucc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
index 839aab8bf37..72ea9bab07d 100644
--- a/arch/powerpc/include/asm/ucc_fast.h
+++ b/arch/powerpc/include/asm/ucc_fast.h
@@ -1,7 +1,7 @@
/*
* Internal header file for UCC FAST unit routines.
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -19,7 +19,7 @@
#include <asm/immap_qe.h>
#include <asm/qe.h>
-#include "ucc.h"
+#include <asm/ucc.h>
/* Receive BD's status */
#define R_E 0x80000000 /* buffer empty */
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index 0980e6ad335..c44131e68e1 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -20,7 +20,7 @@
#include <asm/immap_qe.h>
#include <asm/qe.h>
-#include "ucc.h"
+#include <asm/ucc.h>
/* transmit BD's status */
#define T_R 0x80000000 /* ready bit */
diff --git a/arch/powerpc/include/asm/ucontext.h b/arch/powerpc/include/asm/ucontext.h
deleted file mode 100644
index d9a4ddf0cc8..00000000000
--- a/arch/powerpc/include/asm/ucontext.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_POWERPC_UCONTEXT_H
-#define _ASM_POWERPC_UCONTEXT_H
-
-#ifdef __powerpc64__
-#include <asm/sigcontext.h>
-#else
-#include <asm/elf.h>
-#endif
-#include <asm/signal.h>
-
-#ifndef __powerpc64__
-struct mcontext {
- elf_gregset_t mc_gregs;
- elf_fpregset_t mc_fregs;
- unsigned long mc_pad[2];
- elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
-};
-#endif
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext __user *uc_link;
- stack_t uc_stack;
-#ifndef __powerpc64__
- int uc_pad[7];
- struct mcontext __user *uc_regs;/* points to uc_mcontext field */
-#endif
- sigset_t uc_sigmask;
- /* glibc has 1024-bit signal masks, ours are 64-bit */
-#ifdef __powerpc64__
- sigset_t __unused[15]; /* Allow for uc_sigmask growth */
- struct sigcontext uc_mcontext; /* last for extensibility */
-#else
- int uc_maskext[30];
- int uc_pad2[3];
- struct mcontext uc_mcontext;
-#endif
-};
-
-#endif /* _ASM_POWERPC_UCONTEXT_H */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 11ae699135b..b51fba10e73 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -21,17 +21,17 @@ extern int (*udbg_getc_poll)(void);
extern void udbg_puts(const char *s);
extern int udbg_write(const char *s, int n);
-extern int udbg_read(char *buf, int buflen);
extern void register_early_udbg_console(void);
extern void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
extern void udbg_progress(char *s, unsigned short hex);
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
- unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
- unsigned int clock);
+extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(unsigned int clock);
struct device_node;
extern void udbg_scc_init(int force_scc);
@@ -40,10 +40,10 @@ extern void udbg_adb_init_early(void);
extern void __init udbg_early_init(void);
extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_debug_lpar_hvsi(void);
extern void __init udbg_init_pmac_realmode(void);
extern void __init udbg_init_maple_realmode(void);
extern void __init udbg_init_pas_realmode(void);
-extern void __init udbg_init_iseries(void);
extern void __init udbg_init_rtas_panel(void);
extern void __init udbg_init_rtas_console(void);
extern void __init udbg_init_debug_beat(void);
@@ -52,6 +52,12 @@ extern void __init udbg_init_44x_as1(void);
extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
+extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
+extern void __init udbg_init_ehv_bc(void);
+extern void __init udbg_init_ps3gelic(void);
+extern void __init udbg_init_debug_opal_raw(void);
+extern void __init udbg_init_debug_opal_hvsi(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3c213..8296381ae43 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
#ifdef __KERNEL__
/*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
index f737732c386..d12b11d7641 100644
--- a/arch/powerpc/include/asm/uninorth.h
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -60,7 +60,7 @@
*
* Obviously, the GART is not cache coherent and so any change to it
* must be flushed to memory (or maybe just make the GART space non
- * cachable). AGP memory itself doens't seem to be cache coherent neither.
+ * cacheable). AGP memory itself doesn't seem to be cache coherent either.
*
* In order to invalidate the GART (which is probably necessary to inval
* the bridge internal TLBs), the following sequence has to be written,
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6151937657f..5ce5552ab9f 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_POWERPC_UNISTD_H_
-#define _ASM_POWERPC_UNISTD_H_
-
/*
* This file contains the system call numbers.
*
@@ -9,368 +6,13 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_UNISTD_H_
+#define _ASM_POWERPC_UNISTD_H_
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_query_module 166
-#define __NR_poll 167
-#define __NR_nfsservctl 168
-#define __NR_setresgid 169
-#define __NR_getresgid 170
-#define __NR_prctl 171
-#define __NR_rt_sigreturn 172
-#define __NR_rt_sigaction 173
-#define __NR_rt_sigprocmask 174
-#define __NR_rt_sigpending 175
-#define __NR_rt_sigtimedwait 176
-#define __NR_rt_sigqueueinfo 177
-#define __NR_rt_sigsuspend 178
-#define __NR_pread64 179
-#define __NR_pwrite64 180
-#define __NR_chown 181
-#define __NR_getcwd 182
-#define __NR_capget 183
-#define __NR_capset 184
-#define __NR_sigaltstack 185
-#define __NR_sendfile 186
-#define __NR_getpmsg 187 /* some people actually want streams */
-#define __NR_putpmsg 188 /* some people actually want streams */
-#define __NR_vfork 189
-#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
-#define __NR_readahead 191
-#ifndef __powerpc64__ /* these are 32-bit only */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#endif
-#define __NR_pciconfig_read 198
-#define __NR_pciconfig_write 199
-#define __NR_pciconfig_iobase 200
-#define __NR_multiplexer 201
-#define __NR_getdents64 202
-#define __NR_pivot_root 203
-#ifndef __powerpc64__
-#define __NR_fcntl64 204
-#endif
-#define __NR_madvise 205
-#define __NR_mincore 206
-#define __NR_gettid 207
-#define __NR_tkill 208
-#define __NR_setxattr 209
-#define __NR_lsetxattr 210
-#define __NR_fsetxattr 211
-#define __NR_getxattr 212
-#define __NR_lgetxattr 213
-#define __NR_fgetxattr 214
-#define __NR_listxattr 215
-#define __NR_llistxattr 216
-#define __NR_flistxattr 217
-#define __NR_removexattr 218
-#define __NR_lremovexattr 219
-#define __NR_fremovexattr 220
-#define __NR_futex 221
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-/* 224 currently unused */
-#define __NR_tuxcall 225
-#ifndef __powerpc64__
-#define __NR_sendfile64 226
-#endif
-#define __NR_io_setup 227
-#define __NR_io_destroy 228
-#define __NR_io_getevents 229
-#define __NR_io_submit 230
-#define __NR_io_cancel 231
-#define __NR_set_tid_address 232
-#define __NR_fadvise64 233
-#define __NR_exit_group 234
-#define __NR_lookup_dcookie 235
-#define __NR_epoll_create 236
-#define __NR_epoll_ctl 237
-#define __NR_epoll_wait 238
-#define __NR_remap_file_pages 239
-#define __NR_timer_create 240
-#define __NR_timer_settime 241
-#define __NR_timer_gettime 242
-#define __NR_timer_getoverrun 243
-#define __NR_timer_delete 244
-#define __NR_clock_settime 245
-#define __NR_clock_gettime 246
-#define __NR_clock_getres 247
-#define __NR_clock_nanosleep 248
-#define __NR_swapcontext 249
-#define __NR_tgkill 250
-#define __NR_utimes 251
-#define __NR_statfs64 252
-#define __NR_fstatfs64 253
-#ifndef __powerpc64__
-#define __NR_fadvise64_64 254
-#endif
-#define __NR_rtas 255
-#define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
-#define __NR_migrate_pages 258
-#define __NR_mbind 259
-#define __NR_get_mempolicy 260
-#define __NR_set_mempolicy 261
-#define __NR_mq_open 262
-#define __NR_mq_unlink 263
-#define __NR_mq_timedsend 264
-#define __NR_mq_timedreceive 265
-#define __NR_mq_notify 266
-#define __NR_mq_getsetattr 267
-#define __NR_kexec_load 268
-#define __NR_add_key 269
-#define __NR_request_key 270
-#define __NR_keyctl 271
-#define __NR_waitid 272
-#define __NR_ioprio_set 273
-#define __NR_ioprio_get 274
-#define __NR_inotify_init 275
-#define __NR_inotify_add_watch 276
-#define __NR_inotify_rm_watch 277
-#define __NR_spu_run 278
-#define __NR_spu_create 279
-#define __NR_pselect6 280
-#define __NR_ppoll 281
-#define __NR_unshare 282
-#define __NR_splice 283
-#define __NR_tee 284
-#define __NR_vmsplice 285
-#define __NR_openat 286
-#define __NR_mkdirat 287
-#define __NR_mknodat 288
-#define __NR_fchownat 289
-#define __NR_futimesat 290
-#ifdef __powerpc64__
-#define __NR_newfstatat 291
-#else
-#define __NR_fstatat64 291
-#endif
-#define __NR_unlinkat 292
-#define __NR_renameat 293
-#define __NR_linkat 294
-#define __NR_symlinkat 295
-#define __NR_readlinkat 296
-#define __NR_fchmodat 297
-#define __NR_faccessat 298
-#define __NR_get_robust_list 299
-#define __NR_set_robust_list 300
-#define __NR_move_pages 301
-#define __NR_getcpu 302
-#define __NR_epoll_pwait 303
-#define __NR_utimensat 304
-#define __NR_signalfd 305
-#define __NR_timerfd_create 306
-#define __NR_eventfd 307
-#define __NR_sync_file_range2 308
-#define __NR_fallocate 309
-#define __NR_subpage_prot 310
-#define __NR_timerfd_settime 311
-#define __NR_timerfd_gettime 312
-#define __NR_signalfd4 313
-#define __NR_eventfd2 314
-#define __NR_epoll_create1 315
-#define __NR_dup3 316
-#define __NR_pipe2 317
-#define __NR_inotify_init1 318
-#define __NR_perf_event_open 319
-#define __NR_preadv 320
-#define __NR_pwritev 321
-#define __NR_rt_tgsigqueueinfo 322
-#define __NR_fanotify_init 323
-#define __NR_fanotify_mark 324
-#define __NR_prlimit64 325
-#define __NR_socket 326
-#define __NR_bind 327
-#define __NR_connect 328
-#define __NR_listen 329
-#define __NR_accept 330
-#define __NR_getsockname 331
-#define __NR_getpeername 332
-#define __NR_socketpair 333
-#define __NR_send 334
-#define __NR_sendto 335
-#define __NR_recv 336
-#define __NR_recvfrom 337
-#define __NR_shutdown 338
-#define __NR_setsockopt 339
-#define __NR_getsockopt 340
-#define __NR_sendmsg 341
-#define __NR_recvmsg 342
-#define __NR_recvmmsg 343
-#define __NR_accept4 344
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
-#define __NR_syscalls 345
+#define __NR_syscalls 358
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
@@ -381,14 +23,12 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
#define __ARCH_WANT_SYS_SIGNAL
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
@@ -403,24 +43,17 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
-
-/*
- * "Conditional" syscalls
- */
-#define cond_syscall(x) \
- asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
new file mode 100644
index 00000000000..7422a999a39
--- /dev/null
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include <asm/probes.h>
+
+typedef ppc_opcode_t uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
+
+/* The following alias is needed for reference from arch-agnostic code */
+#define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION
+#define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */
+
+struct arch_uprobe {
+ union {
+ u32 insn;
+ u32 ixol;
+ };
+};
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_nr;
+};
+
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index dc0419b66f1..c53f5f6d176 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x100000
-#define VDSO64_LBASE 0x100000
+#define VDSO32_LBASE 0x0
+#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE VDSO32_LBASE
+#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15
@@ -22,6 +22,8 @@ extern unsigned long vdso64_rt_sigtramp;
extern unsigned long vdso32_sigtramp;
extern unsigned long vdso32_rt_sigtramp;
+int vdso_getcpu_init(void);
+
#else /* __ASSEMBLY__ */
#ifdef __VDSO64__
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5319b..b73a8199f16 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -57,7 +57,7 @@ struct vdso_data {
} version;
/* Note about the platform flags: it now only contains the lpar
- * bit. The actual platform number is dead and burried
+ * bit. The actual platform number is dead and buried
*/
__u32 platform; /* Platform flags 0x18 */
__u32 processor; /* Processor type 0x1C */
@@ -116,9 +116,7 @@ struct vdso_data {
#endif /* CONFIG_PPC64 */
-#ifdef __KERNEL__
extern struct vdso_data *vdso_data;
-#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 0a290a19594..4f9b7ca0710 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -15,7 +15,6 @@
#define _ASM_POWERPC_VIO_H
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -44,8 +43,52 @@
*/
#define VIO_CMO_MIN_ENT 1562624
+extern struct bus_type vio_bus_type;
+
struct iommu_table;
+/*
+ * Platform Facilities Option (PFO)-specific data
+ */
+
+/* Starting unit address for PFO devices on the VIO BUS */
+#define VIO_BASE_PFO_UA 0x50000000
+
+/**
+ * vio_pfo_op - PFO operation parameters
+ *
+ * @flags: h_call subfunctions and modifiers
+ * @in: Input data block logical real address
+ * @inlen: If non-negative, the length of the input data block. If negative,
+ * the length of the input data descriptor list in bytes.
+ * @out: Output data block logical real address
+ * @outlen: If non-negative, the length of the output data block. If negative,
+ *	the length of the output data descriptor list in bytes.
+ * @csbcpb: Logical real address of the 4k naturally-aligned storage block
+ * containing the CSB & optional FC field specific CPB
+ * @timeout: # of milliseconds to retry h_call, 0 for no timeout.
+ * @hcall_err: pointer to return the h_call return value, else NULL
+ */
+struct vio_pfo_op {
+ u64 flags;
+ s64 in;
+ s64 inlen;
+ s64 out;
+ s64 outlen;
+ u64 csbcpb;
+ void *done;
+ unsigned long handle;
+ unsigned int timeout;
+ long hcall_err;
+};
+
+/* End PFO specific data */
+
+enum vio_dev_family {
+ VDEVICE, /* The OF node is a child of /vdevice */
+ PFO, /* The OF node is a child of /ibm,platform-facilities */
+};
+
/**
* vio_dev - This structure is used to describe virtual I/O devices.
*
@@ -58,6 +101,7 @@ struct vio_dev {
const char *name;
const char *type;
uint32_t unit_address;
+ uint32_t resource_id;
unsigned int irq;
struct {
size_t desired;
@@ -65,10 +109,12 @@ struct vio_dev {
size_t allocated;
atomic_t allocs_failed;
} cmo;
+ enum vio_dev_family family;
struct device dev;
};
struct vio_driver {
+ const char *name;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
int (*remove)(struct vio_dev *dev);
@@ -76,16 +122,25 @@ struct vio_driver {
* be loaded in a CMO environment if it uses DMA.
*/
unsigned long (*get_desired_dma)(struct vio_dev *dev);
+ const struct dev_pm_ops *pm;
struct device_driver driver;
};
-extern int vio_register_driver(struct vio_driver *drv);
+extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+ const char *mod_name);
+/*
+ * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define vio_register_driver(driver) \
+ __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void vio_unregister_driver(struct vio_driver *drv);
extern int vio_cmo_entitlement_update(size_t);
extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
-extern void __devinit vio_unregister_device(struct vio_dev *dev);
+extern void vio_unregister_device(struct vio_dev *dev);
+
+extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op);
struct device_node;
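The registration macro above exists so that KBUILD_MODNAME is expanded in the driver's own compilation unit. A hedged registration sketch, with every name below hypothetical:

static const struct vio_device_id example_vio_ids[] = {
	{ "example-device", "example-compat" },	/* hypothetical type/compat pair */
	{ "", "" },
};

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	return 0;	/* claim the device */
}

static int example_remove(struct vio_dev *vdev)
{
	return 0;
}

static struct vio_driver example_vio_driver = {
	.id_table	= example_vio_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* expands to __vio_register_driver(&example_vio_driver, THIS_MODULE, KBUILD_MODNAME) */
	return vio_register_driver(&example_vio_driver);
}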
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 00000000000..9a5c928bb3c
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,119 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+
+#ifdef __BIG_ENDIAN__
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long leading_zero_bits;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#else
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+#define create_zero_mask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ unsigned long leading_zero_bits;
+ long trailing_zero_bit_mask;
+
+ asm ("addi %1,%2,-1\n\t"
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+ : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+#else /* 32-bit case */
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
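To make the little-endian constants concrete, here is a small standalone demo (userspace, not kernel code) of the zero-byte test that has_zero() performs with REPEAT_BYTE(0x01) and REPEAT_BYTE(0x80); the mask has the high bit set in exactly those bytes of the word that were zero:

#include <stdint.h>
#include <stdio.h>

#define ONES  0x0101010101010101ULL	/* REPEAT_BYTE(0x01) on 64-bit */
#define HIGHS 0x8080808080808080ULL	/* REPEAT_BYTE(0x80) on 64-bit */

int main(void)
{
	uint64_t word = 0x1122330055667788ULL;	/* byte 4 (counting from the LSB) is zero */
	uint64_t mask = (word - ONES) & ~word & HIGHS;

	printf("zero byte %s\n", mask ? "found" : "not found");
	return 0;
}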
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 00000000000..282d43a0c85
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,162 @@
+/*
+ * Common definitions accross all variants of ICP and ICS interrupt
+ * controllers.
+ */
+
+#ifndef _XICS_H
+#define _XICS_H
+
+#include <linux/interrupt.h>
+
+#define XICS_IPI 2
+#define XICS_IRQ_SPURIOUS 0
+
+/* Want a priority other than 0. Various HW issues require this. */
+#define DEFAULT_PRIORITY 5
+
+/*
+ * Mark IPIs as higher priority so we can take them inside interrupts
+ * FIXME: still true now?
+ */
+#define IPI_PRIORITY 4
+
+/* The least favored priority */
+#define LOWEST_PRIORITY 0xFF
+
+/* The number of priorities defined above */
+#define MAX_NUM_PRIORITIES 3
+
+/* Native ICP */
+#ifdef CONFIG_PPC_ICP_NATIVE
+extern int icp_native_init(void);
+#else
+static inline int icp_native_init(void) { return -ENODEV; }
+#endif
+
+/* PAPR ICP */
+#ifdef CONFIG_PPC_ICP_HV
+extern int icp_hv_init(void);
+#else
+static inline int icp_hv_init(void) { return -ENODEV; }
+#endif
+
+/* ICP ops */
+struct icp_ops {
+ unsigned int (*get_irq)(void);
+ void (*eoi)(struct irq_data *d);
+ void (*set_priority)(unsigned char prio);
+ void (*teardown_cpu)(void);
+ void (*flush_ipi)(void);
+#ifdef CONFIG_SMP
+ void (*cause_ipi)(int cpu, unsigned long data);
+ irq_handler_t ipi_action;
+#endif
+};
+
+extern const struct icp_ops *icp_ops;
+
+/* Native ICS */
+extern int ics_native_init(void);
+
+/* RTAS ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+extern int ics_rtas_init(void);
+#else
+static inline int ics_rtas_init(void) { return -ENODEV; }
+#endif
+
+/* HAL ICS */
+#ifdef CONFIG_PPC_POWERNV
+extern int ics_opal_init(void);
+#else
+static inline int ics_opal_init(void) { return -ENODEV; }
+#endif
+
+/* ICS instance, hooked up to chip_data of an irq */
+struct ics {
+ struct list_head link;
+ int (*map)(struct ics *ics, unsigned int virq);
+ void (*mask_unknown)(struct ics *ics, unsigned long vec);
+ long (*get_server)(struct ics *ics, unsigned long vec);
+ int (*host_match)(struct ics *ics, struct device_node *node);
+ char data[];
+};
+
+/* Commons */
+extern unsigned int xics_default_server;
+extern unsigned int xics_default_distrib_server;
+extern unsigned int xics_interrupt_server_size;
+extern struct irq_domain *xics_host;
+
+struct xics_cppr {
+ unsigned char stack[MAX_NUM_PRIORITIES];
+ int index;
+};
+
+DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
+
+static inline void xics_push_cppr(unsigned int vec)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
+ return;
+
+ if (vec == XICS_IPI)
+ os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
+ else
+ os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
+}
+
+static inline unsigned char xics_pop_cppr(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index < 1))
+ return LOWEST_PRIORITY;
+
+ return os_cppr->stack[--os_cppr->index];
+}
+
+static inline void xics_set_base_cppr(unsigned char cppr)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ /* we only really want to set the priority when there's
+ * just one cppr value on the stack
+ */
+ WARN_ON(os_cppr->index != 0);
+
+ os_cppr->stack[0] = cppr;
+}
+
+static inline unsigned char xics_cppr_top(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ return os_cppr->stack[os_cppr->index];
+}
+
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
+
+extern void xics_init(void);
+extern void xics_setup_cpu(void);
+extern void xics_update_irq_servers(void);
+extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
+extern void xics_mask_unknown_vec(unsigned int vec);
+extern irqreturn_t xics_ipi_dispatch(int cpu);
+extern int xics_smp_probe(void);
+extern void xics_register_ics(struct ics *ics);
+extern void xics_teardown_cpu(void);
+extern void xics_kexec_teardown_cpu(int secondary);
+extern void xics_migrate_irqs_away(void);
+extern void icp_native_eoi(struct irq_data *d);
+#ifdef CONFIG_SMP
+extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+ unsigned int strict_check);
+#else
+#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server)
+#endif
+
+
+#endif /* _XICS_H */
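The CPPR helpers above keep a small per-CPU stack of the processor priority; a hedged sketch of the flow around one interrupt (the dispatch step is elided and hypothetical):

	unsigned char prev;
	unsigned int vec = icp_ops->get_irq();

	xics_push_cppr(vec);	/* record the priority now in effect */
	/* ... dispatch the interrupt ... */
	prev = xics_pop_cppr();	/* EOI path restores the previous priority */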
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index c82eb12a5b1..0abb97f3be1 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -1 +1,68 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#ifndef _ASM_POWERPC_XOR_H
+#define _ASM_POWERPC_XOR_H
+
+#ifdef CONFIG_ALTIVEC
+
+#include <asm/cputable.h>
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in);
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in);
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in);
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in);
+
+static struct xor_block_template xor_block_altivec = {
+ .name = "altivec",
+ .do_2 = xor_altivec_2,
+ .do_3 = xor_altivec_3,
+ .do_4 = xor_altivec_4,
+ .do_5 = xor_altivec_5,
+};
+
+#define XOR_SPEED_ALTIVEC() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) \
+ xor_speed(&xor_block_altivec); \
+ } while (0)
+#else
+#define XOR_SPEED_ALTIVEC()
+#endif
+
+/* Also try the generic routines. */
#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ XOR_SPEED_ALTIVEC(); \
+} while (0)
+
+#endif /* _ASM_POWERPC_XOR_H */
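For context, the generic calibration code in crypto/xor.c consumes this template list by defining xor_speed() around a benchmark helper and then expanding the macro; roughly, and sketched from that file:

	/* inside calibrate_xor_blocks(), b1/b2 are scratch benchmark buffers */
	#define xor_speed(templ)	do_xor_speed((templ), b1, b2)
	XOR_TRY_TEMPLATES;	/* times 8regs, 32regs, ... plus altivec when CPU_FTR_ALTIVEC is set */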