Diffstat (limited to 'arch/powerpc/include/asm')
107 files changed, 2919 insertions, 1968 deletions
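One change worth calling out before the per-file hunks: barrier.h gains lwsync-based smp_store_release()/smp_load_acquire(). A minimal sketch of the message-passing pattern they implement (the publish/consume functions and both variables are illustrative, not part of this diff):

static int payload;
static int flag;

/* Writer: the __lwsync() inside smp_store_release() orders the
 * payload store before the flag store. */
static void publish(void)
{
	payload = 42;
	smp_store_release(&flag, 1);
}

/* Reader: the __lwsync() inside smp_load_acquire() orders the flag
 * load before the payload load, so a reader that sees flag == 1 is
 * guaranteed to also see payload == 42. */
static int consume(void)
{
	return smp_load_acquire(&flag) ? payload : -1;
}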
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 704e6f10ae8..3fb1bc432f4 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,5 +1,8 @@  generic-y += clkdev.h +generic-y += hash.h +generic-y += mcs_spinlock.h +generic-y += preempt.h  generic-y += rwsem.h  generic-y += trace_clock.h -generic-y += vtime.h
\ No newline at end of file +generic-y += vtime.h diff --git a/arch/powerpc/include/asm/apm82181-adma.h b/arch/powerpc/include/asm/apm82181-adma.h new file mode 100644 index 00000000000..8d36b517fbf --- /dev/null +++ b/arch/powerpc/include/asm/apm82181-adma.h @@ -0,0 +1,311 @@ +/* + * 2009-2010 (C) Applied Micro Circuits Corporation. + * + * Author: Tai Tri Nguyen<ttnguyen@appliedmicro.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2.  This program is licensed "as is" without any warranty of + * any kind, whether express or implied. + */ + +#ifndef APM82181_ADMA_H +#define APM82181_ADMA_H + + +#include <linux/types.h> + + +#define to_apm82181_adma_chan(chan) container_of(chan,apm82181_ch_t,common) +#define to_apm82181_adma_device(dev) container_of(dev,apm82181_dev_t,common) +#define tx_to_apm82181_adma_slot(tx) container_of(tx,apm82181_desc_t,async_tx) + +#define APM82181_DMA_PROC_ROOT "driver/apm82181_adma" + +/* Number of operands supported in the h/w */ +#define XOR_MAX_OPS             16 +/* this is the XOR_CBBCR width */ +#define APM82181_ADMA_XOR_MAX_BYTE_COUNT       (1 << 31) +#define APM82181_ADMA_DMA_MAX_BYTE_COUNT (1024 * 1024) +#define MAX_APM82181_DMA_CHANNELS 5 +#define APM82181_ADMA_THRESHOLD 1 + +#define APM82181_PDMA0_ID 0 +#define APM82181_PDMA1_ID 1 +#define APM82181_PDMA2_ID 2 +#define APM82181_PDMA3_ID 3 +#define APM82181_XOR_ID	  4  + +/* DMA 0/1/2/3 registers */ +#define DCR_DMAx_BASE(x)	(0x200 + (x) * 0x8)		/* DMA DCR base */ +#define DCR_DMA2P40_CRx(x)     	(DCR_DMAx_BASE(x) + 0x0)   /* DMA Channel Control */ +#define DMA_CR_CE		(1 << 31) +#define DMA_CR_CIE		(1 << 30) +#define DMA_CR_PL		(1 << 28) +#define DMA_CR_PW_128		0x08000000 +#define DMA_CR_DAI		0x01000000 +#define DMA_CR_SAI		0x00800000 +#define DMA_CR_BEN		0x00400000 +#define DMA_CR_TM_S_MM		0x00300000 +#define DMA_CR_ETD		0x00000100 +#define DMA_CR_TCE		0x00000080 +#define DMA_CR_CP(x)		(((x) << 5) & 0x00000060) +#define DMA_CR_DEC		(1 << 2) +#define DMA_CR_SL		(1 << 1) +#define DCR_DMA2P40_CTCx(x)    	(DCR_DMAx_BASE(x) + 0x1) /* DMA Count 0 */ +#define DMA_CTC_ETIE		(1 << 28) +#define DMA_CTC_EIE		(1 << 27) +#define DMA_CTC_PCE		(1 << 20) +#define DMA_CTC_TC_MASK		0x000fffff +#define DCR_DMA2P40_SAHx(x)    	(DCR_DMAx_BASE(x) + 0x2) /* DMA Src Addr High 0 */ +#define DCR_DMA2P40_SALx(x)    	(DCR_DMAx_BASE(x) + 0x3) /* DMA Src Addr Low 0 */ +#define DCR_DMA2P40_DAHx(x)    	(DCR_DMAx_BASE(x) + 0x4) /* DMA Dest Addr High 0 */ +#define DCR_DMA2P40_DALx(x)    	(DCR_DMAx_BASE(x) + 0x5) /* DMA Dest Addr Low 0 */ +#define DCR_DMA2P40_SGHx(x)    	(DCR_DMAx_BASE(x) + 0x6) /* DMA SG Desc Addr High 0 */ +#define DCR_DMA2P40_SGLx(x)    	(DCR_DMAx_BASE(x) + 0x7) /* DMA SG Desc Addr Low 0 */ +/* DMA Status Register */ +#define DCR_DMA2P40_SR      	0x220  +#define DMA_SR_CS(x)		(1 << (31 -x)) +#define DMA_SR_TS(x)		(1 << (27 -x)) +#define DMA_SR_RI(x)		(1 << (23 -x)) +#define DMA_SR_IR(x)		(1 << (19 -x)) +#define DMA_SR_ER(x)		(1 << (15 -x)) +#define DMA_SR_CB(x)		(1 << (11 -x)) +#define DMA_SR_SG(x)		(1 << (7 -x)) +/* S/G registers */ +#define DCR_DMA2P40_SGC		0x223 +#define DMA_SGC_SSG(x)		( 1 << (31 - x)) +#define DMA_SGC_SGL(x,y)	( y << (27 - x)) /* x: channel; y: 0 PLB, 1 OPB*/ +#define DMA_SGC_EM(x)		( 1 << (15 - x)) +#define DMA_SGC_EM_ALL		0x0000F000 + +/* + * XOR Command Block Control Register bits + */ +#define XOR_CBCR_LNK_BIT        (1<<31) /* link present */ +#define XOR_CBCR_TGT_BIT        (1<<30) /* target present */ +#define XOR_CBCR_CBCE_BIT       (1<<29) /* command 
block complete enable */ +#define XOR_CBCR_RNZE_BIT       (1<<28) /* result not zero enable */ +#define XOR_CBCR_XNOR_BIT       (1<<15) /* XOR/XNOR */ +#define XOR_CDCR_OAC_MSK        (0x7F)  /* operand address count */ + +/* + * XORCore Status Register bits + */ +#define XOR_SR_XCP_BIT          (1<<31) /* core processing */ +#define XOR_SR_ICB_BIT          (1<<17) /* invalid CB */ +#define XOR_SR_IC_BIT           (1<<16) /* invalid command */ +#define XOR_SR_IPE_BIT          (1<<15) /* internal parity error */ +#define XOR_SR_RNZ_BIT          (1<<2)  /* result not Zero */ +#define XOR_SR_CBC_BIT          (1<<1)  /* CB complete */ +#define XOR_SR_CBLC_BIT         (1<<0)  /* CB list complete */ + +/* + * XORCore Control Set and Reset Register bits + */ +#define XOR_CRSR_XASR_BIT       (1<<31) /* soft reset */ +#define XOR_CRSR_XAE_BIT        (1<<30) /* enable */ +#define XOR_CRSR_RCBE_BIT       (1<<29) /* refetch CB enable */ +#define XOR_CRSR_PAUS_BIT       (1<<28) /* pause */ +#define XOR_CRSR_64BA_BIT       (1<<27) /* 64/32 CB format */ +#define XOR_CRSR_CLP_BIT        (1<<25) /* continue list processing */ + +/* + * XORCore Interrupt Enable Register + */ +#define XOR_IE_ICBIE_BIT        (1<<17) /* Invalid Command Block Interrupt Enable */ +#define XOR_IE_ICIE_BIT         (1<<16) /* Invalid Command Interrupt Enable */ +#define XOR_IE_RPTIE_BIT        (1<<14) /* Read PLB Timeout Error Interrupt Enable */ +#define XOR_IE_CBCIE_BIT        (1<<1)  /* CB complete interrupt enable */ +#define XOR_IE_CBLCI_BIT        (1<<0)  /* CB list complete interrupt enable */ + +typedef struct apm82181_plb_dma4_device { +        struct resource reg;    /* Resource for register */ +        void __iomem *reg_base; +        struct platform_device *ofdev; +        struct device *dev; +} apm82181_plb_dma_t; + +/** + * struct apm82181_dma_device - internal representation of a DMA device + * @id: HW DMA Device selector + * @ofdev: OF device + * @dcr_base: dcr base of HW PLB DMA channels + * @reg_base: base of ADMA XOR channel + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @pool_size: memory pool size for the channel device + * @common: embedded struct dma_device + * @cap_mask: capabilities of ADMA channels + */ +typedef struct apm82181_plb_dma_device { +	int id; +	struct platform_device *ofdev; +	u32 dcr_base; +	void __iomem *xor_base; +	struct device *dev; +	struct dma_device common; +	struct apm82181_plb_dma4_device *pdma; +	void *dma_desc_pool_virt; +	u32 pool_size; +	dma_addr_t dma_desc_pool; +	dma_cap_mask_t cap_mask; +} apm82181_dev_t; + +/** + * struct apm82181_dma_chan - internal representation of an ADMA channel + * @lock: serializes enqueue/dequeue operations to the slot pool + * @device: parent device + * @chain: device chain view of the descriptors + * @common: common dmaengine channel object members + * @all_slots: complete domain of slots usable by the channel + * @reg: Resource for register  + * @pending: allows batching of hardware operations + * @completed_cookie: identifier for the most recently completed operation + * @slots_allocated: records the actual size of the descriptor slot pool + * @hw_chain_inited: h/w descriptor chain initialization flag + * @irq_tasklet: bottom half where apm82181_adma_slot_cleanup runs + * @needs_unmap: if buffers should not be unmapped upon final processing + */ +typedef struct apm82181_plb_dma_chan { +	spinlock_t lock; +	struct apm82181_plb_dma_device *device; +	struct 
timer_list cleanup_watchdog; +	struct list_head chain; +	struct dma_chan common; +	struct list_head all_slots; +	struct apm82181_adma_plb_desc_slot *last_used; +	int pending; +	dma_cookie_t completed_cookie; +	int slots_allocated; +	int hw_chain_inited; +        struct tasklet_struct irq_tasklet; +	u8 needs_unmap; +	phys_addr_t current_cdb_addr;	 +} apm82181_ch_t; + +typedef struct apm82181_adma_plb_desc_slot { +	dma_addr_t phys; +	struct apm82181_adma_plb_desc_slot *group_head; +	struct apm82181_adma_plb_desc_slot *hw_next; +	struct dma_async_tx_descriptor async_tx; +	struct list_head slot_node; +	struct list_head chain_node; +	struct list_head group_list; +	unsigned int unmap_len; +	void *hw_desc; +	u16 stride; +	u16 idx; +	u16 slot_cnt; +	u8 src_cnt; +	u8 dst_cnt; +	u8 slots_per_op; +	u8 descs_per_op; +	unsigned long flags; +	unsigned long reverse_flags[8]; +#define APM82181_DESC_INT       0       /* generate interrupt on complete */ +#define APM82181_DESC_FENCE     1       /* Other tx will use its result */ +					/* This tx needs to be polled to complete */ + +}apm82181_desc_t; + +typedef struct { +	u32 ce:1; +	u32 cie:1; +	u32 td:1; +	u32 pl:1; +	u32 pw:3; +	u32 dai:1; +	u32 sai:1; +	u32 ben:1; +	u32 tm:2; +	u32 psc:2; +	u32 pwc:6; +	u32 phc:3; +	u32 etd:1; +	u32 tce:1; +	u32 cp:2; +	u32 pf:2; +	u32 dec:1; +	u32 sl:1; +	u32 reserved:1; +} __attribute__((packed)) dma_cdb_ctrl_t; + +typedef struct { +	u32 link:1; +	u32 sgl:1; +	u32 tcie:1; +	u32 etie:1; +	u32 eie:1; +	u32 sid:3; +	u32 bten:1; +	u32 bsiz:2; +	u32 pce:1; +	u32 tc:20; +} __attribute__((packed)) dma_cdb_count_t; +/* scatter/gather descriptor struct */ +typedef struct dma_cdb { +	dma_cdb_ctrl_t ctrl; +	dma_cdb_count_t cnt; +	u32 src_hi; +	u32 src_lo; +	u32 dest_hi; +	u32 dest_lo; +	u32 sg_hi; +	u32 sg_lo; +}dma_cdb_t; + +typedef struct { +        uint32_t control; +        phys_addr_t src_addr; +        phys_addr_t dst_addr; +        uint32_t control_count; +        uint32_t next; +} ppc_sgl_t; + +/* + * XOR Accelerator engine Command Block Type + */ +typedef struct { +        /* +         * Basic 64-bit format XOR CB +         */ +        u32     cbc;            /* control */ +        u32     cbbc;           /* byte count */ +        u32     cbs;            /* status */ +        u8      pad0[4];        /* reserved */ +        u32     cbtah;          /* target address high */ +        u32     cbtal;          /* target address low */ +        u32     cblah;          /* link address high */ +        u32     cblal;          /* link address low */ +        struct { +                u32 h; +                u32 l; +        } __attribute__ ((packed)) ops [16]; +} __attribute__ ((packed)) xor_cb_t; + +/* + * XOR hardware registers + */ +typedef struct { +        u32     op_ar[16][2];   /* operand address[0]-high,[1]-low registers */ +        u8      pad0[352];      /* reserved */ +        u32     cbcr;           /* CB control register */ +        u32     cbbcr;          /* CB byte count register */ +        u32     cbsr;           /* CB status register */ +        u8      pad1[4];        /* reserved */ +        u32     cbtahr;         /* operand target address high register */ +        u32     cbtalr;         /* operand target address low register */ +        u32     cblahr;         /* CB link address high register */ +        u32     cblalr;         /* CB link address low register */ +        u32     crsr;           /* control set register */ +        u32     crrr;           /* control reset register */ +        u32     ccbahr;     
    /* current CB address high register */ +        u32     ccbalr;         /* current CB address low register */ +        u32     plbr;           /* PLB configuration register */ +        u32     ier;            /* interrupt enable register */ +        u32     pecr;           /* parity error count register */ +        u32     sr;             /* status register */ +        u32     revidr;         /* revision ID register */ +} xor_regs_t; + +#endif diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h new file mode 100644 index 00000000000..bde53110363 --- /dev/null +++ b/arch/powerpc/include/asm/archrandom.h @@ -0,0 +1,50 @@ +#ifndef _ASM_POWERPC_ARCHRANDOM_H +#define _ASM_POWERPC_ARCHRANDOM_H + +#ifdef CONFIG_ARCH_RANDOM + +#include <asm/machdep.h> + +static inline int arch_get_random_long(unsigned long *v) +{ +	if (ppc_md.get_random_long) +		return ppc_md.get_random_long(v); + +	return 0; +} + +static inline int arch_get_random_int(unsigned int *v) +{ +	unsigned long val; +	int rc; + +	rc = arch_get_random_long(&val); +	if (rc) +		*v = val; + +	return rc; +} + +static inline int arch_has_random(void) +{ +	return !!ppc_md.get_random_long; +} + +int powernv_get_random_long(unsigned long *v); + +static inline int arch_get_random_seed_long(unsigned long *v) +{ +	return 0; +} +static inline int arch_get_random_seed_int(unsigned int *v) +{ +	return 0; +} +static inline int arch_has_random_seed(void) +{ +	return 0; +} + +#endif /* CONFIG_ARCH_RANDOM */ + +#endif /* _ASM_POWERPC_ARCHRANDOM_H */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index e3b1d41c89b..28992d01292 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -8,6 +8,7 @@  #ifdef __KERNEL__  #include <linux/types.h>  #include <asm/cmpxchg.h> +#include <asm/barrier.h>  #define ATOMIC_INIT(i)		{ (i) } @@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)  }  #define atomic_dec_if_positive atomic_dec_if_positive -#define smp_mb__before_atomic_dec()     smp_mb() -#define smp_mb__after_atomic_dec()      smp_mb() -#define smp_mb__before_atomic_inc()     smp_mb() -#define smp_mb__after_atomic_inc()      smp_mb() -  #ifdef __powerpc64__  #define ATOMIC64_INIT(i)	{ (i) } diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae782254e73..bab79a110c7 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -45,11 +45,15 @@  #    define SMPWMB      eieio  #endif +#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +  #define smp_mb()	mb() -#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_rmb()	__lwsync()  #define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")  #define smp_read_barrier_depends()	read_barrier_depends()  #else +#define __lwsync()	barrier() +  #define smp_mb()	barrier()  #define smp_rmb()	barrier()  #define smp_wmb()	barrier() @@ -65,4 +69,22 @@  #define data_barrier(x)	\  	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); +#define smp_store_release(p, v)						\ +do {									\ +	compiletime_assert_atomic_type(*p);				\ +	__lwsync();							\ +	ACCESS_ONCE(*p) = (v);						\ +} while (0) + +#define smp_load_acquire(p)						\ +({									\ +	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\ +	compiletime_assert_atomic_type(*p);				\ +	__lwsync();							\ +	___p1;								\ +}) + +#define smp_mb__before_atomic()     smp_mb() +#define 
smp_mb__after_atomic()      smp_mb() +  #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 910194e9a1e..bd3bd573d0a 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -46,11 +46,12 @@  #include <asm/asm-compat.h>  #include <asm/synch.h> -/* - * clear_bit doesn't imply a memory barrier - */ -#define smp_mb__before_clear_bit()	smp_mb() -#define smp_mb__after_clear_bit()	smp_mb() +/* PPC bit number conversion */ +#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be)) +#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit)) +#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) + +#include <asm/barrier.h>  /* Macro for generating the ***_bits() functions */  #define DEFINE_BITOP(fn, op, prefix)		\ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 9e495c9a6a8..ed0afc1e44a 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -41,8 +41,20 @@ struct ppc64_caches {  extern struct ppc64_caches ppc64_caches;  #endif /* __powerpc64__ && ! __ASSEMBLY__ */ -#if !defined(__ASSEMBLY__) +#if defined(__ASSEMBLY__) +/* + * For a snooping icache, we still need a dummy icbi to purge all the + * prefetched instructions from the ifetch buffers. We also need a sync + * before the icbi to order the actual stores to memory that might + * have modified instructions with the icbi. + */ +#define PURGE_PREFETCHED_INS	\ +	sync;			\ +	icbi	0,r3;		\ +	sync;			\ +	isync +#else  #define __read_mostly __attribute__((__section__(".data..read_mostly")))  #ifdef CONFIG_6xx diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index ce0c28495f9..8251a3ba870 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -14,6 +14,9 @@   * which always checksum on 4 octet boundaries.  ihl is the number   * of 32-bit words and is always >= 5.   
*/ +#ifdef CONFIG_GENERIC_CSUM +#include <asm-generic/checksum.h> +#else  extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);  /* @@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,  	return sum;  #endif  } + +#endif  #endif /* __KERNEL__ */  #endif diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h deleted file mode 100644 index ab1882c1e17..00000000000 --- a/arch/powerpc/include/asm/clk_interface.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_POWERPC_CLK_INTERFACE_H -#define __ASM_POWERPC_CLK_INTERFACE_H - -#include <linux/clk.h> - -struct clk_interface { -	struct clk*	(*clk_get)	(struct device *dev, const char *id); -	int		(*clk_enable)	(struct clk *clk); -	void		(*clk_disable)	(struct clk *clk); -	unsigned long	(*clk_get_rate)	(struct clk *clk); -	void		(*clk_put)	(struct clk *clk); -	long		(*clk_round_rate) (struct clk *clk, unsigned long rate); -	int 		(*clk_set_rate)	(struct clk *clk, unsigned long rate); -	int		(*clk_set_parent) (struct clk *clk, struct clk *parent); -	struct clk*	(*clk_get_parent) (struct clk *clk); -}; - -extern struct clk_interface clk_functions; - -#endif /* __ASM_POWERPC_CLK_INTERFACE_H */ diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index e245aab7f19..d463c68fe7f 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,  	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\  	cmpxchg_local((ptr), (o), (n));					\    }) +#define cmpxchg64_relaxed	cmpxchg64_local  #else  #include <asm-generic/cmpxchg-local.h>  #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index a6f8c7a5cbb..840a5509b3f 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -34,19 +34,69 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);  unsigned long branch_target(const unsigned int *instr);  unsigned int translate_branch(const unsigned int *dest,  			      const unsigned int *src); +#ifdef CONFIG_PPC_BOOK3E_64 +void __patch_exception(int exc, unsigned long addr); +#define patch_exception(exc, name) do { \ +	extern unsigned int name; \ +	__patch_exception((exc), (unsigned long)&name); \ +} while (0) +#endif + +#define OP_RT_RA_MASK	0xffff0000UL +#define LIS_R2		0x3c020000UL +#define ADDIS_R2_R12	0x3c4c0000UL +#define ADDI_R2_R2	0x38420000UL  static inline unsigned long ppc_function_entry(void *func)  { -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +	u32 *insn = func; +  	/* -	 * On PPC64 the function pointer actually points to the function's -	 * descriptor. The first entry in the descriptor is the address -	 * of the function text. +	 * A PPC64 ABIv2 function may have a local and a global entry +	 * point. We need to use the local entry point when patching +	 * functions, so identify and step over the global entry point +	 * sequence. 
+	 * +	 * The global entry point sequence is always of the form: +	 * +	 * addis r2,r12,XXXX +	 * addi  r2,r2,XXXX +	 * +	 * A linker optimisation may convert the addis to lis: +	 * +	 * lis   r2,XXXX +	 * addi  r2,r2,XXXX +	 */ +	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) || +	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) && +	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2)) +		return (unsigned long)(insn + 2); +	else +		return (unsigned long)func; +#else +	/* +	 * On PPC64 ABIv1 the function pointer actually points to the +	 * function's descriptor. The first entry in the descriptor is the +	 * address of the function text.  	 */  	return ((func_descr_t *)func)->entry; +#endif  #else  	return (unsigned long)func;  #endif  } +static inline unsigned long ppc_global_function_entry(void *func) +{ +#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2 +	/* PPC64 ABIv2 the global entry point is at the address */ +	return (unsigned long)func; +#else +	/* All other cases there is no change vs ppc_function_entry() */ +	return ppc_function_entry(func); +#endif +} +  #endif /* _ASM_POWERPC_CODE_PATCHING_H */ diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 84fdf6857c3..b142b8e0ed9 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -8,7 +8,11 @@  #include <linux/sched.h>  #define COMPAT_USER_HZ		100 +#ifdef __BIG_ENDIAN__  #define COMPAT_UTS_MACHINE	"ppc\0\0" +#else +#define COMPAT_UTS_MACHINE	"ppcle\0\0" +#endif  typedef u32		compat_size_t;  typedef s32		compat_ssize_t; @@ -200,10 +204,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)  	/*  	 * We can't access below the stack pointer in the 32bit ABI and -	 * can access 288 bytes in the 64bit ABI +	 * can access 288 bytes in the 64bit big-endian ABI, +	 * or 512 bytes with the new ELFv2 little-endian ABI.  	 
*/  	if (!is_32bit_task()) -		usp -= 288; +		usp -= USER_REDZONE_SIZE;  	return (void __user *) (usp - len);  } diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h index b6f5a33b8ee..40014921fff 100644 --- a/arch/powerpc/include/asm/context_tracking.h +++ b/arch/powerpc/include/asm/context_tracking.h @@ -2,9 +2,9 @@  #define _ASM_POWERPC_CONTEXT_TRACKING_H  #ifdef CONFIG_CONTEXT_TRACKING -#define SCHEDULE_USER bl	.schedule_user +#define SCHEDULE_USER bl	schedule_user  #else -#define SCHEDULE_USER bl	.schedule +#define SCHEDULE_USER bl	schedule  #endif  #endif diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index f42e9baf3a4..7c8608b0969 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -489,7 +489,6 @@ typedef struct scc_trans {  #define FCC_GFMR_TCI		((uint)0x20000000)  #define FCC_GFMR_TRX		((uint)0x10000000)  #define FCC_GFMR_TTX		((uint)0x08000000) -#define FCC_GFMR_TTX		((uint)0x08000000)  #define FCC_GFMR_CDP		((uint)0x04000000)  #define FCC_GFMR_CTSP		((uint)0x02000000)  #define FCC_GFMR_CDS		((uint)0x01000000) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 0d4939ba48e..0fdd7eece6d 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -90,6 +90,18 @@ struct cpu_spec {  	 * if the error is fatal, 1 if it was fully recovered and 0 to  	 * pass up (not CPU originated) */  	int		(*machine_check)(struct pt_regs *regs); + +	/* +	 * Processor specific early machine check handler which is +	 * called in real mode to handle SLB and TLB errors. +	 */ +	long		(*machine_check_early)(struct pt_regs *regs); + +	/* +	 * Processor specific routine to flush tlbs. 
+	 */ +	void		(*flush_tlb)(unsigned long inval_selector); +  };  extern struct cpu_spec		*cur_cpu_spec; @@ -177,6 +189,7 @@ extern const char *powerpc_base_platform;  #define	CPU_FTR_HAS_PPR			LONG_ASM_CONST(0x0200000000000000)  #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)  #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000) +#define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)  #ifndef __ASSEMBLY__ @@ -433,6 +446,8 @@ extern const char *powerpc_base_platform;  	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \  	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \  	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) +#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) +#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)  #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \  	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \  	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -454,8 +469,8 @@ extern const char *powerpc_base_platform;  #define CPU_FTRS_POSSIBLE	\  	    (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |	\  	    CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |	\ -	    CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | CPU_FTRS_CELL |		\ -	    CPU_FTRS_PA6T | CPU_FTR_VSX) +	    CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 |	\ +	    CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_VSX)  #endif  #else  enum { diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index ac3eedb9b74..2bf8e9307be 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h @@ -18,10 +18,12 @@  #ifdef CONFIG_SMP  extern int threads_per_core; +extern int threads_per_subcore;  extern int threads_shift;  extern cpumask_t threads_core_mask;  #else  #define threads_per_core	1 +#define threads_per_subcore	1  #define threads_shift		0  #define threads_core_mask	(CPU_MASK_CPU0)  #endif @@ -74,6 +76,11 @@ static inline int cpu_thread_in_core(int cpu)  	return cpu & (threads_per_core - 1);  } +static inline int cpu_thread_in_subcore(int cpu) +{ +	return cpu & (threads_per_subcore - 1); +} +  static inline int cpu_first_thread_sibling(int cpu)  {  	return cpu & ~(threads_per_core - 1); diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h index acd491dbd45..93a68b28e69 100644 --- a/arch/powerpc/include/asm/dcr-mmio.h +++ b/arch/powerpc/include/asm/dcr-mmio.h @@ -51,10 +51,6 @@ static inline void dcr_write_mmio(dcr_host_mmio_t host,  	out_be32(host.token + ((host.base + dcr_n) * host.stride), value);  } -extern u64 of_translate_dcr_address(struct device_node *dev, -				    unsigned int dcr_n, -				    unsigned int *stride); -  #endif /* __KERNEL__ */  #endif /* _ASM_POWERPC_DCR_MMIO_H */ diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index d2516308ed1..a954e497504 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h @@ -46,7 +46,8 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }  static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }  #endif -int set_breakpoint(struct arch_hw_breakpoint *brk); +void set_breakpoint(struct arch_hw_breakpoint *brk); +void __set_breakpoint(struct arch_hw_breakpoint *brk);  #ifdef CONFIG_PPC_ADV_DEBUG_REGS  extern void do_send_trap(struct pt_regs *regs, unsigned long address,  			 unsigned long error_code, int signal_code, int brkpt); diff --git a/arch/powerpc/include/asm/disassemble.h 
b/arch/powerpc/include/asm/disassemble.h index 9b198d1b3b2..6330a61b875 100644 --- a/arch/powerpc/include/asm/disassemble.h +++ b/arch/powerpc/include/asm/disassemble.h @@ -77,4 +77,42 @@ static inline unsigned int get_d(u32 inst)  	return inst & 0xffff;  } +static inline unsigned int get_oc(u32 inst) +{ +	return (inst >> 11) & 0x7fff; +} + +#define IS_XFORM(inst)	(get_op(inst)  == 31) +#define IS_DSFORM(inst)	(get_op(inst) >= 56) + +/* + * Create a DSISR value from the instruction + */ +static inline unsigned make_dsisr(unsigned instr) +{ +	unsigned dsisr; + + +	/* bits  6:15 --> 22:31 */ +	dsisr = (instr & 0x03ff0000) >> 16; + +	if (IS_XFORM(instr)) { +		/* bits 29:30 --> 15:16 */ +		dsisr |= (instr & 0x00000006) << 14; +		/* bit     25 -->    17 */ +		dsisr |= (instr & 0x00000040) << 8; +		/* bits 21:24 --> 18:21 */ +		dsisr |= (instr & 0x00000780) << 3; +	} else { +		/* bit      5 -->    17 */ +		dsisr |= (instr & 0x04000000) >> 12; +		/* bits  1: 4 --> 18:21 */ +		dsisr |= (instr & 0x78000000) >> 17; +		/* bits 30:31 --> 12:13 */ +		if (IS_DSFORM(instr)) +			dsisr |= (instr & 0x00000003) << 18; +	} + +	return dsisr; +}  #endif /* __ASM_PPC_DISASSEMBLE_H__ */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad6818..150866b2a3f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)  }  extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern int __dma_set_mask(struct device *dev, u64 dma_mask);  #define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL) diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index d3e5e9bc8f9..fab7743c264 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -32,6 +32,22 @@ struct device_node;  #ifdef CONFIG_EEH +/* EEH subsystem flags */ +#define EEH_ENABLED		0x1	/* EEH enabled		*/ +#define EEH_FORCE_DISABLED	0x2	/* EEH disabled		*/ +#define EEH_PROBE_MODE_DEV	0x4	/* From PCI device	*/ +#define EEH_PROBE_MODE_DEVTREE	0x8	/* From device tree	*/ + +/* + * Delay for PE reset, all in ms + * + * PCI specification has reset hold time of 100 milliseconds. + * We have 250 milliseconds here. The PCI bus settlement time + * is specified as 1.5 seconds and we have 1.8 seconds. + */ +#define EEH_PE_RST_HOLD_TIME		250 +#define EEH_PE_RST_SETTLE_TIME		1800 +  /*   * The struct is used to trace PE related EEH functionality.   
* In theory, there will be one instance of the struct to @@ -53,7 +69,7 @@ struct device_node;  #define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE		*/  #define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE	*/ -#define EEH_PE_PHB_DEAD		(1 << 2)	/* Dead PHB		*/ +#define EEH_PE_RESET		(1 << 2)	/* PE reset in progress	*/  #define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/ @@ -90,7 +106,9 @@ struct eeh_pe {  #define EEH_DEV_IRQ_DISABLED	(1 << 3)	/* Interrupt disabled	*/  #define EEH_DEV_DISCONNECTED	(1 << 4)	/* Removing from PE	*/ -#define EEH_DEV_SYSFS		(1 << 8)	/* Sysfs created        */ +#define EEH_DEV_NO_HANDLER	(1 << 8)	/* No error handler	*/ +#define EEH_DEV_SYSFS		(1 << 9)	/* Sysfs created	*/ +#define EEH_DEV_REMOVED		(1 << 10)	/* Removed permanently	*/  struct eeh_dev {  	int mode;			/* EEH mode			*/ @@ -98,7 +116,9 @@ struct eeh_dev {  	int config_addr;		/* Config address		*/  	int pe_config_addr;		/* PE config address		*/  	u32 config_space[16];		/* Saved PCI config space	*/ -	u8 pcie_cap;			/* Saved PCIe capability	*/ +	int pcix_cap;			/* Saved PCIx capability	*/ +	int pcie_cap;			/* Saved PCIe capability	*/ +	int aer_cap;			/* Saved AER capability		*/  	struct eeh_pe *pe;		/* Associated PE		*/  	struct list_head list;		/* Form link list in the PE	*/  	struct pci_controller *phb;	/* Associated PHB		*/ @@ -117,6 +137,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)  	return edev ? edev->pdev : NULL;  } +/* Return values from eeh_ops::next_error */ +enum { +	EEH_NEXT_ERR_NONE = 0, +	EEH_NEXT_ERR_INF, +	EEH_NEXT_ERR_FROZEN_PE, +	EEH_NEXT_ERR_FENCED_PHB, +	EEH_NEXT_ERR_DEAD_PHB, +	EEH_NEXT_ERR_DEAD_IOC +}; +  /*   * The struct is used to trace the registered EEH operation   * callback functions. Actually, those operation callback @@ -157,29 +187,43 @@ struct eeh_ops {  	int (*read_config)(struct device_node *dn, int where, int size, u32 *val);  	int (*write_config)(struct device_node *dn, int where, int size, u32 val);  	int (*next_error)(struct eeh_pe **pe); +	int (*restore_config)(struct device_node *dn);  }; +extern int eeh_subsystem_flags;  extern struct eeh_ops *eeh_ops; -extern int eeh_subsystem_enabled;  extern raw_spinlock_t confirm_error_lock; -extern int eeh_probe_mode; -#define EEH_PROBE_MODE_DEV	(1<<0)	/* From PCI device	*/ -#define EEH_PROBE_MODE_DEVTREE	(1<<1)	/* From device tree	*/ +static inline bool eeh_enabled(void) +{ +	if ((eeh_subsystem_flags & EEH_FORCE_DISABLED) || +	    !(eeh_subsystem_flags & EEH_ENABLED)) +		return false; + +	return true; +} + +static inline void eeh_set_enable(bool mode) +{ +	if (mode) +		eeh_subsystem_flags |= EEH_ENABLED; +	else +		eeh_subsystem_flags &= ~EEH_ENABLED; +}  static inline void eeh_probe_mode_set(int flag)  { -	eeh_probe_mode = flag; +	eeh_subsystem_flags |= flag;  }  static inline int eeh_probe_mode_devtree(void)  { -	return (eeh_probe_mode == EEH_PROBE_MODE_DEVTREE); +	return (eeh_subsystem_flags & EEH_PROBE_MODE_DEVTREE);  }  static inline int eeh_probe_mode_dev(void)  { -	return (eeh_probe_mode == EEH_PROBE_MODE_DEV); +	return (eeh_subsystem_flags & EEH_PROBE_MODE_DEV);  }  static inline void eeh_serialize_lock(unsigned long *flags) @@ -210,6 +254,7 @@ void *eeh_pe_traverse(struct eeh_pe *root,  void *eeh_pe_dev_traverse(struct eeh_pe *root,  		eeh_traverse_func fn, void *flag);  void eeh_pe_restore_bars(struct eeh_pe *pe); +const char *eeh_pe_loc_get(struct eeh_pe *pe);  struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);  void *eeh_dev_init(struct device_node *dn, void *data); @@ -234,7 +279,7 
@@ void eeh_remove_device(struct pci_dev *);   * If this macro yields TRUE, the caller relays to eeh_check_failure()   * which does further tests out of line.   */ -#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled) +#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_enabled())  /*   * Reads from a device which has been isolated by EEH will return @@ -245,6 +290,13 @@ void eeh_remove_device(struct pci_dev *);  #else /* !CONFIG_EEH */ +static inline bool eeh_enabled(void) +{ +        return false; +} + +static inline void eeh_set_enable(bool mode) { } +  static inline int eeh_init(void)  {  	return 0; diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index 89d5670b2ee..1e551a2d6f8 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -33,7 +33,7 @@ struct eeh_event {  int eeh_event_init(void);  int eeh_send_failure_event(struct eeh_pe *pe); -void eeh_remove_event(struct eeh_pe *pe); +void eeh_remove_event(struct eeh_pe *pe, bool force);  void eeh_handle_event(struct eeh_pe *pe);  #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index cc0655a702a..888d8f3f252 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -31,6 +31,8 @@  extern unsigned long randomize_et_dyn(unsigned long base);  #define ELF_ET_DYN_BASE		(randomize_et_dyn(0x20000000)) +#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) +  /*   * Our registers are always unsigned longs, whether we're a 32 bit   * process or 64 bit, on either a 64 bit or 32 bit kernel. @@ -86,6 +88,10 @@ typedef elf_vrregset_t elf_fpxregset_t;  #ifdef __powerpc64__  # define SET_PERSONALITY(ex)					\  do {								\ +	if (((ex).e_flags & 0x3) == 2)				\ +		set_thread_flag(TIF_ELF2ABI);			\ +	else							\ +		clear_thread_flag(TIF_ELF2ABI);			\  	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\  		set_thread_flag(TIF_32BIT);			\  	else							\ diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 5a8b82aa724..f00e10e2a33 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -43,6 +43,7 @@ extern struct ppc_emulated {  	struct ppc_emulated_entry popcntb;  	struct ppc_emulated_entry spe;  	struct ppc_emulated_entry string; +	struct ppc_emulated_entry sync;  	struct ppc_emulated_entry unaligned;  #ifdef CONFIG_MATH_EMULATION  	struct ppc_emulated_entry math; @@ -53,6 +54,7 @@ extern struct ppc_emulated {  #ifdef CONFIG_PPC64  	struct ppc_emulated_entry mfdscr;  	struct ppc_emulated_entry mtdscr; +	struct ppc_emulated_entry lq_stq;  #endif  } ppc_emulated; diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index 86b0ac79990..334459ad145 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)  	return r3;  } + +#ifdef CONFIG_EPAPR_PARAVIRT +static inline unsigned long epapr_hypercall(unsigned long *in, +			    unsigned long *out, +			    unsigned long nr) +{ +	unsigned long register r0 asm("r0"); +	unsigned long register r3 asm("r3") = in[0]; +	unsigned long register r4 asm("r4") = in[1]; +	unsigned long register r5 asm("r5") = in[2]; +	unsigned long register r6 asm("r6") = in[3]; +	unsigned long register r7 asm("r7") = in[4]; +	unsigned long register r8 asm("r8") = in[5]; +	unsigned long register r9 asm("r9") = in[6]; +	
unsigned long register r10 asm("r10") = in[7]; +	unsigned long register r11 asm("r11") = nr; +	unsigned long register r12 asm("r12"); + +	asm volatile("bl	epapr_hypercall_start" +		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), +		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), +		       "=r"(r12) +		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), +		       "r"(r9), "r"(r10), "r"(r11) +		     : "memory", "cc", "xer", "ctr", "lr"); + +	out[0] = r4; +	out[1] = r5; +	out[2] = r6; +	out[3] = r7; +	out[4] = r8; +	out[5] = r9; +	out[6] = r10; +	out[7] = r11; + +	return r3; +} +#else +static unsigned long epapr_hypercall(unsigned long *in, +				   unsigned long *out, +				   unsigned long nr) +{ +	return EV_UNIMPLEMENTED; +} +#endif + +static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2) +{ +	unsigned long in[8]; +	unsigned long out[8]; +	unsigned long r; + +	r = epapr_hypercall(in, out, nr); +	*r2 = out[0]; + +	return r; +} + +static inline long epapr_hypercall0(unsigned int nr) +{ +	unsigned long in[8]; +	unsigned long out[8]; + +	return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall1(unsigned int nr, unsigned long p1) +{ +	unsigned long in[8]; +	unsigned long out[8]; + +	in[0] = p1; +	return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall2(unsigned int nr, unsigned long p1, +				    unsigned long p2) +{ +	unsigned long in[8]; +	unsigned long out[8]; + +	in[0] = p1; +	in[1] = p2; +	return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall3(unsigned int nr, unsigned long p1, +				    unsigned long p2, unsigned long p3) +{ +	unsigned long in[8]; +	unsigned long out[8]; + +	in[0] = p1; +	in[1] = p2; +	in[2] = p3; +	return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall4(unsigned int nr, unsigned long p1, +				    unsigned long p2, unsigned long p3, +				    unsigned long p4) +{ +	unsigned long in[8]; +	unsigned long out[8]; + +	in[0] = p1; +	in[1] = p2; +	in[2] = p3; +	in[3] = p4; +	return epapr_hypercall(in, out, nr); +}  #endif /* !__ASSEMBLY__ */  #endif /* _EPAPR_HCALLS_H */ diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index 51fa43e536b..a8b52b61043 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -46,9 +46,8 @@  #define EX_CR		(1 * 8)  #define EX_R10		(2 * 8)  #define EX_R11		(3 * 8) -#define EX_R13		(4 * 8) -#define EX_R14		(5 * 8) -#define EX_R15		(6 * 8) +#define EX_R14		(4 * 8) +#define EX_R15		(5 * 8)  /*   * The TLB miss exception uses different slots. 
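An aside on the epapr_hcalls.h additions above: epapr_hypercall() binds in[0..7] to registers r3-r10 and the hypercall number to r11, and the epapr_hypercallN() helpers merely fill in[] and forward. A hedged sketch of a caller (the token value and function name are hypothetical):

/* Hypothetical hypercall token, for illustration only. */
#define EV_EXAMPLE_GET_COUNT	42

/*
 * A no-input, one-output hypercall: the EV_* status is the return
 * value (r3), and the single result lands in out[0], i.e. r4.
 */
static inline long ev_example_get_count(unsigned long *count)
{
	return epapr_hypercall0_1(EV_EXAMPLE_GET_COUNT, count);
}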
@@ -173,22 +172,12 @@ exc_##label##_book3e:  	ld	r9,EX_TLB_R9(r12);					    \  	ld	r8,EX_TLB_R8(r12);					    \  	mtlr	r16; -#define TLB_MISS_PROLOG_STATS_BOLTED						    \ -	mflr	r10;							    \ -	std	r8,PACA_EXTLB+EX_TLB_R8(r13);				    \ -	std	r9,PACA_EXTLB+EX_TLB_R9(r13);				    \ -	std	r10,PACA_EXTLB+EX_TLB_LR(r13); -#define TLB_MISS_RESTORE_STATS_BOLTED					            \ -	ld	r16,PACA_EXTLB+EX_TLB_LR(r13);				    \ -	ld	r9,PACA_EXTLB+EX_TLB_R9(r13);				    \ -	ld	r8,PACA_EXTLB+EX_TLB_R8(r13);				    \ -	mtlr	r16;  #define TLB_MISS_STATS_D(name)						    \  	addi	r9,r13,MMSTAT_DSTATS+name;				    \ -	bl	.tlb_stat_inc; +	bl	tlb_stat_inc;  #define TLB_MISS_STATS_I(name)						    \  	addi	r9,r13,MMSTAT_ISTATS+name;				    \ -	bl	.tlb_stat_inc; +	bl	tlb_stat_inc;  #define TLB_MISS_STATS_X(name)						    \  	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);				    \  	cmpdi	cr2,r8,-1;						    \ @@ -196,7 +185,7 @@ exc_##label##_book3e:  	addi	r9,r13,MMSTAT_DSTATS+name;				    \  	b	62f;							    \ 61:	addi	r9,r13,MMSTAT_ISTATS+name;				    \ -62:	bl	.tlb_stat_inc; +62:	bl	tlb_stat_inc;  #define TLB_MISS_STATS_SAVE_INFO					    \  	std	r14,EX_TLB_ESR(r12);	/* save ESR */  #define TLB_MISS_STATS_SAVE_INFO_BOLTED					    \ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index cca12f08484..8f35cd7d59c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -147,6 +147,14 @@ BEGIN_FTR_SECTION_NESTED(943)						\  END_FTR_SECTION_NESTED(ftr,ftr,943)  /* + * Set an SPR from a register if the CPU has the given feature + */ +#define OPT_SET_SPR(ra, spr, ftr)					\ +BEGIN_FTR_SECTION_NESTED(943)						\ +	mtspr	spr,ra;							\ +END_FTR_SECTION_NESTED(ftr,ftr,943) + +/*   * Save a register to the PACA if the CPU has the given feature   */  #define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\ @@ -198,12 +206,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)  	cmpwi	r10,0;							\  	bne	do_kvm_##n +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +/* + * If hv is possible, interrupts come into the hv version + * of the kvmppc_interrupt code, which then jumps to the PR handler, + * kvmppc_interrupt_pr, if the guest is a PR guest. 
+ */ +#define kvmppc_interrupt kvmppc_interrupt_hv +#else +#define kvmppc_interrupt kvmppc_interrupt_pr +#endif +  #define __KVM_HANDLER(area, h, n)					\  do_kvm_##n:								\  	BEGIN_FTR_SECTION_NESTED(947)					\  	ld	r10,area+EX_CFAR(r13);					\  	std	r10,HSTATE_CFAR(r13);					\  	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\ +	BEGIN_FTR_SECTION_NESTED(948)					\ +	ld	r10,area+EX_PPR(r13);					\ +	std	r10,HSTATE_PPR(r13);					\ +	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\  	ld	r10,area+EX_R10(r13);					\  	stw	r9,HSTATE_SCRATCH1(r13);				\  	ld	r9,area+EX_R9(r13);					\ @@ -217,6 +240,10 @@ do_kvm_##n:								\  	ld	r10,area+EX_R10(r13);					\  	beq	89f;							\  	stw	r9,HSTATE_SCRATCH1(r13);			\ +	BEGIN_FTR_SECTION_NESTED(948)					\ +	ld	r9,area+EX_PPR(r13);					\ +	std	r9,HSTATE_PPR(r13);					\ +	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\  	ld	r9,area+EX_R9(r13);					\  	std	r12,HSTATE_SCRATCH0(r13);			\  	li	r12,n;							\ @@ -236,7 +263,7 @@ do_kvm_##n:								\  #define KVM_HANDLER_SKIP(area, h, n)  #endif -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE  #define KVMTEST_PR(n)			__KVMTEST(n)  #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)  #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n) @@ -265,7 +292,7 @@ do_kvm_##n:								\  	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \  	beq-	1f;							   \  	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \ -1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \ +1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \  	blt+	cr1,3f;			/* abort if it is		*/ \  	li	r1,(n);			/* will be reloaded later	*/ \  	sth	r1,PACA_TRAP_SAVE(r13);					   \ @@ -282,9 +309,12 @@ do_kvm_##n:								\  	beq	4f;			/* if from kernel mode		*/ \  	ACCOUNT_CPU_USER_ENTRY(r9, r10);				   \  	SAVE_PPR(area, r9, r10);					   \ -4:	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \ -	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe	*/ \ -	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \ +4:	EXCEPTION_PROLOG_COMMON_2(area)					   \ +	EXCEPTION_PROLOG_COMMON_3(n)					   \ +	ACCOUNT_STOLEN_TIME + +/* Save original regs values from save area to stack frame. */ +#define EXCEPTION_PROLOG_COMMON_2(area)					   \  	ld	r9,area+EX_R9(r13);	/* move r9, r10 to stackframe	*/ \  	ld	r10,area+EX_R10(r13);					   \  	std	r9,GPR9(r1);						   \ @@ -299,11 +329,16 @@ do_kvm_##n:								\  	ld	r10,area+EX_CFAR(r13);					   \  	std	r10,ORIG_GPR3(r1);					   \  	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);		   \ +	GET_CTR(r10, area);						   \ +	std	r10,_CTR(r1); + +#define EXCEPTION_PROLOG_COMMON_3(n)					   \ +	std	r2,GPR2(r1);		/* save r2 in stackframe	*/ \ +	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe   */ \ +	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe	*/ \  	mflr	r9;			/* Get LR, later save to stack	*/ \  	ld	r2,PACATOC(r13);	/* get kernel TOC into r2	*/ \  	std	r9,_LINK(r1);						   \ -	GET_CTR(r10, area);						   \ -	std	r10,_CTR(r1);						   \  	lbz	r10,PACASOFTIRQEN(r13);				   \  	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \  	std	r10,SOFTE(r1);						   \ @@ -313,8 +348,7 @@ do_kvm_##n:								\  	li	r10,0;							   \  	ld	r11,exception_marker@toc(r2);				   \  	std	r10,RESULT(r1);		/* clear regs->result		*/ \ -	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/ \ -	ACCOUNT_STOLEN_TIME +	std	r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame	*/  /*   * Exception vectors. 
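Also from the bitops.h hunk earlier in this diff: the new PPC_BIT*() macros translate IBM big-endian bit numbering (bit 0 is the most significant bit) into ordinary masks. A few worked values, assuming BITS_PER_LONG == 64 (the checks below are illustrative and need <linux/bug.h>):

static inline void ppc_bit_examples(void)
{
	BUILD_BUG_ON(PPC_BITLSHIFT(0) != 63);	/* IBM bit 0 is the MSB */
	BUILD_BUG_ON(PPC_BIT(0)  != 0x8000000000000000UL);
	BUILD_BUG_ON(PPC_BIT(63) != 0x0000000000000001UL);
	/* PPC_BITMASK(bs, be) covers IBM bits bs..be inclusive: */
	BUILD_BUG_ON(PPC_BITMASK(0, 3) != 0xf000000000000000UL);
}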
@@ -483,7 +517,7 @@ label##_relon_hv:							\  #define DISABLE_INTS	RECONCILE_IRQ_STATE(r10,r11)  #define ADD_NVGPRS				\ -	bl	.save_nvgprs +	bl	save_nvgprs  #define RUNLATCH_ON				\  BEGIN_FTR_SECTION				\ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 88dbf965918..a6774560afe 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -210,7 +210,6 @@ extern int is_fadump_active(void);  extern void crash_fadump(struct pt_regs *, const char *);  extern void fadump_cleanup(void); -extern void vmcore_cleanup(void);  #else	/* CONFIG_FA_DUMP */  static inline int is_fadump_active(void) { return 0; }  static inline void crash_fadump(struct pt_regs *regs, const char *str) { } diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 5c2c0233175..90f604bbcd1 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -58,52 +58,12 @@ enum fixed_addresses {  extern void __set_fixmap (enum fixed_addresses idx,  					phys_addr_t phys, pgprot_t flags); -#define set_fixmap(idx, phys) \ -		__set_fixmap(idx, phys, PAGE_KERNEL) -/* - * Some hardware wants to get fixmapped without caching. - */ -#define set_fixmap_nocache(idx, phys) \ -		__set_fixmap(idx, phys, PAGE_KERNEL_NCG) - -#define clear_fixmap(idx) \ -		__set_fixmap(idx, 0, __pgprot(0)) -  #define __FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)  #define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE) -#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -extern void __this_fixmap_does_not_exist(void); - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ -	/* -	 * this branch gets completely eliminated after inlining, -	 * except when someone tries to use fixaddr indices in an -	 * illegal way. (such as mixing up address types or using -	 * out-of-range indices). -	 * -	 * If it doesn't get removed, the linker will complain -	 * loudly with a reasonably clear error message.. -	 */ -	if (idx >= __end_of_fixed_addresses) -		__this_fixmap_does_not_exist(); - -        return __fix_to_virt(idx); -} +#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ -	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); -	return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h>  #endif /* !__ASSEMBLY__ */  #endif diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h deleted file mode 100644 index b8a4b9bc50b..00000000000 --- a/arch/powerpc/include/asm/fsl_ifc.h +++ /dev/null @@ -1,836 +0,0 @@ -/* Freescale Integrated Flash Controller - * - * Copyright 2011 Freescale Semiconductor, Inc - * - * Author: Dipen Dudhat <dipen.dudhat@freescale.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ - -#ifndef __ASM_FSL_IFC_H -#define __ASM_FSL_IFC_H - -#include <linux/compiler.h> -#include <linux/types.h> -#include <linux/io.h> - -#include <linux/of_platform.h> -#include <linux/interrupt.h> - -#define FSL_IFC_BANK_COUNT 4 - -/* - * CSPR - Chip Select Property Register - */ -#define CSPR_BA				0xFFFF0000 -#define CSPR_BA_SHIFT			16 -#define CSPR_PORT_SIZE			0x00000180 -#define CSPR_PORT_SIZE_SHIFT		7 -/* Port Size 8 bit */ -#define CSPR_PORT_SIZE_8		0x00000080 -/* Port Size 16 bit */ -#define CSPR_PORT_SIZE_16		0x00000100 -/* Port Size 32 bit */ -#define CSPR_PORT_SIZE_32		0x00000180 -/* Write Protect */ -#define CSPR_WP				0x00000040 -#define CSPR_WP_SHIFT			6 -/* Machine Select */ -#define CSPR_MSEL			0x00000006 -#define CSPR_MSEL_SHIFT			1 -/* NOR */ -#define CSPR_MSEL_NOR			0x00000000 -/* NAND */ -#define CSPR_MSEL_NAND			0x00000002 -/* GPCM */ -#define CSPR_MSEL_GPCM			0x00000004 -/* Bank Valid */ -#define CSPR_V				0x00000001 -#define CSPR_V_SHIFT			0 - -/* - * Address Mask Register - */ -#define IFC_AMASK_MASK			0xFFFF0000 -#define IFC_AMASK_SHIFT			16 -#define IFC_AMASK(n)			(IFC_AMASK_MASK << \ -					(__ilog2(n) - IFC_AMASK_SHIFT)) - -/* - * Chip Select Option Register IFC_NAND Machine - */ -/* Enable ECC Encoder */ -#define CSOR_NAND_ECC_ENC_EN		0x80000000 -#define CSOR_NAND_ECC_MODE_MASK		0x30000000 -/* 4 bit correction per 520 Byte sector */ -#define CSOR_NAND_ECC_MODE_4		0x00000000 -/* 8 bit correction per 528 Byte sector */ -#define CSOR_NAND_ECC_MODE_8		0x10000000 -/* Enable ECC Decoder */ -#define CSOR_NAND_ECC_DEC_EN		0x04000000 -/* Row Address Length */ -#define CSOR_NAND_RAL_MASK		0x01800000 -#define CSOR_NAND_RAL_SHIFT		20 -#define CSOR_NAND_RAL_1			0x00000000 -#define CSOR_NAND_RAL_2			0x00800000 -#define CSOR_NAND_RAL_3			0x01000000 -#define CSOR_NAND_RAL_4			0x01800000 -/* Page Size 512b, 2k, 4k */ -#define CSOR_NAND_PGS_MASK		0x00180000 -#define CSOR_NAND_PGS_SHIFT		16 -#define CSOR_NAND_PGS_512		0x00000000 -#define CSOR_NAND_PGS_2K		0x00080000 -#define CSOR_NAND_PGS_4K		0x00100000 -/* Spare region Size */ -#define CSOR_NAND_SPRZ_MASK		0x0000E000 -#define CSOR_NAND_SPRZ_SHIFT		13 -#define CSOR_NAND_SPRZ_16		0x00000000 -#define CSOR_NAND_SPRZ_64		0x00002000 -#define CSOR_NAND_SPRZ_128		0x00004000 -#define CSOR_NAND_SPRZ_210		0x00006000 -#define CSOR_NAND_SPRZ_218		0x00008000 -#define CSOR_NAND_SPRZ_224		0x0000A000 -/* Pages Per Block */ -#define CSOR_NAND_PB_MASK		0x00000700 -#define CSOR_NAND_PB_SHIFT		8 -#define CSOR_NAND_PB(n)		((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_NAND_TRHZ_MASK		0x0000001C -#define CSOR_NAND_TRHZ_SHIFT		2 -#define CSOR_NAND_TRHZ_20		0x00000000 -#define CSOR_NAND_TRHZ_40		0x00000004 -#define CSOR_NAND_TRHZ_60		0x00000008 -#define CSOR_NAND_TRHZ_80		0x0000000C -#define CSOR_NAND_TRHZ_100		0x00000010 -/* Buffer control disable */ -#define CSOR_NAND_BCTLD			0x00000001 - -/* - * Chip Select Option Register - NOR Flash Mode - */ -/* Enable Address shift Mode */ -#define CSOR_NOR_ADM_SHFT_MODE_EN	0x80000000 -/* Page Read Enable from NOR device */ -#define CSOR_NOR_PGRD_EN		0x10000000 -/* AVD Toggle Enable during Burst Program */ -#define CSOR_NOR_AVD_TGL_PGM_EN		0x01000000 -/* Address Data 
Multiplexing Shift */ -#define CSOR_NOR_ADM_MASK		0x0003E000 -#define CSOR_NOR_ADM_SHIFT_SHIFT	13 -#define CSOR_NOR_ADM_SHIFT(n)	((n) << CSOR_NOR_ADM_SHIFT_SHIFT) -/* Type of the NOR device hooked */ -#define CSOR_NOR_NOR_MODE_AYSNC_NOR	0x00000000 -#define CSOR_NOR_NOR_MODE_AVD_NOR	0x00000020 -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_NOR_TRHZ_MASK		0x0000001C -#define CSOR_NOR_TRHZ_SHIFT		2 -#define CSOR_NOR_TRHZ_20		0x00000000 -#define CSOR_NOR_TRHZ_40		0x00000004 -#define CSOR_NOR_TRHZ_60		0x00000008 -#define CSOR_NOR_TRHZ_80		0x0000000C -#define CSOR_NOR_TRHZ_100		0x00000010 -/* Buffer control disable */ -#define CSOR_NOR_BCTLD			0x00000001 - -/* - * Chip Select Option Register - GPCM Mode - */ -/* GPCM Mode - Normal */ -#define CSOR_GPCM_GPMODE_NORMAL		0x00000000 -/* GPCM Mode - GenericASIC */ -#define CSOR_GPCM_GPMODE_ASIC		0x80000000 -/* Parity Mode odd/even */ -#define CSOR_GPCM_PARITY_EVEN		0x40000000 -/* Parity Checking enable/disable */ -#define CSOR_GPCM_PAR_EN		0x20000000 -/* GPCM Timeout Count */ -#define CSOR_GPCM_GPTO_MASK		0x0F000000 -#define CSOR_GPCM_GPTO_SHIFT		24 -#define CSOR_GPCM_GPTO(n)	((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) -/* GPCM External Access Termination mode for read access */ -#define CSOR_GPCM_RGETA_EXT		0x00080000 -/* GPCM External Access Termination mode for write access */ -#define CSOR_GPCM_WGETA_EXT		0x00040000 -/* Address Data Multiplexing Shift */ -#define CSOR_GPCM_ADM_MASK		0x0003E000 -#define CSOR_GPCM_ADM_SHIFT_SHIFT	13 -#define CSOR_GPCM_ADM_SHIFT(n)	((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) -/* Generic ASIC Parity error indication delay */ -#define CSOR_GPCM_GAPERRD_MASK		0x00000180 -#define CSOR_GPCM_GAPERRD_SHIFT		7 -#define CSOR_GPCM_GAPERRD(n)	(((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) -/* Time for Read Enable High to Output High Impedance */ -#define CSOR_GPCM_TRHZ_MASK		0x0000001C -#define CSOR_GPCM_TRHZ_20		0x00000000 -#define CSOR_GPCM_TRHZ_40		0x00000004 -#define CSOR_GPCM_TRHZ_60		0x00000008 -#define CSOR_GPCM_TRHZ_80		0x0000000C -#define CSOR_GPCM_TRHZ_100		0x00000010 -/* Buffer control disable */ -#define CSOR_GPCM_BCTLD			0x00000001 - -/* - * Ready Busy Status Register (RB_STAT) - */ -/* CSn is READY */ -#define IFC_RB_STAT_READY_CS0		0x80000000 -#define IFC_RB_STAT_READY_CS1		0x40000000 -#define IFC_RB_STAT_READY_CS2		0x20000000 -#define IFC_RB_STAT_READY_CS3		0x10000000 - -/* - * General Control Register (GCR) - */ -#define IFC_GCR_MASK			0x8000F800 -/* reset all IFC hardware */ -#define IFC_GCR_SOFT_RST_ALL		0x80000000 -/* Turnaroud Time of external buffer */ -#define IFC_GCR_TBCTL_TRN_TIME		0x0000F800 -#define IFC_GCR_TBCTL_TRN_TIME_SHIFT	11 - -/* - * Common Event and Error Status Register (CM_EVTER_STAT) - */ -/* Chip select error */ -#define IFC_CM_EVTER_STAT_CSER		0x80000000 - -/* - * Common Event and Error Enable Register (CM_EVTER_EN) - */ -/* Chip select error checking enable */ -#define IFC_CM_EVTER_EN_CSEREN		0x80000000 - -/* - * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN) - */ -/* Chip select error interrupt enable */ -#define IFC_CM_EVTER_INTR_EN_CSERIREN	0x80000000 - -/* - * Common Transfer Error Attribute Register-0 (CM_ERATTR0) - */ -/* transaction type of error Read/Write */ -#define IFC_CM_ERATTR0_ERTYP_READ	0x80000000 -#define IFC_CM_ERATTR0_ERAID		0x0FF00000 -#define IFC_CM_ERATTR0_ERAID_SHIFT	20 -#define IFC_CM_ERATTR0_ESRCID		0x0000FF00 -#define IFC_CM_ERATTR0_ESRCID_SHIFT	8 - -/* - * Clock Control Register (CCR) - */ -#define IFC_CCR_MASK			0x0F0F8800 -/* Clock 
division ratio */ -#define IFC_CCR_CLK_DIV_MASK		0x0F000000 -#define IFC_CCR_CLK_DIV_SHIFT		24 -#define IFC_CCR_CLK_DIV(n)		((n-1) << IFC_CCR_CLK_DIV_SHIFT) -/* IFC Clock Delay */ -#define IFC_CCR_CLK_DLY_MASK		0x000F0000 -#define IFC_CCR_CLK_DLY_SHIFT		16 -#define IFC_CCR_CLK_DLY(n)		((n) << IFC_CCR_CLK_DLY_SHIFT) -/* Invert IFC clock before sending out */ -#define IFC_CCR_INV_CLK_EN		0x00008000 -/* Fedback IFC Clock */ -#define IFC_CCR_FB_IFC_CLK_SEL		0x00000800 - -/* - * Clock Status Register (CSR) - */ -/* Clk is stable */ -#define IFC_CSR_CLK_STAT_STABLE		0x80000000 - -/* - * IFC_NAND Machine Specific Registers - */ -/* - * NAND Configuration Register (NCFGR) - */ -/* Auto Boot Mode */ -#define IFC_NAND_NCFGR_BOOT		0x80000000 -/* Addressing Mode-ROW0+n/COL0 */ -#define IFC_NAND_NCFGR_ADDR_MODE_RC0	0x00000000 -/* Addressing Mode-ROW0+n/COL0+n */ -#define IFC_NAND_NCFGR_ADDR_MODE_RC1	0x00400000 -/* Number of loop iterations of FIR sequences for multi page operations */ -#define IFC_NAND_NCFGR_NUM_LOOP_MASK	0x0000F000 -#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT	12 -#define IFC_NAND_NCFGR_NUM_LOOP(n)	((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) -/* Number of wait cycles */ -#define IFC_NAND_NCFGR_NUM_WAIT_MASK	0x000000FF -#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT	0 - -/* - * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1) - */ -/* General purpose FCM flash command bytes CMD0-CMD7 */ -#define IFC_NAND_FCR0_CMD0		0xFF000000 -#define IFC_NAND_FCR0_CMD0_SHIFT	24 -#define IFC_NAND_FCR0_CMD1		0x00FF0000 -#define IFC_NAND_FCR0_CMD1_SHIFT	16 -#define IFC_NAND_FCR0_CMD2		0x0000FF00 -#define IFC_NAND_FCR0_CMD2_SHIFT	8 -#define IFC_NAND_FCR0_CMD3		0x000000FF -#define IFC_NAND_FCR0_CMD3_SHIFT	0 -#define IFC_NAND_FCR1_CMD4		0xFF000000 -#define IFC_NAND_FCR1_CMD4_SHIFT	24 -#define IFC_NAND_FCR1_CMD5		0x00FF0000 -#define IFC_NAND_FCR1_CMD5_SHIFT	16 -#define IFC_NAND_FCR1_CMD6		0x0000FF00 -#define IFC_NAND_FCR1_CMD6_SHIFT	8 -#define IFC_NAND_FCR1_CMD7		0x000000FF -#define IFC_NAND_FCR1_CMD7_SHIFT	0 - -/* - * Flash ROW and COL Address Register (ROWn, COLn) - */ -/* Main/spare region locator */ -#define IFC_NAND_COL_MS			0x80000000 -/* Column Address */ -#define IFC_NAND_COL_CA_MASK		0x00000FFF - -/* - * NAND Flash Byte Count Register (NAND_BC) - */ -/* Byte Count for read/Write */ -#define IFC_NAND_BC			0x000001FF - -/* - * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2) - */ -/* NAND Machine specific opcodes OP0-OP14*/ -#define IFC_NAND_FIR0_OP0		0xFC000000 -#define IFC_NAND_FIR0_OP0_SHIFT		26 -#define IFC_NAND_FIR0_OP1		0x03F00000 -#define IFC_NAND_FIR0_OP1_SHIFT		20 -#define IFC_NAND_FIR0_OP2		0x000FC000 -#define IFC_NAND_FIR0_OP2_SHIFT		14 -#define IFC_NAND_FIR0_OP3		0x00003F00 -#define IFC_NAND_FIR0_OP3_SHIFT		8 -#define IFC_NAND_FIR0_OP4		0x000000FC -#define IFC_NAND_FIR0_OP4_SHIFT		2 -#define IFC_NAND_FIR1_OP5		0xFC000000 -#define IFC_NAND_FIR1_OP5_SHIFT		26 -#define IFC_NAND_FIR1_OP6		0x03F00000 -#define IFC_NAND_FIR1_OP6_SHIFT		20 -#define IFC_NAND_FIR1_OP7		0x000FC000 -#define IFC_NAND_FIR1_OP7_SHIFT		14 -#define IFC_NAND_FIR1_OP8		0x00003F00 -#define IFC_NAND_FIR1_OP8_SHIFT		8 -#define IFC_NAND_FIR1_OP9		0x000000FC -#define IFC_NAND_FIR1_OP9_SHIFT		2 -#define IFC_NAND_FIR2_OP10		0xFC000000 -#define IFC_NAND_FIR2_OP10_SHIFT	26 -#define IFC_NAND_FIR2_OP11		0x03F00000 -#define IFC_NAND_FIR2_OP11_SHIFT	20 -#define IFC_NAND_FIR2_OP12		0x000FC000 -#define IFC_NAND_FIR2_OP12_SHIFT	14 -#define IFC_NAND_FIR2_OP13		0x00003F00 -#define IFC_NAND_FIR2_OP13_SHIFT	8 -#define IFC_NAND_FIR2_OP14		
0x000000FC -#define IFC_NAND_FIR2_OP14_SHIFT	2 - -/* - * Instruction opcodes to be programmed - * in FIR registers- 6bits - */ -enum ifc_nand_fir_opcodes { -	IFC_FIR_OP_NOP, -	IFC_FIR_OP_CA0, -	IFC_FIR_OP_CA1, -	IFC_FIR_OP_CA2, -	IFC_FIR_OP_CA3, -	IFC_FIR_OP_RA0, -	IFC_FIR_OP_RA1, -	IFC_FIR_OP_RA2, -	IFC_FIR_OP_RA3, -	IFC_FIR_OP_CMD0, -	IFC_FIR_OP_CMD1, -	IFC_FIR_OP_CMD2, -	IFC_FIR_OP_CMD3, -	IFC_FIR_OP_CMD4, -	IFC_FIR_OP_CMD5, -	IFC_FIR_OP_CMD6, -	IFC_FIR_OP_CMD7, -	IFC_FIR_OP_CW0, -	IFC_FIR_OP_CW1, -	IFC_FIR_OP_CW2, -	IFC_FIR_OP_CW3, -	IFC_FIR_OP_CW4, -	IFC_FIR_OP_CW5, -	IFC_FIR_OP_CW6, -	IFC_FIR_OP_CW7, -	IFC_FIR_OP_WBCD, -	IFC_FIR_OP_RBCD, -	IFC_FIR_OP_BTRD, -	IFC_FIR_OP_RDSTAT, -	IFC_FIR_OP_NWAIT, -	IFC_FIR_OP_WFR, -	IFC_FIR_OP_SBRD, -	IFC_FIR_OP_UA, -	IFC_FIR_OP_RB, -}; - -/* - * NAND Chip Select Register (NAND_CSEL) - */ -#define IFC_NAND_CSEL			0x0C000000 -#define IFC_NAND_CSEL_SHIFT		26 -#define IFC_NAND_CSEL_CS0		0x00000000 -#define IFC_NAND_CSEL_CS1		0x04000000 -#define IFC_NAND_CSEL_CS2		0x08000000 -#define IFC_NAND_CSEL_CS3		0x0C000000 - -/* - * NAND Operation Sequence Start (NANDSEQ_STRT) - */ -/* NAND Flash Operation Start */ -#define IFC_NAND_SEQ_STRT_FIR_STRT	0x80000000 -/* Automatic Erase */ -#define IFC_NAND_SEQ_STRT_AUTO_ERS	0x00800000 -/* Automatic Program */ -#define IFC_NAND_SEQ_STRT_AUTO_PGM	0x00100000 -/* Automatic Copyback */ -#define IFC_NAND_SEQ_STRT_AUTO_CPB	0x00020000 -/* Automatic Read Operation */ -#define IFC_NAND_SEQ_STRT_AUTO_RD	0x00004000 -/* Automatic Status Read */ -#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD	0x00000800 - -/* - * NAND Event and Error Status Register (NAND_EVTER_STAT) - */ -/* Operation Complete */ -#define IFC_NAND_EVTER_STAT_OPC		0x80000000 -/* Flash Timeout Error */ -#define IFC_NAND_EVTER_STAT_FTOER	0x08000000 -/* Write Protect Error */ -#define IFC_NAND_EVTER_STAT_WPER	0x04000000 -/* ECC Error */ -#define IFC_NAND_EVTER_STAT_ECCER	0x02000000 -/* RCW Load Done */ -#define IFC_NAND_EVTER_STAT_RCW_DN	0x00008000 -/* Boot Loadr Done */ -#define IFC_NAND_EVTER_STAT_BOOT_DN	0x00004000 -/* Bad Block Indicator search select */ -#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE	0x00000800 - -/* - * NAND Flash Page Read Completion Event Status Register - * (PGRDCMPL_EVT_STAT) - */ -#define PGRDCMPL_EVT_STAT_MASK		0xFFFF0000 -/* Small Page 0-15 Done */ -#define PGRDCMPL_EVT_STAT_SECTION_SP(n)	(1 << (31 - (n))) -/* Large Page(2K) 0-3 Done */ -#define PGRDCMPL_EVT_STAT_LP_2K(n)	(0xF << (28 - (n)*4)) -/* Large Page(4K) 0-1 Done */ -#define PGRDCMPL_EVT_STAT_LP_4K(n)	(0xFF << (24 - (n)*8)) - -/* - * NAND Event and Error Enable Register (NAND_EVTER_EN) - */ -/* Operation complete event enable */ -#define IFC_NAND_EVTER_EN_OPC_EN	0x80000000 -/* Page read complete event enable */ -#define IFC_NAND_EVTER_EN_PGRDCMPL_EN	0x20000000 -/* Flash Timeout error enable */ -#define IFC_NAND_EVTER_EN_FTOER_EN	0x08000000 -/* Write Protect error enable */ -#define IFC_NAND_EVTER_EN_WPER_EN	0x04000000 -/* ECC error logging enable */ -#define IFC_NAND_EVTER_EN_ECCER_EN	0x02000000 - -/* - * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN) - */ -/* Enable interrupt for operation complete */ -#define IFC_NAND_EVTER_INTR_OPCIR_EN		0x80000000 -/* Enable interrupt for Page read complete */ -#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN	0x20000000 -/* Enable interrupt for Flash timeout error */ -#define IFC_NAND_EVTER_INTR_FTOERIR_EN		0x08000000 -/* Enable interrupt for Write protect error */ -#define IFC_NAND_EVTER_INTR_WPERIR_EN		0x04000000 -/* Enable interrupt for ECC 
error*/ -#define IFC_NAND_EVTER_INTR_ECCERIR_EN		0x02000000 - -/* - * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0) - */ -#define IFC_NAND_ERATTR0_MASK		0x0C080000 -/* Error on CS0-3 for NAND */ -#define IFC_NAND_ERATTR0_ERCS_CS0	0x00000000 -#define IFC_NAND_ERATTR0_ERCS_CS1	0x04000000 -#define IFC_NAND_ERATTR0_ERCS_CS2	0x08000000 -#define IFC_NAND_ERATTR0_ERCS_CS3	0x0C000000 -/* Transaction type of error Read/Write */ -#define IFC_NAND_ERATTR0_ERTTYPE_READ	0x00080000 - -/* - * NAND Flash Status Register (NAND_FSR) - */ -/* First byte of data read from read status op */ -#define IFC_NAND_NFSR_RS0		0xFF000000 -/* Second byte of data read from read status op */ -#define IFC_NAND_NFSR_RS1		0x00FF0000 - -/* - * ECC Error Status Registers (ECCSTAT0-ECCSTAT3) - */ -/* Number of ECC errors on sector n (n = 0-15) */ -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK	0x0F000000 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT	24 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK	0x000F0000 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT	16 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK	0x00000F00 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT	8 -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK	0x0000000F -#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT	0 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK	0x0F000000 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT	24 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK	0x000F0000 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT	16 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK	0x00000F00 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT	8 -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK	0x0000000F -#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT	0 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK	0x0F000000 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT	24 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK	0x000F0000 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT	16 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK	0x00000F00 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT	8 -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK	0x0000000F -#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT	0 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK	0x0F000000 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT	24 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK	0x000F0000 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT	16 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK	0x00000F00 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT	8 -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK	0x0000000F -#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT	0 - -/* - * NAND Control Register (NANDCR) - */ -#define IFC_NAND_NCR_FTOCNT_MASK	0x1E000000 -#define IFC_NAND_NCR_FTOCNT_SHIFT	25 -#define IFC_NAND_NCR_FTOCNT(n)	((_ilog2(n) - 8)  << IFC_NAND_NCR_FTOCNT_SHIFT) - -/* - * NAND_AUTOBOOT_TRGR - */ -/* Trigger RCW load */ -#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD	0x80000000 -/* Trigget Auto Boot */ -#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD	0x20000000 - -/* - * NAND_MDR - */ -/* 1st read data byte when opcode SBRD */ -#define IFC_NAND_MDR_RDATA0		0xFF000000 -/* 2nd read data byte when opcode SBRD */ -#define IFC_NAND_MDR_RDATA1		0x00FF0000 - -/* - * NOR Machine Specific Registers - */ -/* - * NOR Event and Error Status Register (NOR_EVTER_STAT) - */ -/* NOR Command Sequence Operation Complete */ -#define IFC_NOR_EVTER_STAT_OPC_NOR	0x80000000 -/* Write Protect Error */ -#define IFC_NOR_EVTER_STAT_WPER		0x04000000 -/* Command Sequence Timeout Error */ -#define IFC_NOR_EVTER_STAT_STOER	0x01000000 - -/* - * NOR Event and Error 
Enable Register (NOR_EVTER_EN) - */ -/* NOR Command Seq complete event enable */ -#define IFC_NOR_EVTER_EN_OPCEN_NOR	0x80000000 -/* Write Protect Error Checking Enable */ -#define IFC_NOR_EVTER_EN_WPEREN		0x04000000 -/* Timeout Error Enable */ -#define IFC_NOR_EVTER_EN_STOEREN	0x01000000 - -/* - * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN) - */ -/* Enable interrupt for OPC complete */ -#define IFC_NOR_EVTER_INTR_OPCEN_NOR	0x80000000 -/* Enable interrupt for write protect error */ -#define IFC_NOR_EVTER_INTR_WPEREN	0x04000000 -/* Enable interrupt for timeout error */ -#define IFC_NOR_EVTER_INTR_STOEREN	0x01000000 - -/* - * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0) - */ -/* Source ID for error transaction */ -#define IFC_NOR_ERATTR0_ERSRCID		0xFF000000 -/* AXI ID for error transation */ -#define IFC_NOR_ERATTR0_ERAID		0x000FF000 -/* Chip select corresponds to NOR error */ -#define IFC_NOR_ERATTR0_ERCS_CS0	0x00000000 -#define IFC_NOR_ERATTR0_ERCS_CS1	0x00000010 -#define IFC_NOR_ERATTR0_ERCS_CS2	0x00000020 -#define IFC_NOR_ERATTR0_ERCS_CS3	0x00000030 -/* Type of transaction read/write */ -#define IFC_NOR_ERATTR0_ERTYPE_READ	0x00000001 - -/* - * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2) - */ -#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP	0x000F0000 -#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER	0x00000F00 - -/* - * NOR Control Register (NORCR) - */ -#define IFC_NORCR_MASK			0x0F0F0000 -/* No. of Address/Data Phase */ -#define IFC_NORCR_NUM_PHASE_MASK	0x0F000000 -#define IFC_NORCR_NUM_PHASE_SHIFT	24 -#define IFC_NORCR_NUM_PHASE(n)	((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) -/* Sequence Timeout Count */ -#define IFC_NORCR_STOCNT_MASK		0x000F0000 -#define IFC_NORCR_STOCNT_SHIFT		16 -#define IFC_NORCR_STOCNT(n)	((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) - -/* - * GPCM Machine specific registers - */ -/* - * GPCM Event and Error Status Register (GPCM_EVTER_STAT) - */ -/* Timeout error */ -#define IFC_GPCM_EVTER_STAT_TOER	0x04000000 -/* Parity error */ -#define IFC_GPCM_EVTER_STAT_PER		0x01000000 - -/* - * GPCM Event and Error Enable Register (GPCM_EVTER_EN) - */ -/* Timeout error enable */ -#define IFC_GPCM_EVTER_EN_TOER_EN	0x04000000 -/* Parity error enable */ -#define IFC_GPCM_EVTER_EN_PER_EN	0x01000000 - -/* - * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN) - */ -/* Enable Interrupt for timeout error */ -#define IFC_GPCM_EEIER_TOERIR_EN	0x04000000 -/* Enable Interrupt for Parity error */ -#define IFC_GPCM_EEIER_PERIR_EN		0x01000000 - -/* - * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0) - */ -/* Source ID for error transaction */ -#define IFC_GPCM_ERATTR0_ERSRCID	0xFF000000 -/* AXI ID for error transaction */ -#define IFC_GPCM_ERATTR0_ERAID		0x000FF000 -/* Chip select corresponds to GPCM error */ -#define IFC_GPCM_ERATTR0_ERCS_CS0	0x00000000 -#define IFC_GPCM_ERATTR0_ERCS_CS1	0x00000040 -#define IFC_GPCM_ERATTR0_ERCS_CS2	0x00000080 -#define IFC_GPCM_ERATTR0_ERCS_CS3	0x000000C0 -/* Type of transaction read/Write */ -#define IFC_GPCM_ERATTR0_ERTYPE_READ	0x00000001 - -/* - * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2) - */ -/* On which beat of address/data parity error is observed */ -#define IFC_GPCM_ERATTR2_PERR_BEAT		0x00000C00 -/* Parity Error on byte */ -#define IFC_GPCM_ERATTR2_PERR_BYTE		0x000000F0 -/* Parity Error reported in addr or data phase */ -#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE	0x00000001 - -/* - * GPCM Status Register (GPCM_STAT) - */ -#define IFC_GPCM_STAT_BSY		0x80000000  /* GPCM is busy */ - -/* - * 
IFC Controller NAND Machine registers - */ -struct fsl_ifc_nand { -	__be32 ncfgr; -	u32 res1[0x4]; -	__be32 nand_fcr0; -	__be32 nand_fcr1; -	u32 res2[0x8]; -	__be32 row0; -	u32 res3; -	__be32 col0; -	u32 res4; -	__be32 row1; -	u32 res5; -	__be32 col1; -	u32 res6; -	__be32 row2; -	u32 res7; -	__be32 col2; -	u32 res8; -	__be32 row3; -	u32 res9; -	__be32 col3; -	u32 res10[0x24]; -	__be32 nand_fbcr; -	u32 res11; -	__be32 nand_fir0; -	__be32 nand_fir1; -	__be32 nand_fir2; -	u32 res12[0x10]; -	__be32 nand_csel; -	u32 res13; -	__be32 nandseq_strt; -	u32 res14; -	__be32 nand_evter_stat; -	u32 res15; -	__be32 pgrdcmpl_evt_stat; -	u32 res16[0x2]; -	__be32 nand_evter_en; -	u32 res17[0x2]; -	__be32 nand_evter_intr_en; -	u32 res18[0x2]; -	__be32 nand_erattr0; -	__be32 nand_erattr1; -	u32 res19[0x10]; -	__be32 nand_fsr; -	u32 res20; -	__be32 nand_eccstat[4]; -	u32 res21[0x20]; -	__be32 nanndcr; -	u32 res22[0x2]; -	__be32 nand_autoboot_trgr; -	u32 res23; -	__be32 nand_mdr; -	u32 res24[0x5C]; -}; - -/* - * IFC controller NOR Machine registers - */ -struct fsl_ifc_nor { -	__be32 nor_evter_stat; -	u32 res1[0x2]; -	__be32 nor_evter_en; -	u32 res2[0x2]; -	__be32 nor_evter_intr_en; -	u32 res3[0x2]; -	__be32 nor_erattr0; -	__be32 nor_erattr1; -	__be32 nor_erattr2; -	u32 res4[0x4]; -	__be32 norcr; -	u32 res5[0xEF]; -}; - -/* - * IFC controller GPCM Machine registers - */ -struct fsl_ifc_gpcm { -	__be32 gpcm_evter_stat; -	u32 res1[0x2]; -	__be32 gpcm_evter_en; -	u32 res2[0x2]; -	__be32 gpcm_evter_intr_en; -	u32 res3[0x2]; -	__be32 gpcm_erattr0; -	__be32 gpcm_erattr1; -	__be32 gpcm_erattr2; -	__be32 gpcm_stat; -	u32 res4[0x1F3]; -}; - -/* - * IFC Controller Registers - */ -struct fsl_ifc_regs { -	__be32 ifc_rev; -	u32 res1[0x2]; -	struct { -		__be32 cspr_ext; -		__be32 cspr; -		u32 res2; -	} cspr_cs[FSL_IFC_BANK_COUNT]; -	u32 res3[0x19]; -	struct { -		__be32 amask; -		u32 res4[0x2]; -	} amask_cs[FSL_IFC_BANK_COUNT]; -	u32 res5[0x17]; -	struct { -		__be32 csor_ext; -		__be32 csor; -		u32 res6; -	} csor_cs[FSL_IFC_BANK_COUNT]; -	u32 res7[0x19]; -	struct { -		__be32 ftim[4]; -		u32 res8[0x8]; -	} ftim_cs[FSL_IFC_BANK_COUNT]; -	u32 res9[0x60]; -	__be32 rb_stat; -	u32 res10[0x2]; -	__be32 ifc_gcr; -	u32 res11[0x2]; -	__be32 cm_evter_stat; -	u32 res12[0x2]; -	__be32 cm_evter_en; -	u32 res13[0x2]; -	__be32 cm_evter_intr_en; -	u32 res14[0x2]; -	__be32 cm_erattr0; -	__be32 cm_erattr1; -	u32 res15[0x2]; -	__be32 ifc_ccr; -	__be32 ifc_csr; -	u32 res16[0x2EB]; -	struct fsl_ifc_nand ifc_nand; -	struct fsl_ifc_nor ifc_nor; -	struct fsl_ifc_gpcm ifc_gpcm; -}; - -extern unsigned int convert_ifc_address(phys_addr_t addr_base); -extern int fsl_ifc_find(phys_addr_t addr_base); - -/* overview of the fsl ifc controller */ - -struct fsl_ifc_ctrl { -	/* device info */ -	struct device			*dev; -	struct fsl_ifc_regs __iomem	*regs; -	int				irq; -	int				nand_irq; -	spinlock_t			lock; -	void				*nand; - -	u32 nand_stat; -	wait_queue_head_t nand_wait; -}; - -extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - - -#endif /* __ASM_FSL_IFC_H */ diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 420b45368fc..067fb0dca54 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -285,7 +285,7 @@ struct fsl_lbc_ctrl {  	/* device info */  	struct device			*dev;  	struct fsl_lbc_regs __iomem	*regs; -	int				irq; +	int				irq[2];  	wait_queue_head_t		irq_wait;  	spinlock_t			lock;  	void				*nand; diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h 
index 169d039ed40..e3661872fbe 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -61,6 +61,7 @@ struct dyn_arch_ftrace {  #endif  #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) +#if !defined(_CALL_ELF) || _CALL_ELF != 2  #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME  static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)  { @@ -72,6 +73,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name  	 */  	return !strcmp(sym + 4, name + 3);  } +#endif  #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */  #endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h index 3bdcfce2c42..418fb654370 100644 --- a/arch/powerpc/include/asm/hardirq.h +++ b/arch/powerpc/include/asm/hardirq.h @@ -6,7 +6,8 @@  typedef struct {  	unsigned int __softirq_pending; -	unsigned int timer_irqs; +	unsigned int timer_irqs_event; +	unsigned int timer_irqs_others;  	unsigned int pmu_irqs;  	unsigned int mce_exceptions;  	unsigned int spurious_irqs; diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index d750336b171..623f2971ce0 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,  					    unsigned long addr, pte_t *ptep)  {  #ifdef CONFIG_PPC64 -	return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); +	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));  #else  	return __pte(pte_update(ptep, ~0UL, 0));  #endif diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 0c7f2bfcf13..5dbbb29f5c3 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -274,6 +274,11 @@  /* Platform specific hcalls, used by KVM */  #define H_RTAS			0xf000 +/* "Platform specific hcalls", provided by PHYP */ +#define H_GET_24X7_CATALOG_PAGE	0xF078 +#define H_GET_24X7_DATA		0xF07C +#define H_GET_PERF_COUNTER_INFO	0xF080 +  #ifndef __ASSEMBLY__  /** @@ -403,6 +408,8 @@ static inline unsigned long cmo_get_page_size(void)  extern long pSeries_enable_reloc_on_exc(void);  extern long pSeries_disable_reloc_on_exc(void); +extern long pseries_big_endian_exceptions(void); +  #else  #define pSeries_enable_reloc_on_exc()  do {} while (0) diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h index d3f64f36181..d4a5315718c 100644 --- a/arch/powerpc/include/asm/hvsi.h +++ b/arch/powerpc/include/asm/hvsi.h @@ -25,7 +25,7 @@  struct hvsi_header {  	uint8_t  type;  	uint8_t  len; -	uint16_t seqno; +	__be16 seqno;  } __attribute__((packed));  struct hvsi_data { @@ -35,24 +35,24 @@ struct hvsi_data {  struct hvsi_control {  	struct hvsi_header hdr; -	uint16_t verb; +	__be16 verb;  	/* optional depending on verb: */ -	uint32_t word; -	uint32_t mask; +	__be32 word; +	__be32 mask;  } __attribute__((packed));  struct hvsi_query {  	struct hvsi_header hdr; -	uint16_t verb; +	__be16 verb;  } __attribute__((packed));  struct hvsi_query_response {  	struct hvsi_header hdr; -	uint16_t verb; -	uint16_t query_seqno; +	__be16 verb; +	__be16 query_seqno;  	union {  		uint8_t  version; -		uint32_t mctrl_word; +		__be32 mctrl_word;  	} u;  } __attribute__((packed)); diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index eb0f4ac75c4..ac6432d9be4 100644 --- 
a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -79,7 +79,7 @@ static inline void hw_breakpoint_disable(void)  	brk.address = 0;  	brk.type = 0;  	brk.len = 0; -	set_breakpoint(&brk); +	__set_breakpoint(&brk);  }  extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 5a64757dc0d..97d3869991c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;  /*   * has legacy ISA devices ?   */ -#define arch_has_dev_port()	(isa_bridge_pcidev != NULL) +#define arch_has_dev_port()	(isa_bridge_pcidev != NULL || isa_io_special)  #endif  #include <linux/device.h> @@ -113,7 +113,7 @@ extern bool isa_io_special;  /* gcc 4.0 and older doesn't have 'Z' constraint */  #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0) -#define DEF_MMIO_IN_LE(name, size, insn)				\ +#define DEF_MMIO_IN_X(name, size, insn)				\  static inline u##size name(const volatile u##size __iomem *addr)	\  {									\  	u##size ret;							\ @@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\  	return ret;							\  } -#define DEF_MMIO_OUT_LE(name, size, insn) 				\ +#define DEF_MMIO_OUT_X(name, size, insn)				\  static inline void name(volatile u##size __iomem *addr, u##size val)	\  {									\  	__asm__ __volatile__("sync;"#insn" %1,0,%2"			\ @@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\  	IO_SET_SYNC_FLAG();						\  }  #else /* newer gcc */ -#define DEF_MMIO_IN_LE(name, size, insn)				\ +#define DEF_MMIO_IN_X(name, size, insn)				\  static inline u##size name(const volatile u##size __iomem *addr)	\  {									\  	u##size ret;							\ @@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\  	return ret;							\  } -#define DEF_MMIO_OUT_LE(name, size, insn) 				\ +#define DEF_MMIO_OUT_X(name, size, insn)				\  static inline void name(volatile u##size __iomem *addr, u##size val)	\  {									\  	__asm__ __volatile__("sync;"#insn" %1,%y0"			\ @@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\  }  #endif -#define DEF_MMIO_IN_BE(name, size, insn)				\ +#define DEF_MMIO_IN_D(name, size, insn)				\  static inline u##size name(const volatile u##size __iomem *addr)	\  {									\  	u##size ret;							\ @@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\  	return ret;							\  } -#define DEF_MMIO_OUT_BE(name, size, insn)				\ +#define DEF_MMIO_OUT_D(name, size, insn)				\  static inline void name(volatile u##size __iomem *addr, u##size val)	\  {									\  	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\ @@ -165,22 +165,53 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\  	IO_SET_SYNC_FLAG();						\  } +DEF_MMIO_IN_D(in_8,     8, lbz); +DEF_MMIO_OUT_D(out_8,   8, stb); -DEF_MMIO_IN_BE(in_8,     8, lbz); -DEF_MMIO_IN_BE(in_be16, 16, lhz); -DEF_MMIO_IN_BE(in_be32, 32, lwz); -DEF_MMIO_IN_LE(in_le16, 16, lhbrx); -DEF_MMIO_IN_LE(in_le32, 32, lwbrx); +#ifdef __BIG_ENDIAN__ +DEF_MMIO_IN_D(in_be16, 16, lhz); +DEF_MMIO_IN_D(in_be32, 32, lwz); +DEF_MMIO_IN_X(in_le16, 16, lhbrx); +DEF_MMIO_IN_X(in_le32, 32, lwbrx); -DEF_MMIO_OUT_BE(out_8,     8, stb); -DEF_MMIO_OUT_BE(out_be16, 16, sth); -DEF_MMIO_OUT_BE(out_be32, 32, stw); -DEF_MMIO_OUT_LE(out_le16, 16, sthbrx); -DEF_MMIO_OUT_LE(out_le32, 32, stwbrx); 
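/*
 * The io.h hunk above renames the accessor-generating macros from a
 * byte-order scheme (DEF_MMIO_*_LE/_BE) to an instruction-form scheme
 * (_D for the displacement-form lwz/stw family, _X for the indexed
 * byte-reversed lhbrx/lwbrx/sthbrx/stwbrx family), so a little-endian
 * kernel can reuse the same generators with the roles swapped.  A
 * minimal C model of the resulting fixed-endian accessors follows; it
 * is a sketch only, and it ignores the sync/twi/isync ordering
 * sequences of the real inline-asm versions.
 */
#include <stdint.h>

/* Native byte-order MMIO load: models the D-form (lwz) accessor. */
static inline uint32_t mmio_in_native32(const volatile uint32_t *addr)
{
	return *addr;
}

/* Fixed little-endian read: byte-swapped only on a big-endian host. */
static inline uint32_t mmio_in_le32(const volatile uint32_t *addr)
{
#ifdef __BIG_ENDIAN__
	return __builtin_bswap32(mmio_in_native32(addr));	/* models lwbrx */
#else
	return mmio_in_native32(addr);				/* models lwz */
#endif
}

/* Fixed big-endian read: the mirror image of the above. */
static inline uint32_t mmio_in_be32(const volatile uint32_t *addr)
{
#ifdef __BIG_ENDIAN__
	return mmio_in_native32(addr);				/* models lwz */
#else
	return __builtin_bswap32(mmio_in_native32(addr));	/* models lwbrx */
#endif
}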
+DEF_MMIO_OUT_D(out_be16, 16, sth); +DEF_MMIO_OUT_D(out_be32, 32, stw); +DEF_MMIO_OUT_X(out_le16, 16, sthbrx); +DEF_MMIO_OUT_X(out_le32, 32, stwbrx); +#else +DEF_MMIO_IN_X(in_be16, 16, lhbrx); +DEF_MMIO_IN_X(in_be32, 32, lwbrx); +DEF_MMIO_IN_D(in_le16, 16, lhz); +DEF_MMIO_IN_D(in_le32, 32, lwz); + +DEF_MMIO_OUT_X(out_be16, 16, sthbrx); +DEF_MMIO_OUT_X(out_be32, 32, stwbrx); +DEF_MMIO_OUT_D(out_le16, 16, sth); +DEF_MMIO_OUT_D(out_le32, 32, stw); + +#endif /* __BIG_ENDIAN */ + +/* + * Cache inhibitied accessors for use in real mode, you don't want to use these + * unless you know what you're doing. + * + * NB. These use the cpu byte ordering. + */ +DEF_MMIO_OUT_X(out_rm8,   8, stbcix); +DEF_MMIO_OUT_X(out_rm16, 16, sthcix); +DEF_MMIO_OUT_X(out_rm32, 32, stwcix); +DEF_MMIO_IN_X(in_rm8,   8, lbzcix); +DEF_MMIO_IN_X(in_rm16, 16, lhzcix); +DEF_MMIO_IN_X(in_rm32, 32, lwzcix);  #ifdef __powerpc64__ -DEF_MMIO_OUT_BE(out_be64, 64, std); -DEF_MMIO_IN_BE(in_be64, 64, ld); + +DEF_MMIO_OUT_X(out_rm64, 64, stdcix); +DEF_MMIO_IN_X(in_rm64, 64, ldcix); + +#ifdef __BIG_ENDIAN__ +DEF_MMIO_OUT_D(out_be64, 64, std); +DEF_MMIO_IN_D(in_be64, 64, ld);  /* There is no asm instructions for 64 bits reverse loads and stores */  static inline u64 in_le64(const volatile u64 __iomem *addr) @@ -192,6 +223,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)  {  	out_be64(addr, swab64(val));  } +#else +DEF_MMIO_OUT_D(out_le64, 64, std); +DEF_MMIO_IN_D(in_le64, 64, ld); + +/* There is no asm instructions for 64 bits reverse loads and stores */ +static inline u64 in_be64(const volatile u64 __iomem *addr) +{ +	return swab64(in_le64(addr)); +} + +static inline void out_be64(volatile u64 __iomem *addr, u64 val) +{ +	out_le64(addr, swab64(val)); +} + +#endif  #endif /* __powerpc64__ */  /* diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index c34656a8925..42632c7a2a4 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -30,22 +30,19 @@  #include <asm/machdep.h>  #include <asm/types.h> -#define IOMMU_PAGE_SHIFT      12 -#define IOMMU_PAGE_SIZE       (ASM_CONST(1) << IOMMU_PAGE_SHIFT) -#define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1)) -#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) +#define IOMMU_PAGE_SHIFT_4K      12 +#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) +#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1)) +#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K) + +#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift) +#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1)) +#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))  /* Boot time flags */  extern int iommu_is_off;  extern int iommu_force_on; -/* Pure 2^n version of get_order */ -static __inline__ __attribute_const__ int get_iommu_order(unsigned long size) -{ -	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1; -} - -  /*   * IOMAP_MAX_ORDER defines the largest contiguous block   * of dma space we can get.  
IOMAP_MAX_ORDER = 13 @@ -76,11 +73,21 @@ struct iommu_table {  	struct iommu_pool large_pool;  	struct iommu_pool pools[IOMMU_NR_POOLS];  	unsigned long *it_map;       /* A simple allocation bitmap for now */ +	unsigned long  it_page_shift;/* table iommu page size */  #ifdef CONFIG_IOMMU_API  	struct iommu_group *it_group;  #endif +	void (*set_bypass)(struct iommu_table *tbl, bool enable);  }; +/* Pure 2^n version of get_order */ +static inline __attribute_const__ +int get_iommu_order(unsigned long size, struct iommu_table *tbl) +{ +	return __ilog2((size - 1) >> tbl->it_page_shift) + 1; +} + +  struct scatterlist;  static inline void set_iommu_table_base(struct device *dev, void *base) @@ -101,8 +108,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);   */  extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,  					    int nid); +#ifdef CONFIG_IOMMU_API  extern void iommu_register_group(struct iommu_table *tbl,  				 int pci_domain_number, unsigned long pe_num); +extern int iommu_add_device(struct device *dev); +extern void iommu_del_device(struct device *dev); +#else +static inline void iommu_register_group(struct iommu_table *tbl, +					int pci_domain_number, +					unsigned long pe_num) +{ +} + +static inline int iommu_add_device(struct device *dev) +{ +	return 0; +} + +static inline void iommu_del_device(struct device *dev) +{ +} +#endif /* !CONFIG_IOMMU_API */ + +static inline void set_iommu_table_base_and_group(struct device *dev, +						  void *base) +{ +	set_iommu_table_base(dev, base); +	iommu_add_device(dev); +}  extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,  			struct scatterlist *sglist, int nelems, diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index f51a5580bfd..e20eb95429a 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -20,9 +20,9 @@   */  #define TRACE_WITH_FRAME_BUFFER(func)		\  	mflr	r0;				\ -	stdu	r1, -32(r1);			\ +	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\  	std	r0, 16(r1);			\ -	stdu	r1, -32(r1);			\ +	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\  	bl func;				\  	ld	r1, 0(r1);			\  	ld	r1, 0(r1); @@ -36,8 +36,8 @@   * have to call a C function so call a wrapper that saves all the   * C-clobbered registers.   
*/ -#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) -#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) +#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on) +#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)  /*   * This is used by assembly code to soft-disable interrupts first and diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index ae098c438f0..f016bb699b5 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -19,7 +19,7 @@  static __always_inline bool arch_static_branch(struct static_key *key)  { -	asm goto("1:\n\t" +	asm_volatile_goto("1:\n\t"  		 "nop\n\t"  		 ".pushsection __jump_table,  \"aw\"\n\t"  		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 7b6feab6fd2..af15d4d8d60 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -30,6 +30,7 @@  #include <linux/ptrace.h>  #include <linux/percpu.h>  #include <asm/probes.h> +#include <asm/code-patching.h>  #define  __ARCH_WANT_KPROBES_INSN_SLOT @@ -56,9 +57,9 @@ typedef ppc_opcode_t kprobe_opcode_t;  		if ((colon = strchr(name, ':')) != NULL) {		\  			colon++;					\  			if (*colon != '\0' && *colon != '.')		\ -				addr = *(kprobe_opcode_t **)addr;	\ +				addr = (kprobe_opcode_t *)ppc_function_entry(addr); \  		} else if (name[0] != '.')				\ -			addr = *(kprobe_opcode_t **)addr;		\ +			addr = (kprobe_opcode_t *)ppc_function_entry(addr); \  	} else {							\  		char dot_name[KSYM_NAME_LEN];				\  		dot_name[0] = '.';					\ diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 851bac7afa4..9601741080e 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -74,6 +74,7 @@  #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39  #define BOOKE_INTERRUPT_HV_SYSCALL 40  #define BOOKE_INTERRUPT_HV_PRIV 41 +#define BOOKE_INTERRUPT_LRAT_ERROR 42  /* book3s */ @@ -91,14 +92,18 @@  #define BOOK3S_INTERRUPT_FP_UNAVAIL	0x800  #define BOOK3S_INTERRUPT_DECREMENTER	0x900  #define BOOK3S_INTERRUPT_HV_DECREMENTER	0x980 +#define BOOK3S_INTERRUPT_DOORBELL	0xa00  #define BOOK3S_INTERRUPT_SYSCALL	0xc00  #define BOOK3S_INTERRUPT_TRACE		0xd00  #define BOOK3S_INTERRUPT_H_DATA_STORAGE	0xe00  #define BOOK3S_INTERRUPT_H_INST_STORAGE	0xe20  #define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40 +#define BOOK3S_INTERRUPT_H_DOORBELL	0xe80  #define BOOK3S_INTERRUPT_PERFMON	0xf00  #define BOOK3S_INTERRUPT_ALTIVEC	0xf20  #define BOOK3S_INTERRUPT_VSX		0xf40 +#define BOOK3S_INTERRUPT_FAC_UNAVAIL	0xf60 +#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL	0xf80  #define BOOK3S_IRQPRIO_SYSTEM_RESET		0  #define BOOK3S_IRQPRIO_DATA_SEGMENT		1 @@ -110,19 +115,22 @@  #define BOOK3S_IRQPRIO_FP_UNAVAIL		7  #define BOOK3S_IRQPRIO_ALTIVEC			8  #define BOOK3S_IRQPRIO_VSX			9 -#define BOOK3S_IRQPRIO_SYSCALL			10 -#define BOOK3S_IRQPRIO_MACHINE_CHECK		11 -#define BOOK3S_IRQPRIO_DEBUG			12 -#define BOOK3S_IRQPRIO_EXTERNAL			13 -#define BOOK3S_IRQPRIO_DECREMENTER		14 -#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	15 -#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		16 -#define BOOK3S_IRQPRIO_MAX			17 +#define BOOK3S_IRQPRIO_FAC_UNAVAIL		10 +#define BOOK3S_IRQPRIO_SYSCALL			11 +#define BOOK3S_IRQPRIO_MACHINE_CHECK		12 +#define BOOK3S_IRQPRIO_DEBUG			13 +#define BOOK3S_IRQPRIO_EXTERNAL			14 +#define BOOK3S_IRQPRIO_DECREMENTER		15 +#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR	16 
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL		17 +#define BOOK3S_IRQPRIO_MAX			18  #define BOOK3S_HFLAG_DCBZ32			0x1  #define BOOK3S_HFLAG_SLB			0x2  #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4  #define BOOK3S_HFLAG_NATIVE_PS			0x8 +#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10 +#define BOOK3S_HFLAG_NEW_TLBIE			0x20  #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */  #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */ @@ -136,6 +144,8 @@  #define KVM_GUEST_MODE_NONE	0  #define KVM_GUEST_MODE_GUEST	1  #define KVM_GUEST_MODE_SKIP	2 +#define KVM_GUEST_MODE_GUEST_HV	3 +#define KVM_GUEST_MODE_HOST_HV	4  #define KVM_INST_FETCH_FAILED	-1 diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index fa19e2f1a87..f52f6569452 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -58,16 +58,18 @@ struct hpte_cache {  	struct hlist_node list_pte_long;  	struct hlist_node list_vpte;  	struct hlist_node list_vpte_long; +#ifdef CONFIG_PPC_BOOK3S_64 +	struct hlist_node list_vpte_64k; +#endif  	struct rcu_head rcu_head;  	u64 host_vpn;  	u64 pfn;  	ulong slot;  	struct kvmppc_pte pte; +	int pagesize;  };  struct kvmppc_vcpu_book3s { -	struct kvm_vcpu vcpu; -	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;  	struct kvmppc_sid_map sid_map[SID_MAP_NUM];  	struct {  		u64 esid; @@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {  	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];  	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];  	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; +#ifdef CONFIG_PPC_BOOK3S_64 +	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K]; +#endif  	int hpte_cache_count;  	spinlock_t mmu_lock;  }; @@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {  #define CONTEXT_GUEST		1  #define CONTEXT_GUEST_END	2 -#define VSID_REAL	0x0fffffffffc00000ULL -#define VSID_BAT	0x0fffffffffb00000ULL +#define VSID_REAL	0x07ffffffffc00000ULL +#define VSID_BAT	0x07ffffffffb00000ULL +#define VSID_64K	0x0800000000000000ULL  #define VSID_1T		0x1000000000000000ULL  #define VSID_REAL_DR	0x2000000000000000ULL  #define VSID_REAL_IR	0x4000000000000000ULL @@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)  extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);  extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);  extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); -extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);  extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);  extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);  extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); -extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); +extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, +			       bool iswrite); +extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);  extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);  extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);  extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); @@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,  extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);  extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu); +extern void 
kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);  extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);  extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);  extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte); @@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,  			   bool upper, u32 val);  extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);  extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); -extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, +			bool *writable);  extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,  			unsigned long *rmap, long pte_index, int realmode);  extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, @@ -172,23 +181,24 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,  			unsigned long *hpret);  extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,  			struct kvm_memory_slot *memslot, unsigned long *map); +extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, +			unsigned long mask);  extern void kvmppc_entry_trampoline(void);  extern void kvmppc_hv_entry_trampoline(void); -extern void kvmppc_load_up_fpu(void); -extern void kvmppc_load_up_altivec(void); -extern void kvmppc_load_up_vsx(void);  extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);  extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);  extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); +extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, +				 struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, +				   struct kvmppc_book3s_shadow_vcpu *svcpu);  static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)  { -	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu); +	return vcpu->arch.book3s;  } -extern void kvm_return_point(void); -  /* Also add subarch specific defines */  #ifdef CONFIG_KVM_BOOK3S_32_HANDLER @@ -198,203 +208,6 @@ extern void kvm_return_point(void);  #include <asm/kvm_book3s_64.h>  #endif -#ifdef CONFIG_KVM_BOOK3S_PR - -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) -{ -	return to_book3s(vcpu)->hior; -} - -static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, -			unsigned long pending_now, unsigned long old_pending) -{ -	if (pending_now) -		vcpu->arch.shared->int_pending = 1; -	else if (old_pending) -		vcpu->arch.shared->int_pending = 0; -} - -static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) -{ -	if ( num < 14 ) { -		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -		svcpu->gpr[num] = val; -		svcpu_put(svcpu); -		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; -	} else -		vcpu->arch.gpr[num] = val; -} - -static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) -{ -	if ( num < 14 ) { -		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -		ulong r = svcpu->gpr[num]; -		svcpu_put(svcpu); -		return r; -	} else -		return vcpu->arch.gpr[num]; -} - -static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	svcpu->cr = val; -	svcpu_put(svcpu); -	to_book3s(vcpu)->shadow_vcpu->cr = val; -} - -static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu 
= svcpu_get(vcpu); -	u32 r; -	r = svcpu->cr; -	svcpu_put(svcpu); -	return r; -} - -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	svcpu->xer = val; -	to_book3s(vcpu)->shadow_vcpu->xer = val; -	svcpu_put(svcpu); -} - -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	u32 r; -	r = svcpu->xer; -	svcpu_put(svcpu); -	return r; -} - -static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	svcpu->ctr = val; -	svcpu_put(svcpu); -} - -static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	ulong r; -	r = svcpu->ctr; -	svcpu_put(svcpu); -	return r; -} - -static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	svcpu->lr = val; -	svcpu_put(svcpu); -} - -static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	ulong r; -	r = svcpu->lr; -	svcpu_put(svcpu); -	return r; -} - -static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	svcpu->pc = val; -	svcpu_put(svcpu); -} - -static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	ulong r; -	r = svcpu->pc; -	svcpu_put(svcpu); -	return r; -} - -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) -{ -	ulong pc = kvmppc_get_pc(vcpu); -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	u32 r; - -	/* Load the instruction manually if it failed to do so in the -	 * exit path */ -	if (svcpu->last_inst == KVM_INST_FETCH_FAILED) -		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); - -	r = svcpu->last_inst; -	svcpu_put(svcpu); -	return r; -} - -/* - * Like kvmppc_get_last_inst(), but for fetching a sc instruction. - * Because the sc instruction sets SRR0 to point to the following - * instruction, we have to fetch from pc - 4. - */ -static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu) -{ -	ulong pc = kvmppc_get_pc(vcpu) - 4; -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	u32 r; - -	/* Load the instruction manually if it failed to do so in the -	 * exit path */ -	if (svcpu->last_inst == KVM_INST_FETCH_FAILED) -		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); - -	r = svcpu->last_inst; -	svcpu_put(svcpu); -	return r; -} - -static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) -{ -	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); -	ulong r; -	r = svcpu->fault_dar; -	svcpu_put(svcpu); -	return r; -} - -static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) -{ -	ulong crit_raw = vcpu->arch.shared->critical; -	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); -	bool crit; - -	/* Truncate crit indicators in 32 bit mode */ -	if (!(vcpu->arch.shared->msr & MSR_SF)) { -		crit_raw &= 0xffffffff; -		crit_r1 &= 0xffffffff; -	} - -	/* Critical section when crit == r1 */ -	crit = (crit_raw == crit_r1); -	/* ... 
and we're in supervisor mode */ -	crit = crit && !(vcpu->arch.shared->msr & MSR_PR); - -	return crit; -} -#else /* CONFIG_KVM_BOOK3S_PR */ - -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) -{ -	return 0; -} - -static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, -			unsigned long pending_now, unsigned long old_pending) -{ -} -  static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)  {  	vcpu->arch.gpr[num] = val; @@ -455,16 +268,26 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)  	return vcpu->arch.pc;  } -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); +static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)  { -	ulong pc = kvmppc_get_pc(vcpu); +	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE); +} +static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) +{  	/* Load the instruction manually if it failed to do so in the  	 * exit path */  	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)  		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); -	return vcpu->arch.last_inst; +	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) : +		vcpu->arch.last_inst; +} + +static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +{ +	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));  }  /* @@ -474,14 +297,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)   */  static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)  { -	ulong pc = kvmppc_get_pc(vcpu) - 4; - -	/* Load the instruction manually if it failed to do so in the -	 * exit path */ -	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) -		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); - -	return vcpu->arch.last_inst; +	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);  }  static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) @@ -489,11 +305,10 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)  	return vcpu->arch.fault_dar;  } -static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) +static inline bool is_kvmppc_resume_guest(int r)  { -	return false; +	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);  } -#endif  /* Magic register values loaded into r3 and r4 before the 'sc' assembly   * instruction for the OSI hypercalls */ diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h index ce0ef6ce8f8..c720e0b3238 100644 --- a/arch/powerpc/include/asm/kvm_book3s_32.h +++ b/arch/powerpc/include/asm/kvm_book3s_32.h @@ -22,7 +22,7 @@  static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)  { -	return to_book3s(vcpu)->shadow_vcpu; +	return vcpu->arch.shadow_vcpu;  }  static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 86d638a3b35..d645428a65a 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -20,7 +20,7 @@  #ifndef __ASM_KVM_BOOK3S_64_H__  #define __ASM_KVM_BOOK3S_64_H__ -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE  static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)  {  	preempt_disable(); @@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)  #define SPAPR_TCE_SHIFT		12 -#ifdef CONFIG_KVM_BOOK3S_64_HV 
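/*
 * The kvm_book3s.h hunk above makes guest instruction fetch
 * endian-aware: kvmppc_need_byteswap() compares the guest's MSR_LE
 * bit with the byte order the kernel was built for (MSR_KERNEL &
 * MSR_LE), and kvmppc_get_last_inst_internal() swabs the fetched word
 * when the two differ.  A self-contained sketch of that decision; the
 * MSR_LE value below matches the architected bit but should be read
 * as illustrative here.
 */
#include <stdint.h>
#include <stdbool.h>

#define MSR_LE	0x1ULL	/* little-endian mode bit of the MSR */

/* Swap the instruction image when guest and host byte orders differ. */
static inline uint32_t guest_inst_fixup(uint32_t raw, uint64_t guest_msr,
					uint64_t host_msr)
{
	bool need_swap = (guest_msr & MSR_LE) != (host_msr & MSR_LE);

	return need_swap ? __builtin_bswap32(raw) : raw;
}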
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */  extern unsigned long kvm_rma_pages;  #endif @@ -77,49 +77,164 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)  	return old == 0;  } +static inline int __hpte_actual_psize(unsigned int lp, int psize) +{ +	int i, shift; +	unsigned int mask; + +	/* start from 1 ignoring MMU_PAGE_4K */ +	for (i = 1; i < MMU_PAGE_COUNT; i++) { + +		/* invalid penc */ +		if (mmu_psize_defs[psize].penc[i] == -1) +			continue; +		/* +		 * encoding bits per actual page size +		 *        PTE LP     actual page size +		 *    rrrr rrrz		>=8KB +		 *    rrrr rrzz		>=16KB +		 *    rrrr rzzz		>=32KB +		 *    rrrr zzzz		>=64KB +		 * ....... +		 */ +		shift = mmu_psize_defs[i].shift - LP_SHIFT; +		if (shift > LP_BITS) +			shift = LP_BITS; +		mask = (1 << shift) - 1; +		if ((lp & mask) == mmu_psize_defs[psize].penc[i]) +			return i; +	} +	return -1; +} +  static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,  					     unsigned long pte_index)  { -	unsigned long rb, va_low; +	int b_psize, a_psize; +	unsigned int penc; +	unsigned long rb = 0, va_low, sllp; +	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1); + +	if (!(v & HPTE_V_LARGE)) { +		/* both base and actual psize is 4k */ +		b_psize = MMU_PAGE_4K; +		a_psize = MMU_PAGE_4K; +	} else { +		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) { + +			/* valid entries have a shift value */ +			if (!mmu_psize_defs[b_psize].shift) +				continue; +			a_psize = __hpte_actual_psize(lp, b_psize); +			if (a_psize != -1) +				break; +		} +	} +	/* +	 * Ignore the top 14 bits of va +	 * v have top two bits covering segment size, hence move +	 * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits. +	 * AVA field in v also have the lower 23 bits ignored. +	 * For base page size 4K we need 14 .. 65 bits (so need to +	 * collect extra 11 bits) +	 * For others we need 14..14+i +	 */ +	/* This covers 14..54 bits of va*/  	rb = (v & ~0x7fUL) << 16;		/* AVA field */ +	/* +	 * AVA in v had cleared lower 23 bits. We need to derive +	 * that from pteg index +	 */  	va_low = pte_index >> 3;  	if (v & HPTE_V_SECONDARY)  		va_low = ~va_low; -	/* xor vsid from AVA */ +	/* +	 * get the vpn bits from va_low using reverse of hashing. +	 * In v we have va with 23 bits dropped and then left shifted +	 * HPTE_V_AVPN_SHIFT (7) bits. 
Now to find vsid we need +	 * right shift it with (SID_SHIFT - (23 - 7)) +	 */  	if (!(v & HPTE_V_1TB_SEG)) -		va_low ^= v >> 12; +		va_low ^= v >> (SID_SHIFT - 16);  	else -		va_low ^= v >> 24; +		va_low ^= v >> (SID_SHIFT_1T - 16);  	va_low &= 0x7ff; -	if (v & HPTE_V_LARGE) { -		rb |= 1;			/* L field */ -		if (cpu_has_feature(CPU_FTR_ARCH_206) && -		    (r & 0xff000)) { -			/* non-16MB large page, must be 64k */ -			/* (masks depend on page size) */ -			rb |= 0x1000;		/* page encoding in LP field */ -			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */ -			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */ -		} -	} else { -		/* 4kB page */ -		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */ + +	switch (b_psize) { +	case MMU_PAGE_4K: +		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) | +			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4); +		rb |= sllp << 5;	/*  AP field */ +		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */ +		break; +	default: +	{ +		int aval_shift; +		/* +		 * remaining 7bits of AVA/LP fields +		 * Also contain the rr bits of LP +		 */ +		rb |= (va_low & 0x7f) << 16; +		/* +		 * Now clear not needed LP bits based on actual psize +		 */ +		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1); +		/* +		 * AVAL field 58..77 - base_page_shift bits of va +		 * we have space for 58..64 bits, Missing bits should +		 * be zero filled. +1 is to take care of L bit shift +		 */ +		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1; +		rb |= ((va_low << aval_shift) & 0xfe); + +		rb |= 1;		/* L field */ +		penc = mmu_psize_defs[b_psize].penc[a_psize]; +		rb |= penc << 12;	/* LP field */ +		break; +	}  	}  	rb |= (v >> 54) & 0x300;		/* B field */  	return rb;  } -static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) +static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, +					     bool is_base_size)  { + +	int size, a_psize; +	/* Look at the 8 bit LP value */ +	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); +  	/* only handle 4k, 64k and 16M pages for now */  	if (!(h & HPTE_V_LARGE)) -		return 1ul << 12;		/* 4k page */ -	if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206)) -		return 1ul << 16;		/* 64k page */ -	if ((l & 0xff000) == 0) -		return 1ul << 24;		/* 16M page */ -	return 0;				/* error */ +		return 1ul << 12; +	else { +		for (size = 0; size < MMU_PAGE_COUNT; size++) { +			/* valid entries have a shift value */ +			if (!mmu_psize_defs[size].shift) +				continue; + +			a_psize = __hpte_actual_psize(lp, size); +			if (a_psize != -1) { +				if (is_base_size) +					return 1ul << mmu_psize_defs[size].shift; +				return 1ul << mmu_psize_defs[a_psize].shift; +			} +		} + +	} +	return 0; +} + +static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) +{ +	return __hpte_page_size(h, l, 0); +} + +static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) +{ +	return __hpte_page_size(h, l, 1);  }  static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) @@ -278,7 +393,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)  		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));  } -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  /*   * Note modification of an HPTE; set the HPTE modified bit   * if anyone is interested. 
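/*
 * The rewritten compute_tlbie_rb() and __hpte_page_size() above stop
 * special-casing 64K/16M pages and instead decode the HPTE's 8-bit LP
 * field against the per-page-size "penc" encodings in mmu_psize_defs:
 * for each candidate actual size, only the low
 * (actual_shift - LP_SHIFT) bits of LP are significant.  The sketch
 * below mirrors __hpte_actual_psize() with a made-up three-size
 * encoding table; LP_SHIFT/LP_BITS and the penc values are
 * illustrative stand-ins for the real MMU header definitions.
 */
#define MMU_PAGE_COUNT	3	/* 4K, 64K, 16M in this sketch */
#define LP_SHIFT	12	/* illustrative LP field position */
#define LP_BITS		8	/* illustrative LP field width */

static const int psize_shift[MMU_PAGE_COUNT] = { 12, 16, 24 };

/* penc[base][actual]: LP encoding, -1 = invalid pairing (made up). */
static const int penc[MMU_PAGE_COUNT][MMU_PAGE_COUNT] = {
	{ -1, -1, -1 },		/* base 4K: small pages need no LP decode */
	{ -1, 0x1, -1 },	/* base 64K */
	{ -1, -1, 0x0 },	/* base 16M */
};

/* Mirrors __hpte_actual_psize(): find the actual size whose penc
 * matches the significant low bits of LP for this base size. */
static int hpte_actual_psize(unsigned int lp, int base)
{
	int a, shift;
	unsigned int mask;

	for (a = 1; a < MMU_PAGE_COUNT; a++) {	/* skip 4K at index 0 */
		if (penc[base][a] == -1)
			continue;
		shift = psize_shift[a] - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1u << shift) - 1;
		if ((lp & mask) == (unsigned int)penc[base][a])
			return a;
	}
	return -1;
}

/*
 * __hpte_page_size() then tries every base size through this decode
 * and returns 1ul << shift for either the base or the actual size.
 */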
@@ -289,6 +404,18 @@ static inline void note_hpte_modification(struct kvm *kvm,  	if (atomic_read(&kvm->arch.hpte_mod_interest))  		rev->guest_rpte |= HPTE_GR_MODIFIED;  } -#endif /* CONFIG_KVM_BOOK3S_64_HV */ + +/* + * Like kvm_memslots(), but for use in real mode when we can't do + * any RCU stuff (since the secondary threads are offline from the + * kernel's point of view), and we can't print anything. + * Thus we use rcu_dereference_raw() rather than rcu_dereference_check(). + */ +static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm) +{ +	return rcu_dereference_raw_notrace(kvm->memslots); +} + +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */  #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 9039d3c97ee..5bdfb5dd340 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -79,20 +79,22 @@ struct kvmppc_host_state {  	ulong vmhandler;  	ulong scratch0;  	ulong scratch1; +	ulong scratch2;  	u8 in_guest;  	u8 restore_hid5;  	u8 napping; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  	u8 hwthread_req;  	u8 hwthread_state;  	u8 host_ipi; +	u8 ptid;  	struct kvm_vcpu *kvm_vcpu;  	struct kvmppc_vcore *kvm_vcore;  	unsigned long xics_phys;  	u32 saved_xirr;  	u64 dabr; -	u64 host_mmcr[3]; +	u64 host_mmcr[7];	/* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */  	u32 host_pmc[8];  	u64 host_purr;  	u64 host_spurr; @@ -101,21 +103,24 @@ struct kvmppc_host_state {  #endif  #ifdef CONFIG_PPC_BOOK3S_64  	u64 cfar; +	u64 ppr; +	u64 host_fscr;  #endif  };  struct kvmppc_book3s_shadow_vcpu { +	bool in_use;  	ulong gpr[14];  	u32 cr;  	u32 xer; - -	u32 fault_dsisr; -	u32 last_inst;  	ulong ctr;  	ulong lr;  	ulong pc; +  	ulong shadow_srr1;  	ulong fault_dar; +	u32 fault_dsisr; +	u32 last_inst;  #ifdef CONFIG_PPC_BOOK3S_32  	u32     sr[16];			/* Guest SRs */ @@ -129,6 +134,7 @@ struct kvmppc_book3s_shadow_vcpu {  		u64     esid;  		u64     vsid;  	} slb[64];			/* guest SLB */ +	u64 shadow_fscr;  #endif  }; diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index d3c1eb34c98..c7aed6105ff 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -26,7 +26,12 @@  /* LPIDs we support with this build -- runtime limit may be lower */  #define KVMPPC_NR_LPIDS                        64 -#define KVMPPC_INST_EHPRIV	0x7c00021c +#define KVMPPC_INST_EHPRIV		0x7c00021c +#define EHPRIV_OC_SHIFT			11 +/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */ +#define EHPRIV_OC_DEBUG			1 +#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \ +					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))  static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)  { @@ -58,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)  	return vcpu->arch.xer;  } +static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) +{ +	/* XXX Would need to check TLB entry */ +	return false; +} +  static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)  {  	return vcpu->arch.last_inst; @@ -97,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)  {  	return vcpu->arch.fault_dear;  } - -static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu) -{ -	return vcpu->arch.shared->msr; -}  #endif /* __ASM_KVM_BOOKE_H__ */ diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h index 3a79f532571..e5f048bbcb7 
100644 --- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h +++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h @@ -36,26 +36,21 @@   *   *(r8 + GPR11) = saved r11   *   * 64-bit host - * Expected inputs (GEN/GDBELL/DBG/MC exception types): + * Expected inputs (GEN/GDBELL/DBG/CRIT/MC exception types):   *  r10 = saved CR   *  r13 = PACA_POINTER   *  *(r13 + PACA_EX##type + EX_R10) = saved r10   *  *(r13 + PACA_EX##type + EX_R11) = saved r11   *  SPRN_SPRG_##type##_SCRATCH = saved r13   * -  * Expected inputs (CRIT exception type): - *  r10 = saved CR - *  r13 = PACA_POINTER - *  *(r13 + PACA_EX##type + EX_R10) = saved r10 - *  *(r13 + PACA_EX##type + EX_R11) = saved r11 - *  *(r13 + PACA_EX##type + EX_R13) = saved r13 - *   * Expected inputs (TLB exception type):   *  r10 = saved CR + *  r12 = extlb pointer   *  r13 = PACA_POINTER - *  *(r13 + PACA_EX##type + EX_TLB_R10) = saved r10 - *  *(r13 + PACA_EX##type + EX_TLB_R11) = saved r11 - *  SPRN_SPRG_GEN_SCRATCH = saved r13 + *  *(r12 + EX_TLB_R10) = saved r10 + *  *(r12 + EX_TLB_R11) = saved r11 + *  *(r12 + EX_TLB_R13) = saved r13 + *  SPRN_SPRG_GEN_SCRATCH = saved r12   *   * Only the bolted version of TLB miss exception handlers is supported now.   */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 33283532e9d..bb66d8b8efd 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);  #endif -/* We don't currently support large pages. */ -#define KVM_HPAGE_GFN_SHIFT(x)	0 -#define KVM_NR_PAGE_SIZES	1 -#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31) -  #define HPTEG_CACHE_NUM			(1 << 15)  #define HPTEG_HASH_BITS_PTE		13  #define HPTEG_HASH_BITS_PTE_LONG	12  #define HPTEG_HASH_BITS_VPTE		13  #define HPTEG_HASH_BITS_VPTE_LONG	5 +#define HPTEG_HASH_BITS_VPTE_64K	11  #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)  #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)  #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)  #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG) +#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)  /* Physical Address Mask - allowed range of real mode RAM access */  #define KVM_PAM			0x0fffffffffffffffULL @@ -89,6 +86,9 @@ struct lppaca;  struct slb_shadow;  struct dtl_entry; +struct kvmppc_vcpu_book3s; +struct kvmppc_book3s_shadow_vcpu; +  struct kvm_vm_stat {  	u32 remote_tlb_flush;  }; @@ -224,15 +224,15 @@ struct revmap_entry {  #define KVMPPC_GOT_PAGE		0x80  struct kvm_arch_memory_slot { -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  	unsigned long *rmap;  	unsigned long *slot_phys; -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */  };  struct kvm_arch {  	unsigned int lpid; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  	unsigned long hpt_virt;  	struct revmap_entry *revmap;  	unsigned int host_lpid; @@ -256,7 +256,10 @@ struct kvm_arch {  	cpumask_t need_tlb_flush;  	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];  	int hpt_cma_alloc; -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE +	struct mutex hpt_mutex; +#endif  #ifdef CONFIG_PPC_BOOK3S_64  	struct list_head spapr_tce_tables;  	struct list_head rtas_tokens; @@ -267,6 +270,7 @@ struct kvm_arch {  #ifdef CONFIG_KVM_XICS  	struct kvmppc_xics *xics;  #endif +	struct kvmppc_ops *kvm_ops;  };  /* @@ -284,6 
+288,7 @@ struct kvmppc_vcore {  	int n_woken;  	int nap_count;  	int napping_threads; +	int first_vcpuid;  	u16 pcpu;  	u16 last_cpu;  	u8 vcore_state; @@ -294,6 +299,12 @@ struct kvmppc_vcore {  	u64 stolen_tb;  	u64 preempt_tb;  	struct kvm_vcpu *runner; +	struct kvm *kvm; +	u64 tb_offset;		/* guest timebase - host timebase */ +	ulong lpcr; +	u32 arch_compat; +	ulong pcr; +	ulong dpdes;		/* doorbell state (POWER8) */  };  #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff) @@ -328,6 +339,7 @@ struct kvmppc_pte {  	bool may_read		: 1;  	bool may_write		: 1;  	bool may_execute	: 1; +	u8 page_size;		/* MMU_PAGE_xxx */  };  struct kvmppc_mmu { @@ -340,7 +352,8 @@ struct kvmppc_mmu {  	/* book3s */  	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);  	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); -	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); +	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, +		      struct kvmppc_pte *pte, bool data, bool iswrite);  	void (*reset_msr)(struct kvm_vcpu *vcpu);  	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);  	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); @@ -360,6 +373,7 @@ struct kvmppc_slb {  	bool large	: 1;	/* PTEs are 16MB */  	bool tb		: 1;	/* 1TB segment */  	bool class	: 1; +	u8 base_page_size;	/* MMU_PAGE_xxx */  };  # ifdef CONFIG_PPC_FSL_BOOK3E @@ -377,17 +391,6 @@ struct kvmppc_slb {  #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */  #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */ -struct kvmppc_booke_debug_reg { -	u32 dbcr0; -	u32 dbcr1; -	u32 dbcr2; -#ifdef CONFIG_KVM_E500MC -	u32 dbcr4; -#endif -	u64 iac[KVMPPC_BOOKE_MAX_IAC]; -	u64 dac[KVMPPC_BOOKE_MAX_DAC]; -}; -  #define KVMPPC_IRQ_DEFAULT	0  #define KVMPPC_IRQ_MPIC		1  #define KVMPPC_IRQ_XICS		2 @@ -402,12 +405,15 @@ struct kvm_vcpu_arch {  	int slb_max;		/* 1 + index of last valid entry in slb[] */  	int slb_nr;		/* total number of entries in SLB */  	struct kvmppc_mmu mmu; +	struct kvmppc_vcpu_book3s *book3s; +#endif +#ifdef CONFIG_PPC_BOOK3S_32 +	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;  #endif  	ulong gpr[32]; -	u64 fpr[32]; -	u64 fpscr; +	struct thread_fp_state fp;  #ifdef CONFIG_SPE  	ulong evr[32]; @@ -416,12 +422,7 @@ struct kvm_vcpu_arch {  	u64 acc;  #endif  #ifdef CONFIG_ALTIVEC -	vector128 vr[32]; -	vector128 vscr; -#endif - -#ifdef CONFIG_VSX -	u64 vsr[64]; +	struct thread_vr_state vr;  #endif  #ifdef CONFIG_KVM_BOOKE_HV @@ -448,6 +449,9 @@ struct kvm_vcpu_arch {  	ulong pc;  	ulong ctr;  	ulong lr; +#ifdef CONFIG_PPC_BOOK3S +	ulong tar; +#endif  	ulong xer;  	u32 cr; @@ -457,12 +461,32 @@ struct kvm_vcpu_arch {  	ulong guest_owned_ext;  	ulong purr;  	ulong spurr; +	ulong ic; +	ulong vtb;  	ulong dscr;  	ulong amr;  	ulong uamor; +	ulong iamr;  	u32 ctrl; +	u32 dabrx;  	ulong dabr; +	ulong dawr; +	ulong dawrx; +	ulong ciabr;  	ulong cfar; +	ulong ppr; +	ulong pspb; +	ulong fscr; +	ulong shadow_fscr; +	ulong ebbhr; +	ulong ebbrr; +	ulong bescr; +	ulong csigr; +	ulong tacr; +	ulong tcscr; +	ulong acop; +	ulong wort; +	ulong shadow_srr1;  #endif  	u32 vrsave; /* also USPRG0 */  	u32 mmucr; @@ -496,8 +520,33 @@ struct kvm_vcpu_arch {  	u32 ccr1;  	u32 dbsr; -	u64 mmcr[3]; +	u64 mmcr[5];  	u32 pmc[8]; +	u32 spmc[2]; +	u64 siar; +	u64 sdar; +	u64 sier; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +	u64 tfhar; +	u64 texasr; +	u64 tfiar; + +	u32 cr_tm; +	u64 lr_tm; +	u64 ctr_tm; +	u64 amr_tm; +	u64 ppr_tm; +	u64 dscr_tm; +	u64 tar_tm; + +	ulong gpr_tm[32]; + +	
struct thread_fp_state fp_tm; + +	struct thread_vr_state vr_tm; +	u32 vrsave_tm; /* also USPRG0 */ + +#endif  #ifdef CONFIG_KVM_EXIT_TIMING  	struct mutex exit_timing_lock; @@ -516,6 +565,7 @@ struct kvm_vcpu_arch {  #ifdef CONFIG_PPC_BOOK3S  	ulong fault_dar;  	u32 fault_dsisr; +	unsigned long intr_msr;  #endif  #ifdef CONFIG_BOOKE @@ -531,10 +581,14 @@ struct kvm_vcpu_arch {  	u32 eptcfg;  	u32 epr;  	u32 crit_save; -	struct kvmppc_booke_debug_reg dbg_reg; +	/* guest debug registers*/ +	struct debug_reg dbg_reg; +	/* hardware visible debug registers when in guest state */ +	struct debug_reg shadow_dbg_reg;  #endif  	gpa_t paddr_accessed;  	gva_t vaddr_accessed; +	pgd_t *pgdir;  	u8 io_gpr; /* GPR used as IO source/target */  	u8 mmio_is_bigendian; @@ -572,8 +626,12 @@ struct kvm_vcpu_arch {  	wait_queue_head_t cpu_run;  	struct kvm_vcpu_arch_shared *shared; +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) +	bool shared_big_endian; +#endif  	unsigned long magic_page_pa; /* phys addr to map the magic page to */  	unsigned long magic_page_ea; /* effect. addr to map the magic page to */ +	bool disable_kernel_nx;  	int irq_type;		/* one of KVM_IRQ_* */  	int irq_cpu_id; @@ -582,7 +640,7 @@ struct kvm_vcpu_arch {  	struct kvmppc_icp *icp; /* XICS presentation controller */  #endif -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  	struct kvm_vcpu_arch_shared shregs;  	unsigned long pgfault_addr; @@ -592,7 +650,6 @@ struct kvm_vcpu_arch {  	struct list_head run_list;  	struct task_struct *run_task;  	struct kvm_run *kvm_run; -	pgd_t *pgdir;  	spinlock_t vpa_update_lock;  	struct kvmppc_vpa vpa; @@ -608,6 +665,8 @@ struct kvm_vcpu_arch {  #endif  }; +#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET] +  /* Values for vcpu->arch.state */  #define KVMPPC_VCPU_NOTREADY		0  #define KVMPPC_VCPU_RUNNABLE		1 diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 2b119654b4c..336a91acb8b 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -39,10 +39,6 @@ static inline int kvm_para_available(void)  	return 1;  } -extern unsigned long kvm_hypercall(unsigned long *in, -				   unsigned long *out, -				   unsigned long nr); -  #else  static inline int kvm_para_available(void) @@ -50,82 +46,8 @@ static inline int kvm_para_available(void)  	return 0;  } -static unsigned long kvm_hypercall(unsigned long *in, -				   unsigned long *out, -				   unsigned long nr) -{ -	return EV_UNIMPLEMENTED; -} -  #endif -static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) -{ -	unsigned long in[8]; -	unsigned long out[8]; -	unsigned long r; - -	r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -	*r2 = out[0]; - -	return r; -} - -static inline long kvm_hypercall0(unsigned int nr) -{ -	unsigned long in[8]; -	unsigned long out[8]; - -	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) -{ -	unsigned long in[8]; -	unsigned long out[8]; - -	in[0] = p1; -	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, -				  unsigned long p2) -{ -	unsigned long in[8]; -	unsigned long out[8]; - -	in[0] = p1; -	in[1] = p2; -	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, -				  unsigned long p2, unsigned long p3) -{ -	unsigned long in[8]; -	unsigned long out[8]; - -	
in[0] = p1; -	in[1] = p2; -	in[2] = p3; -	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, -				  unsigned long p2, unsigned long p3, -				  unsigned long p4) -{ -	unsigned long in[8]; -	unsigned long out[8]; - -	in[0] = p1; -	in[1] = p2; -	in[2] = p3; -	in[3] = p4; -	return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -  static inline unsigned int kvm_arch_para_features(void)  {  	unsigned long r; @@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)  	if (!kvm_para_available())  		return 0; -	if(kvm_hypercall0_1(KVM_HC_FEATURES, &r)) +	if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))  		return 0;  	return r; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index b15554a26c2..9c89cdd067a 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);  extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);  extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,                                unsigned int rt, unsigned int bytes, -                              int is_bigendian); +			      int is_default_endian);  extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,                                 unsigned int rt, unsigned int bytes, -                               int is_bigendian); +			       int is_default_endian);  extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, -                               u64 val, unsigned int bytes, int is_bigendian); +			       u64 val, unsigned int bytes, +			       int is_default_endian);  extern int kvmppc_emulate_instruction(struct kvm_run *run,                                        struct kvm_vcpu *vcpu); @@ -106,13 +107,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,                                         struct kvm_interrupt *irq);  extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);  extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu); - -extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, -                                  unsigned int op, int *advance); -extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, -				     ulong val); -extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, -				     ulong *val);  extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);  extern int kvmppc_booke_init(void); @@ -135,17 +129,19 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,  				struct kvm_create_spapr_tce *args);  extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,  			     unsigned long ioba, unsigned long tce); -extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, -				struct kvm_allocate_rma *rma); +extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, +			     unsigned long ioba);  extern struct kvm_rma_info *kvm_alloc_rma(void);  extern void kvm_release_rma(struct kvm_rma_info *ri);  extern struct page *kvm_alloc_hpt(unsigned long nr_pages);  extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);  extern int kvmppc_core_init_vm(struct kvm *kvm);  extern void kvmppc_core_destroy_vm(struct kvm *kvm); -extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free, +extern void kvmppc_core_free_memslot(struct kvm *kvm, +				     struct kvm_memory_slot *free,  				     struct 
kvm_memory_slot *dont); -extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, +extern int kvmppc_core_create_memslot(struct kvm *kvm, +				      struct kvm_memory_slot *slot,  				      unsigned long npages);  extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,  				struct kvm_memory_slot *memslot, @@ -177,6 +173,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,  extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);  extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq); +union kvmppc_one_reg { +	u32	wval; +	u64	dval; +	vector128 vval; +	u64	vsxval[2]; +	struct { +		u64	addr; +		u64	length; +	}	vpaval; +}; + +struct kvmppc_ops { +	struct module *owner; +	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id, +			   union kvmppc_one_reg *val); +	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id, +			   union kvmppc_one_reg *val); +	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); +	void (*vcpu_put)(struct kvm_vcpu *vcpu); +	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr); +	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); +	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id); +	void (*vcpu_free)(struct kvm_vcpu *vcpu); +	int (*check_requests)(struct kvm_vcpu *vcpu); +	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log); +	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot); +	int (*prepare_memory_region)(struct kvm *kvm, +				     struct kvm_memory_slot *memslot, +				     struct kvm_userspace_memory_region *mem); +	void (*commit_memory_region)(struct kvm *kvm, +				     struct kvm_userspace_memory_region *mem, +				     const struct kvm_memory_slot *old); +	int (*unmap_hva)(struct kvm *kvm, unsigned long hva); +	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, +			   unsigned long end); +	int (*age_hva)(struct kvm *kvm, unsigned long hva); +	int (*test_age_hva)(struct kvm *kvm, unsigned long hva); +	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte); +	void (*mmu_destroy)(struct kvm_vcpu *vcpu); +	void (*free_memslot)(struct kvm_memory_slot *free, +			     struct kvm_memory_slot *dont); +	int (*create_memslot)(struct kvm_memory_slot *slot, +			      unsigned long npages); +	int (*init_vm)(struct kvm *kvm); +	void (*destroy_vm)(struct kvm *kvm); +	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info); +	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu, +			  unsigned int inst, int *advance); +	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); +	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); +	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu); +	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, +			      unsigned long arg); + +}; + +extern struct kvmppc_ops *kvmppc_hv_ops; +extern struct kvmppc_ops *kvmppc_pr_ops; + +static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) +{ +	return kvm->arch.kvm_ops == kvmppc_hv_ops; +} +  /*   * Cuts out inst bits with ordering according to spec.   * That means the leftmost bit is zero. All given bits are included. 
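As a worked illustration of that MSB-0 convention (a sketch, not part of the patch; it assumes the usual in-kernel definition of kvmppc_get_field(), which builds a mask of (lsb - msb + 1) one-bits and shifts the value right by (63 - lsb)):

	u64 inst = 0x7c00021cULL;			/* KVMPPC_INST_EHPRIV, zero-extended to 64 bits */
	u32 lo  = kvmppc_get_field(inst, 56, 63);	/* bits 56..63 = low byte = 0x1c */
	u32 msb = kvmppc_get_field(inst, 0, 0);		/* bit 0 = leftmost bit of the u64 = 0 */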
@@ -210,17 +272,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)  	return r;  } -union kvmppc_one_reg { -	u32	wval; -	u64	dval; -	vector128 vval; -	u64	vsxval[2]; -	struct { -		u64	addr; -		u64	length; -	}	vpaval; -}; -  #define one_reg_size(id)	\  	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) @@ -245,10 +296,10 @@ union kvmppc_one_reg {  	__v;					\  }) -void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);  int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); -void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);  int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);  int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); @@ -260,7 +311,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);  struct openpic; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  extern void kvm_cma_reserve(void) __init;  static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)  { @@ -269,10 +320,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)  static inline u32 kvmppc_get_xics_latch(void)  { -	u32 xirr = get_paca()->kvm_hstate.saved_xirr; +	u32 xirr; +	xirr = get_paca()->kvm_hstate.saved_xirr;  	get_paca()->kvm_hstate.saved_xirr = 0; -  	return xirr;  } @@ -281,7 +332,14 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)  	paca[cpu].kvm_hstate.host_ipi = host_ipi;  } -extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu); +static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) +{ +	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu); +} + +extern void kvm_hv_vm_activated(void); +extern void kvm_hv_vm_deactivated(void); +extern bool kvm_hv_mode_active(void);  #else  static inline void __init kvm_cma_reserve(void) @@ -302,6 +360,9 @@ static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)  {  	kvm_vcpu_kick(vcpu);  } + +static inline bool kvm_hv_mode_active(void)		{ return false; } +  #endif  #ifdef CONFIG_KVM_XICS @@ -395,6 +456,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)  }  /* + * Shared struct helpers. The shared struct can be little or big endian, + * depending on the guest endianness. So expose helpers to all of them. 
+ */ +static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu) +{ +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) +	/* Only Book3S_64 PR supports bi-endian for now */ +	return vcpu->arch.shared_big_endian; +#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__) +	/* Book3s_64 HV on little endian is always little endian */ +	return false; +#else +	return true; +#endif +} + +#define SHARED_WRAPPER_GET(reg, size)					\ +static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\ +{									\ +	if (kvmppc_shared_big_endian(vcpu))				\ +	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\ +	else								\ +	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\ +}									\ + +#define SHARED_WRAPPER_SET(reg, size)					\ +static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\ +{									\ +	if (kvmppc_shared_big_endian(vcpu))				\ +	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\ +	else								\ +	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\ +}									\ + +#define SHARED_WRAPPER(reg, size)					\ +	SHARED_WRAPPER_GET(reg, size)					\ +	SHARED_WRAPPER_SET(reg, size)					\ + +SHARED_WRAPPER(critical, 64) +SHARED_WRAPPER(sprg0, 64) +SHARED_WRAPPER(sprg1, 64) +SHARED_WRAPPER(sprg2, 64) +SHARED_WRAPPER(sprg3, 64) +SHARED_WRAPPER(srr0, 64) +SHARED_WRAPPER(srr1, 64) +SHARED_WRAPPER(dar, 64) +SHARED_WRAPPER_GET(msr, 64) +static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) +{ +	if (kvmppc_shared_big_endian(vcpu)) +	       vcpu->arch.shared->msr = cpu_to_be64(val); +	else +	       vcpu->arch.shared->msr = cpu_to_le64(val); +} +SHARED_WRAPPER(dsisr, 32) +SHARED_WRAPPER(int_pending, 32) +SHARED_WRAPPER(sprg4, 64) +SHARED_WRAPPER(sprg5, 64) +SHARED_WRAPPER(sprg6, 64) +SHARED_WRAPPER(sprg7, 64) + +static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) +{ +	if (kvmppc_shared_big_endian(vcpu)) +	       return be32_to_cpu(vcpu->arch.shared->sr[nr]); +	else +	       return le32_to_cpu(vcpu->arch.shared->sr[nr]); +} + +static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val) +{ +	if (kvmppc_shared_big_endian(vcpu)) +	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val); +	else +	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val); +} + +/*   * Please call after prepare_to_enter. This function puts the lazy ee and irq   * disabled tracking state back to normal mode, without actually enabling   * interrupts. @@ -404,6 +543,12 @@ static inline void kvmppc_fix_ee_before_entry(void)  	trace_hardirqs_on();  #ifdef CONFIG_PPC64 +	/* +	 * To avoid races, the caller must have gone directly from having +	 * interrupts fully-enabled to hard-disabled. 
+	 */ +	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS); +  	/* Only need to enable IRQs by hard enabling them after this */  	local_paca->irq_happened = 0;  	local_paca->soft_enabled = 1; @@ -425,7 +570,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)  	msr_64bit = MSR_SF;  #endif -	if (!(vcpu->arch.shared->msr & msr_64bit)) +	if (!(kvmppc_get_msr(vcpu) & msr_64bit))  		ea = (uint32_t)ea;  	return ea; diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h index b36f650a13f..e3ad5c72724 100644 --- a/arch/powerpc/include/asm/linkage.h +++ b/arch/powerpc/include/asm/linkage.h @@ -2,6 +2,7 @@  #define _ASM_POWERPC_LINKAGE_H  #ifdef CONFIG_PPC64 +#if !defined(_CALL_ELF) || _CALL_ELF != 2  #define cond_syscall(x) \  	asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n"		\  	     "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n") @@ -9,5 +10,6 @@  	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"	\  	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)  #endif +#endif  #endif	/* _ASM_POWERPC_LINKAGE_H */ diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 4470d1e34d2..d0a2a2f9956 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -84,8 +84,8 @@ struct lppaca {  	 * the processor is yielded (either because of an OS yield or a  	 * hypervisor preempt).  An even value implies that the processor is  	 * currently executing. -	 * NOTE: This value will ALWAYS be zero for dedicated processors and -	 * will NEVER be zero for shared processors (ie, initialized to a 1). +	 * NOTE: Even dedicated processor partitions can yield, so this +	 * field cannot be used to determine if we are shared or dedicated.  	 */  	volatile __be32 yield_count;  	volatile __be32 dispersion_count; /* dispatch changed physical cpu */ @@ -106,15 +106,15 @@ extern struct lppaca lppaca[];  #define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)  /* - * Old kernels used a reserved bit in the VPA to determine if it was running - * in shared processor mode. New kernels look for a non zero yield count - * but KVM still needs to set the bit to keep the old stuff happy. + * We are using a non-architected field to determine if a partition is + * shared or dedicated. This currently works on both KVM and PHYP, but + * we will have to transition to something better.   */  #define LPPACA_OLD_SHARED_PROC		2  static inline bool lppaca_shared_proc(struct lppaca *l)  { -	return l->yield_count != 0; +	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);  }  /* @@ -132,8 +132,6 @@ struct slb_shadow {  	} save_area[SLB_NUM_BOLTED];  } ____cacheline_aligned; -extern struct slb_shadow slb_shadow[];  /*   * Layout of entries in the hypervisor's dispatch trace log buffer.   
*/ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8b480901165..f92b0b54e92 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -78,6 +78,18 @@ struct machdep_calls {  				    long index);  	void		(*tce_flush)(struct iommu_table *tbl); +	/* _rm versions are for real mode use only */ +	int		(*tce_build_rm)(struct iommu_table *tbl, +				     long index, +				     long npages, +				     unsigned long uaddr, +				     enum dma_data_direction direction, +				     struct dma_attrs *attrs); +	void		(*tce_free_rm)(struct iommu_table *tbl, +				    long index, +				    long npages); +	void		(*tce_flush_rm)(struct iommu_table *tbl); +  	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,  				   unsigned long flags, void *caller);  	void		(*iounmap)(volatile void __iomem *token); @@ -86,6 +98,9 @@ struct machdep_calls {  	void		(*iommu_save)(void);  	void		(*iommu_restore)(void);  #endif +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +	unsigned long	(*memory_block_size)(void); +#endif  #endif /* CONFIG_PPC64 */  	void		(*pci_dma_dev_setup)(struct pci_dev *dev); @@ -101,6 +116,8 @@ struct machdep_calls {  	/* Optional, may be NULL. */  	void		(*show_cpuinfo)(struct seq_file *m);  	void		(*show_percpuinfo)(struct seq_file *m, int i); +	/* Returns the current operating frequency of "cpu" in Hz */ +	unsigned long  	(*get_proc_freq)(unsigned int cpu);  	void		(*init_IRQ)(void); @@ -158,6 +175,9 @@ struct machdep_calls {  	int		(*system_reset_exception)(struct pt_regs *regs);  	int 		(*machine_check_exception)(struct pt_regs *regs); +	/* Called during machine check exception to retrieve fixup address. */ +	bool		(*mce_check_early_recovery)(struct pt_regs *regs); +  	/* Motherboard/chipset features. This is a kind of general purpose  	 * hook used to control some machine specific features (like reset  	 * lines, chip power control, etc...). @@ -226,6 +246,9 @@ struct machdep_calls {  	/* Called during PCI resource reassignment */  	resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type); +	/* Reset the secondary bus of a bridge */ +	void  (*pcibios_reset_secondary_bus)(struct pci_dev *dev); +  	/* Called to shutdown machine specific hardware not already controlled  	 * by other drivers.  	 */ @@ -263,6 +286,14 @@ struct machdep_calls {  	ssize_t (*cpu_probe)(const char *, size_t);  	ssize_t (*cpu_release)(const char *, size_t);  #endif + +#ifdef CONFIG_ARCH_RANDOM +	int (*get_random_long)(unsigned long *v); +#endif + +#ifdef CONFIG_MEMORY_HOTREMOVE +	int (*remove_memory)(u64, u64); +#endif  };  extern void e500_idle(void); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h new file mode 100644 index 00000000000..f97d8cb6bdf --- /dev/null +++ b/arch/powerpc/include/asm/mce.h @@ -0,0 +1,198 @@ +/* + * Machine check exception header file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright 2013 IBM Corporation + * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> + */ + +#ifndef __ASM_PPC64_MCE_H__ +#define __ASM_PPC64_MCE_H__ + +#include <linux/bitops.h> + +/* + * Machine Check bits on power7 and power8 + */ +#define P7_SRR1_MC_LOADSTORE(srr1)	((srr1) & PPC_BIT(42)) /* P8 too */ + +/* SRR1 bits for machine check (On Power7 and Power8) */ +#define P7_SRR1_MC_IFETCH(srr1)	((srr1) & PPC_BITMASK(43, 45)) /* P8 too */ + +#define P7_SRR1_MC_IFETCH_UE		(0x1 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_PARITY	(0x2 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT	(0x3 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_BOTH	(0x4 << PPC_BITLSHIFT(45)) +#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT	(0x5 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD	(0x6 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL	(0x7 << PPC_BITLSHIFT(45)) + +/* SRR1 bits for machine check (On Power8) */ +#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT	(0x4 << PPC_BITLSHIFT(45)) + +/* DSISR bits for machine check (On Power7 and Power8) */ +#define P7_DSISR_MC_UE			(PPC_BIT(48))	/* P8 too */ +#define P7_DSISR_MC_UE_TABLEWALK	(PPC_BIT(49))	/* P8 too */ +#define P7_DSISR_MC_ERAT_MULTIHIT	(PPC_BIT(52))	/* P8 too */ +#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB	(PPC_BIT(53))	/* P8 too */ +#define P7_DSISR_MC_SLB_PARITY_MFSLB	(PPC_BIT(55))	/* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT	(PPC_BIT(56))	/* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT_PARITY	(PPC_BIT(57))	/* P8 too */ + +/* + * DSISR bits for machine check (Power8) in addition to above. 
+ * Secondary DERAT Multihit + */ +#define P8_DSISR_MC_ERAT_MULTIHIT_SEC	(PPC_BIT(54)) + +/* SLB error bits */ +#define P7_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_ERAT_MULTIHIT | \ +					 P7_DSISR_MC_SLB_PARITY_MFSLB | \ +					 P7_DSISR_MC_SLB_MULTIHIT | \ +					 P7_DSISR_MC_SLB_MULTIHIT_PARITY) + +#define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \ +					 P8_DSISR_MC_ERAT_MULTIHIT_SEC) +enum MCE_Version { +	MCE_V1 = 1, +}; + +enum MCE_Severity { +	MCE_SEV_NO_ERROR = 0, +	MCE_SEV_WARNING = 1, +	MCE_SEV_ERROR_SYNC = 2, +	MCE_SEV_FATAL = 3, +}; + +enum MCE_Disposition { +	MCE_DISPOSITION_RECOVERED = 0, +	MCE_DISPOSITION_NOT_RECOVERED = 1, +}; + +enum MCE_Initiator { +	MCE_INITIATOR_UNKNOWN = 0, +	MCE_INITIATOR_CPU = 1, +}; + +enum MCE_ErrorType { +	MCE_ERROR_TYPE_UNKNOWN = 0, +	MCE_ERROR_TYPE_UE = 1, +	MCE_ERROR_TYPE_SLB = 2, +	MCE_ERROR_TYPE_ERAT = 3, +	MCE_ERROR_TYPE_TLB = 4, +}; + +enum MCE_UeErrorType { +	MCE_UE_ERROR_INDETERMINATE = 0, +	MCE_UE_ERROR_IFETCH = 1, +	MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, +	MCE_UE_ERROR_LOAD_STORE = 3, +	MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, +}; + +enum MCE_SlbErrorType { +	MCE_SLB_ERROR_INDETERMINATE = 0, +	MCE_SLB_ERROR_PARITY = 1, +	MCE_SLB_ERROR_MULTIHIT = 2, +}; + +enum MCE_EratErrorType { +	MCE_ERAT_ERROR_INDETERMINATE = 0, +	MCE_ERAT_ERROR_PARITY = 1, +	MCE_ERAT_ERROR_MULTIHIT = 2, +}; + +enum MCE_TlbErrorType { +	MCE_TLB_ERROR_INDETERMINATE = 0, +	MCE_TLB_ERROR_PARITY = 1, +	MCE_TLB_ERROR_MULTIHIT = 2, +}; + +struct machine_check_event { +	enum MCE_Version	version:8;	/* 0x00 */ +	uint8_t			in_use;		/* 0x01 */ +	enum MCE_Severity	severity:8;	/* 0x02 */ +	enum MCE_Initiator	initiator:8;	/* 0x03 */ +	enum MCE_ErrorType	error_type:8;	/* 0x04 */ +	enum MCE_Disposition	disposition:8;	/* 0x05 */ +	uint8_t			reserved_1[2];	/* 0x06 */ +	uint64_t		gpr3;		/* 0x08 */ +	uint64_t		srr0;		/* 0x10 */ +	uint64_t		srr1;		/* 0x18 */ +	union {					/* 0x20 */ +		struct { +			enum MCE_UeErrorType ue_error_type:8; +			uint8_t		effective_address_provided; +			uint8_t		physical_address_provided; +			uint8_t		reserved_1[5]; +			uint64_t	effective_address; +			uint64_t	physical_address; +			uint8_t		reserved_2[8]; +		} ue_error; + +		struct { +			enum MCE_SlbErrorType slb_error_type:8; +			uint8_t		effective_address_provided; +			uint8_t		reserved_1[6]; +			uint64_t	effective_address; +			uint8_t		reserved_2[16]; +		} slb_error; + +		struct { +			enum MCE_EratErrorType erat_error_type:8; +			uint8_t		effective_address_provided; +			uint8_t		reserved_1[6]; +			uint64_t	effective_address; +			uint8_t		reserved_2[16]; +		} erat_error; + +		struct { +			enum MCE_TlbErrorType tlb_error_type:8; +			uint8_t		effective_address_provided; +			uint8_t		reserved_1[6]; +			uint64_t	effective_address; +			uint8_t		reserved_2[16]; +		} tlb_error; +	} u; +}; + +struct mce_error_info { +	enum MCE_ErrorType error_type:8; +	union { +		enum MCE_UeErrorType ue_error_type:8; +		enum MCE_SlbErrorType slb_error_type:8; +		enum MCE_EratErrorType erat_error_type:8; +		enum MCE_TlbErrorType tlb_error_type:8; +	} u; +	uint8_t		reserved[2]; +}; + +#define MAX_MC_EVT	100 + +/* Release flags for get_mce_event() */ +#define MCE_EVENT_RELEASE	true +#define MCE_EVENT_DONTRELEASE	false + +extern void save_mce_event(struct pt_regs *regs, long handled, +			   struct mce_error_info *mce_err, uint64_t nip, +			   uint64_t addr); +extern int get_mce_event(struct machine_check_event *mce, bool release); +extern void release_mce_event(void); +extern void machine_check_queue_event(void); +extern void 
machine_check_print_event_info(struct machine_check_event *evt); +extern uint64_t get_mce_fault_addr(struct machine_check_event *evt); + +#endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 936db360790..d0918e09557 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -223,10 +223,6 @@ typedef struct {  	unsigned int	id;  	unsigned int	active;  	unsigned long	vdso_base; -#ifdef CONFIG_PPC_ICSWX -	struct spinlock *cop_lockp;	/* guard cop related stuff */ -	unsigned long acop;		/* mask of enabled coprocessor types */ -#endif /* CONFIG_PPC_ICSWX */  #ifdef CONFIG_PPC_MM_SLICES  	u64 low_slices_psize;   /* SLB page size encodings */  	u64 high_slices_psize;  /* 4 bits per slice for now */ @@ -286,8 +282,24 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)  extern int mmu_linear_psize;  extern int mmu_vmemmap_psize; +struct tlb_core_data { +	/* +	 * Per-core spinlock for e6500 TLB handlers (no tlbsrx.) +	 * Must be the first struct element. +	 */ +	u8 lock; + +	/* For software way selection, as on Freescale TLB1 */ +	u8 esel_next, esel_max, esel_first; +}; +  #ifdef CONFIG_PPC64  extern unsigned long linear_map_top; +extern int book3e_htw_mode; + +#define PPC_HTW_NONE	0 +#define PPC_HTW_IBM	1 +#define PPC_HTW_E6500	2  /*   * 64-bit booke platforms don't load the tlb in the tlb miss handler code. diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index c4cf0119727..c2b4dcf23d0 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -22,6 +22,7 @@   */  #include <asm/pgtable-ppc64.h>  #include <asm/bug.h> +#include <asm/processor.h>  /*   * Segment table @@ -135,8 +136,8 @@ extern char initial_stab[];  #ifndef __ASSEMBLY__  struct hash_pte { -	unsigned long v; -	unsigned long r; +	__be64 v; +	__be64 r;  };  extern struct hash_pte *htab_address; @@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);   */  struct subpage_prot_table {  	unsigned long maxaddr;	/* only addresses < this are protected */ -	unsigned int **protptrs[2]; +	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];  	unsigned int *low_prot[4];  }; diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 691fd8aca93..e61f24ed4e6 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -19,8 +19,7 @@  #define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)  #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)  #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010) -#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020) -#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040) +#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)  /*   * This is individual features @@ -106,13 +105,6 @@  				MMU_FTR_CI_LARGE_PAGE  #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \  				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B -#define MMU_FTRS_A2		MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \ -				MMU_FTR_USE_TLBIVAX_BCAST | \ -				MMU_FTR_LOCK_BCAST_INVAL | \ -				MMU_FTR_USE_TLBRSRV | \ -				MMU_FTR_USE_PAIRED_MAS | \ -				MMU_FTR_TLBIEL | \ -				MMU_FTR_16M_PAGE  #ifndef __ASSEMBLY__  #include <asm/cputable.h> @@ -180,16 +172,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)  #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */  #define MMU_PAGE_256K	4  #define MMU_PAGE_1M	5 -#define MMU_PAGE_4M	6 -#define MMU_PAGE_8M	7 -#define MMU_PAGE_16M	8 -#define MMU_PAGE_64M	9 
-#define MMU_PAGE_256M	10 -#define MMU_PAGE_1G	11 -#define MMU_PAGE_16G	12 -#define MMU_PAGE_64G	13 - -#define MMU_PAGE_COUNT	14 +#define MMU_PAGE_2M	6 +#define MMU_PAGE_4M	7 +#define MMU_PAGE_8M	8 +#define MMU_PAGE_16M	9 +#define MMU_PAGE_64M	10 +#define MMU_PAGE_256M	11 +#define MMU_PAGE_1G	12 +#define MMU_PAGE_16G	13 +#define MMU_PAGE_64G	14 + +#define MMU_PAGE_COUNT	15  #if defined(CONFIG_PPC_STD_MMU_64)  /* 64-bit classic hash table MMU */ diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 49fa55bfbac..dcfcad139bc 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -35,6 +35,7 @@ struct mod_arch_specific {  #ifdef __powerpc64__  	unsigned int stubs_section;	/* Index of stubs section in module */  	unsigned int toc_section;	/* What section is the TOC? */ +	bool toc_fixed;			/* Have we fixed up .TOC.? */  #ifdef CONFIG_DYNAMIC_FTRACE  	unsigned long toc;  	unsigned long tramp; @@ -77,6 +78,9 @@ struct mod_arch_specific {  #    endif	/* MODULE */  #endif +bool is_module_trampoline(u32 *insns); +int module_trampoline_target(struct module *mod, u32 *trampoline, +			     unsigned long *target);  struct exception_table_entry;  void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h index 887d3d6133e..4a69cd1d504 100644 --- a/arch/powerpc/include/asm/mpc5121.h +++ b/arch/powerpc/include/asm/mpc5121.h @@ -37,7 +37,12 @@ struct mpc512x_ccm {  	u32	cccr;	/* CFM Clock Control Register */  	u32	dccr;	/* DIU Clock Control Register */  	u32	mscan_ccr[4];	/* MSCAN Clock Control Registers */ -	u8	res[0x98]; /* Reserved */ +	u32	out_ccr[4];	/* OUT CLK Configure Registers */ +	u32	rsv0[2];	/* Reserved */ +	u32	scfr3;		/* System Clock Frequency Register 3 */ +	u32	rsv1[3];	/* Reserved */ +	u32	spll_lock_cnt;	/* System PLL Lock Counter */ +	u8	res[0x6c];	/* Reserved */  };  /* diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index c5cd72833d6..0da1dbd42e0 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -12,35 +12,28 @@  #ifndef __OPAL_H  #define __OPAL_H -/****** Takeover interface ********/ - -/* PAPR H-Call used to querty the HAL existence and/or instanciate - * it from within pHyp (tech preview only). +#ifndef __ASSEMBLY__ +/* + * SG entry   * - * This is exclusively used in prom_init.c + * WARNING: The current implementation requires each entry + * to represent a block that is 4k aligned *and* each block + * size except the last one in the list to be as well.   
*/ - -#ifndef __ASSEMBLY__ - -struct opal_takeover_args { -	u64	k_image;		/* r4 */ -	u64	k_size;			/* r5 */ -	u64	k_entry;		/* r6 */ -	u64	k_entry2;		/* r7 */ -	u64	hal_addr;		/* r8 */ -	u64	rd_image;		/* r9 */ -	u64	rd_size;		/* r10 */ -	u64	rd_loc;			/* r11 */ +struct opal_sg_entry { +	__be64 data; +	__be64 length;  }; -extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); - -extern long opal_do_takeover(struct opal_takeover_args *args); +/* SG list */ +struct opal_sg_list { +	__be64 length; +	__be64 next; +	struct opal_sg_entry entry[]; +}; -struct rtas_args; -extern int opal_enter_rtas(struct rtas_args *args, -			   unsigned long data, -			   unsigned long entry); +/* We calculate number of sg entries based on PAGE_SIZE */ +#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))  #endif /* __ASSEMBLY__ */ @@ -61,8 +54,11 @@ extern int opal_enter_rtas(struct rtas_args *args,  #define OPAL_INTERNAL_ERROR	-11  #define OPAL_BUSY_EVENT		-12  #define OPAL_HARDWARE_FROZEN	-13 +#define OPAL_WRONG_STATE	-14 +#define OPAL_ASYNC_COMPLETION	-15  /* API Tokens (in r0) */ +#define OPAL_INVALID_CALL			-1  #define OPAL_CONSOLE_WRITE			1  #define OPAL_CONSOLE_READ			2  #define OPAL_RTC_READ				3 @@ -129,9 +125,33 @@ extern int opal_enter_rtas(struct rtas_args *args,  #define OPAL_LPC_READ				67  #define OPAL_LPC_WRITE				68  #define OPAL_RETURN_CPU				69 +#define OPAL_REINIT_CPUS			70 +#define OPAL_ELOG_READ				71 +#define OPAL_ELOG_WRITE				72 +#define OPAL_ELOG_ACK				73 +#define OPAL_ELOG_RESEND			74 +#define OPAL_ELOG_SIZE				75 +#define OPAL_FLASH_VALIDATE			76 +#define OPAL_FLASH_MANAGE			77 +#define OPAL_FLASH_UPDATE			78 +#define OPAL_RESYNC_TIMEBASE			79 +#define OPAL_DUMP_INIT				81 +#define OPAL_DUMP_INFO				82 +#define OPAL_DUMP_READ				83 +#define OPAL_DUMP_ACK				84 +#define OPAL_GET_MSG				85 +#define OPAL_CHECK_ASYNC_COMPLETION		86 +#define OPAL_SYNC_HOST_REBOOT			87 +#define OPAL_SENSOR_READ			88 +#define OPAL_GET_PARAM				89 +#define OPAL_SET_PARAM				90 +#define OPAL_DUMP_RESEND			91 +#define OPAL_DUMP_INFO2				94  #ifndef __ASSEMBLY__ +#include <linux/notifier.h> +  /* Other enums */  enum OpalVendorApiTokens {  	OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999 @@ -208,7 +228,19 @@ enum OpalPendingState {  	OPAL_EVENT_ERROR_LOG		= 0x40,  	OPAL_EVENT_EPOW			= 0x80,  	OPAL_EVENT_LED_STATUS		= 0x100, -	OPAL_EVENT_PCI_ERROR		= 0x200 +	OPAL_EVENT_PCI_ERROR		= 0x200, +	OPAL_EVENT_DUMP_AVAIL		= 0x400, +	OPAL_EVENT_MSG_PENDING		= 0x800, +}; + +enum OpalMessageType { +	OPAL_MSG_ASYNC_COMP = 0,	/* params[0] = token, params[1] = rc, +					 * additional params function-specific +					 */ +	OPAL_MSG_MEM_ERR, +	OPAL_MSG_EPOW, +	OPAL_MSG_SHUTDOWN, +	OPAL_MSG_TYPE_MAX,  };  /* Machine check related definitions */ @@ -308,12 +340,16 @@ enum OpalMveEnableAction {  	OPAL_ENABLE_MVE = 1  }; -enum OpalPciResetAndReinitScope { +enum OpalPciResetScope {  	OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,  	OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,  	OPAL_PCI_IODA_TABLE_RESET = 6,  }; +enum OpalPciReinitScope { +	OPAL_REINIT_PCI_DEV = 1000 +}; +  enum OpalPciResetState {  	OPAL_DEASSERT_RESET = 0,  	OPAL_ASSERT_RESET = 1 @@ -353,6 +389,19 @@ enum OpalLPCAddressType {  	OPAL_LPC_FW	= 2,  }; +/* System parameter permission */ +enum OpalSysparamPerm { +	OPAL_SYSPARAM_READ      = 0x1, +	OPAL_SYSPARAM_WRITE     = 0x2, +	OPAL_SYSPARAM_RW        = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE), +}; + +struct opal_msg { +	__be32 msg_type; +	
__be32 reserved; +	__be64 params[8]; +}; +  struct opal_machine_check_event {  	enum OpalMCE_Version	version:8;	/* 0x00 */  	uint8_t			in_use;		/* 0x01 */ @@ -401,6 +450,58 @@ struct opal_machine_check_event {  	} u;  }; +/* FSP memory error handling */ +enum OpalMemErr_Version { +	OpalMemErr_V1 = 1, +}; + +enum OpalMemErrType { +	OPAL_MEM_ERR_TYPE_RESILIENCE	= 0, +	OPAL_MEM_ERR_TYPE_DYN_DALLOC, +	OPAL_MEM_ERR_TYPE_SCRUB, +}; + +/* Memory Resilience error type */ +enum OpalMemErr_ResilErrType { +	OPAL_MEM_RESILIENCE_CE		= 0, +	OPAL_MEM_RESILIENCE_UE, +	OPAL_MEM_RESILIENCE_UE_SCRUB, +}; + +/* Dynamic Memory Deallocation type */ +enum OpalMemErr_DynErrType { +	OPAL_MEM_DYNAMIC_DEALLOC	= 0, +}; + +/* OpalMemoryErrorData->flags */ +#define OPAL_MEM_CORRECTED_ERROR	0x0001 +#define OPAL_MEM_THRESHOLD_EXCEEDED	0x0002 +#define OPAL_MEM_ACK_REQUIRED		0x8000 + +struct OpalMemoryErrorData { +	enum OpalMemErr_Version	version:8;	/* 0x00 */ +	enum OpalMemErrType	type:8;		/* 0x01 */ +	__be16			flags;		/* 0x02 */ +	uint8_t			reserved_1[4];	/* 0x04 */ + +	union { +		/* Memory Resilience corrected/uncorrected error info */ +		struct { +			enum OpalMemErr_ResilErrType resil_err_type:8; +			uint8_t		reserved_1[7]; +			__be64		physical_address_start; +			__be64		physical_address_end; +		} resilience; +		/* Dynamic memory deallocation error info */ +		struct { +			enum OpalMemErr_DynErrType dyn_err_type:8; +			uint8_t		reserved_1[7]; +			__be64		physical_address_start; +			__be64		physical_address_end; +		} dyn_dealloc; +	} u; +}; +  enum {  	OPAL_P7IOC_DIAG_TYPE_NONE	= 0,  	OPAL_P7IOC_DIAG_TYPE_RGC	= 1, @@ -460,16 +561,18 @@ enum {  enum {  	OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1, +	OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2  };  enum {  	OPAL_P7IOC_NUM_PEST_REGS = 128, +	OPAL_PHB3_NUM_PEST_REGS = 256  };  struct OpalIoPhbErrorCommon { -	uint32_t version; -	uint32_t ioType; -	uint32_t len; +	__be32 version; +	__be32 ioType; +	__be32 len;  };  struct OpalIoP7IOCPhbErrorData { @@ -531,28 +634,103 @@ struct OpalIoP7IOCPhbErrorData {  	uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];  }; +struct OpalIoPhb3ErrorData { +	struct OpalIoPhbErrorCommon common; + +	__be32 brdgCtl; + +	/* PHB3 UTL regs */ +	__be32 portStatusReg; +	__be32 rootCmplxStatus; +	__be32 busAgentStatus; + +	/* PHB3 cfg regs */ +	__be32 deviceStatus; +	__be32 slotStatus; +	__be32 linkStatus; +	__be32 devCmdStatus; +	__be32 devSecStatus; + +	/* cfg AER regs */ +	__be32 rootErrorStatus; +	__be32 uncorrErrorStatus; +	__be32 corrErrorStatus; +	__be32 tlpHdr1; +	__be32 tlpHdr2; +	__be32 tlpHdr3; +	__be32 tlpHdr4; +	__be32 sourceId; + +	__be32 rsv3; + +	/* Record data about the call to allocate a buffer */ +	__be64 errorClass; +	__be64 correlator; + +	__be64 nFir;			/* 000 */ +	__be64 nFirMask;		/* 003 */ +	__be64 nFirWOF;		/* 008 */ + +	/* PHB3 MMIO Error Regs */ +	__be64 phbPlssr;		/* 120 */ +	__be64 phbCsr;		/* 110 */ +	__be64 lemFir;		/* C00 */ +	__be64 lemErrorMask;		/* C18 */ +	__be64 lemWOF;		/* C40 */ +	__be64 phbErrorStatus;	/* C80 */ +	__be64 phbFirstErrorStatus;	/* C88 */ +	__be64 phbErrorLog0;		/* CC0 */ +	__be64 phbErrorLog1;		/* CC8 */ +	__be64 mmioErrorStatus;	/* D00 */ +	__be64 mmioFirstErrorStatus;	/* D08 */ +	__be64 mmioErrorLog0;		/* D40 */ +	__be64 mmioErrorLog1;		/* D48 */ +	__be64 dma0ErrorStatus;	/* D80 */ +	__be64 dma0FirstErrorStatus;	/* D88 */ +	__be64 dma0ErrorLog0;		/* DC0 */ +	__be64 dma0ErrorLog1;		/* DC8 */ +	__be64 dma1ErrorStatus;	/* E00 */ +	__be64 dma1FirstErrorStatus;	/* E08 */ +	__be64 dma1ErrorLog0;		/* E40 */ +	__be64 dma1ErrorLog1;		
/* E48 */ +	__be64 pestA[OPAL_PHB3_NUM_PEST_REGS]; +	__be64 pestB[OPAL_PHB3_NUM_PEST_REGS]; +}; + +enum { +	OPAL_REINIT_CPUS_HILE_BE	= (1 << 0), +	OPAL_REINIT_CPUS_HILE_LE	= (1 << 1), +}; +  typedef struct oppanel_line {  	const char * 	line;  	uint64_t 	line_len;  } oppanel_line_t; +/* /sys/firmware/opal */ +extern struct kobject *opal_kobj; + +/* /ibm,opal */ +extern struct device_node *opal_node; +  /* API functions */ -int64_t opal_console_write(int64_t term_number, int64_t *length, +int64_t opal_invalid_call(void); +int64_t opal_console_write(int64_t term_number, __be64 *length,  			   const uint8_t *buffer); -int64_t opal_console_read(int64_t term_number, int64_t *length, +int64_t opal_console_read(int64_t term_number, __be64 *length,  			  uint8_t *buffer);  int64_t opal_console_write_buffer_space(int64_t term_number, -					int64_t *length); -int64_t opal_rtc_read(uint32_t *year_month_day, -		      uint64_t *hour_minute_second_millisecond); +					__be64 *length); +int64_t opal_rtc_read(__be32 *year_month_day, +		      __be64 *hour_minute_second_millisecond);  int64_t opal_rtc_write(uint32_t year_month_day,  		       uint64_t hour_minute_second_millisecond);  int64_t opal_cec_power_down(uint64_t request);  int64_t opal_cec_reboot(void);  int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);  int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); -int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask); -int64_t opal_poll_events(uint64_t *outstanding_event_mask); +int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); +int64_t opal_poll_events(__be64 *outstanding_event_mask);  int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,  				    uint64_t tce_mem_size);  int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr, @@ -560,9 +738,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,  int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,  				  uint64_t offset, uint8_t *data);  int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func, -				       uint64_t offset, uint16_t *data); +				       uint64_t offset, __be16 *data);  int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func, -				  uint64_t offset, uint32_t *data); +				  uint64_t offset, __be32 *data);  int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,  				   uint64_t offset, uint8_t data);  int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func, @@ -570,14 +748,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,  int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,  				   uint64_t offset, uint32_t data);  int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority); -int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority); +int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);  int64_t opal_register_exception_handler(uint64_t opal_exception,  					uint64_t handler_address,  					uint64_t glue_cache_line);  int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,  				   uint8_t *freeze_state, -				   uint16_t *pci_error_type, -				   uint64_t *phb_status); +				   __be16 *pci_error_type, +				   __be64 *phb_status);  int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,  				  uint64_t eeh_action_token);  int64_t opal_pci_shpc(uint64_t 
phb_id, uint64_t shpc_action, uint8_t *state); @@ -614,13 +792,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);  int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,  			     uint32_t xive_num);  int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, -			     int32_t *interrupt_source_number); +			     __be32 *interrupt_source_number);  int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, -			uint8_t msi_range, uint32_t *msi_address, -			uint32_t *message_data); +			uint8_t msi_range, __be32 *msi_address, +			__be32 *message_data);  int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,  			uint32_t xive_num, uint8_t msi_range, -			uint64_t *msi_address, uint32_t *message_data); +			__be64 *msi_address, __be32 *message_data);  int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);  int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);  int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines); @@ -639,58 +817,101 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,  int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,  				    uint64_t diag_buffer_len);  int64_t opal_pci_fence_phb(uint64_t phb_id); -int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); +int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);  int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);  int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); -int64_t opal_get_epow_status(uint64_t *status); +int64_t opal_get_epow_status(__be64 *status);  int64_t opal_set_system_attention_led(uint8_t led_action); -int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, -			    uint16_t *pci_error_type, uint16_t *severity); +int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe, +			    __be16 *pci_error_type, __be16 *severity);  int64_t opal_pci_poll(uint64_t phb_id);  int64_t opal_return_cpu(void); +int64_t opal_reinit_cpus(uint64_t flags); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); -int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); +int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); +int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);  int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,  		       uint32_t addr, uint32_t data, uint32_t sz);  int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, -		      uint32_t addr, uint32_t *data, uint32_t sz); +		      uint32_t addr, __be32 *data, uint32_t sz); + +int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id); +int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type); +int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); +int64_t opal_send_ack_elog(uint64_t log_id); +void opal_resend_pending_logs(void); + +int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); +int64_t opal_manage_flash(uint8_t op); +int64_t opal_update_flash(uint64_t blk_list); +int64_t opal_dump_init(uint8_t dump_type); +int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size); +int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type); +int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); +int64_t opal_dump_ack(uint32_t dump_id); 
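Judging purely from the signatures above, the dump entry points compose into a query/read/acknowledge sequence. The sketch below is an illustration rather than code from this patch: the helper name is invented, the buffer is assumed to be passed by physical address, and a real consumer would also have to handle OPAL_BUSY-style return codes and the OPAL_EVENT_DUMP_AVAIL notification that kicks the sequence off.

	/* Hypothetical consumer of the dump calls declared above. */
	static int example_fetch_fw_dump(void *buf, u32 buflen)
	{
		__be32 id, size, type;

		if (opal_dump_info2(&id, &size, &type))		/* anything pending? */
			return -ENXIO;
		if (be32_to_cpu(size) > buflen)
			return -ENOSPC;
		if (opal_dump_read(be32_to_cpu(id), __pa(buf)))	/* copy the dump out */
			return -EIO;
		return opal_dump_ack(be32_to_cpu(id)) ? -EIO : 0; /* firmware may now discard it */
	}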
+int64_t opal_dump_resend_notification(void); + +int64_t opal_get_msg(uint64_t buffer, uint64_t size); +int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token); +int64_t opal_sync_host_reboot(void); +int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, +		uint64_t length); +int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, +		uint64_t length); +int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);  /* Internal functions */ -extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); +extern int early_init_dt_scan_opal(unsigned long node, const char *uname, +				   int depth, void *data); +extern int early_init_dt_scan_recoverable_ranges(unsigned long node, +				 const char *uname, int depth, void *data);  extern int opal_get_chars(uint32_t vtermno, char *buf, int count);  extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);  extern void hvc_opal_init_early(void); -/* Internal functions */ -extern int early_init_dt_scan_opal(unsigned long node, const char *uname, -				   int depth, void *data); -  extern int opal_notifier_register(struct notifier_block *nb); +extern int opal_notifier_unregister(struct notifier_block *nb); + +extern int opal_message_notifier_register(enum OpalMessageType msg_type, +						struct notifier_block *nb);  extern void opal_notifier_enable(void);  extern void opal_notifier_disable(void);  extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); -extern int opal_get_chars(uint32_t vtermno, char *buf, int count); -extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); - -extern void hvc_opal_init_early(void); +extern int __opal_async_get_token(void); +extern int opal_async_get_token_interruptible(void); +extern int __opal_async_release_token(int token); +extern int opal_async_release_token(int token); +extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); +extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);  struct rtc_time;  extern int opal_set_rtc_time(struct rtc_time *tm);  extern void opal_get_rtc_time(struct rtc_time *tm);  extern unsigned long opal_get_boot_time(void);  extern void opal_nvram_init(void); +extern void opal_flash_init(void); +extern void opal_flash_term_callback(void); +extern int opal_elog_init(void); +extern void opal_platform_dump_init(void); +extern void opal_sys_param_init(void); +extern void opal_msglog_init(void);  extern int opal_machine_check(struct pt_regs *regs); +extern bool opal_mce_check_early_recovery(struct pt_regs *regs);  extern void opal_shutdown(void); +extern int opal_resync_timebase(void);  extern void opal_lpc_init(void); +struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, +					     unsigned long vmalloc_size); +void opal_free_sg_list(struct opal_sg_list *sg); +  #endif /* __ASSEMBLY__ */  #endif /* __OPAL_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index a5954cebbc5..bb0bd25f20d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -16,7 +16,6 @@  #ifdef CONFIG_PPC64 -#include <linux/init.h>  #include <asm/types.h>  #include <asm/lppaca.h>  #include <asm/mmu.h> @@ -93,7 +92,10 @@ struct paca_struct {  	struct slb_shadow *slb_shadow_ptr;  	struct dtl_entry *dispatch_log;  	struct dtl_entry *dispatch_log_end; +#endif /* CONFIG_PPC_STD_MMU_64 */ +	u64 dscr_default;		/* per-CPU default DSCR */ +#ifdef 
CONFIG_PPC_STD_MMU_64  	/*  	 * Now, starting in cacheline 2, the exception save areas  	 */ @@ -113,8 +115,15 @@ struct paca_struct {  	/* Keep pgd in the same cacheline as the start of extlb */  	pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */  	pgd_t *kernel_pgd;		/* Kernel PGD */ -	/* We can have up to 3 levels of reentrancy in the TLB miss handler */ -	u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; + +	/* Shared by all threads of a core -- points to tcd of first thread */ +	struct tlb_core_data *tcd_ptr; + +	/* +	 * We can have up to 3 levels of reentrancy in the TLB miss handler, +	 * in each of four exception levels (normal, crit, mcheck, debug). +	 */ +	u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];  	u64 exmc[8];		/* used for machine checks */  	u64 excrit[8];		/* used for crit interrupts */  	u64 exdbg[8];		/* used for debug interrupts */ @@ -123,6 +132,8 @@ struct paca_struct {  	void *mc_kstack;  	void *crit_kstack;  	void *dbg_kstack; + +	struct tlb_core_data tcd;  #endif /* CONFIG_PPC_BOOK3E */  	mm_context_t context; @@ -141,7 +152,7 @@ struct paca_struct {  	u8 io_sync;			/* writel() needs spin_unlock sync */  	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */  	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */ -	u64 sprg3;			/* Saved user-visible sprg */ +	u64 sprg_vdso;			/* Saved user-visible sprg */  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM  	u64 tm_scratch;                 /* TM scratch area for reclaim */  #endif @@ -152,6 +163,15 @@ struct paca_struct {  	 */  	struct opal_machine_check_event *opal_mc_evt;  #endif +#ifdef CONFIG_PPC_BOOK3S_64 +	/* Exclusive emergency stack pointer for machine check exception. */ +	void *mc_emergency_sp; +	/* +	 * Flag to check whether we are in machine check early handler +	 * and already using emergency stack. 
+	 */ +	u16 in_mce; +#endif  	/* Stuff for accurate time accounting */  	u64 user_time;			/* accumulated usermode TB ticks */ @@ -166,7 +186,7 @@ struct paca_struct {  	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */  #ifdef CONFIG_KVM_BOOK3S_HANDLER -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE  	/* We use this to store guest state in */  	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;  #endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index b9f426212d3..32e4e212b9c 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -78,7 +78,7 @@ extern unsigned int HPAGE_SHIFT;   *   * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START   * - * There are two was to determine a physical address from a virtual one: + * There are two ways to determine a physical address from a virtual one:   * va = pa + PAGE_OFFSET - MEMORY_START   * va = pa + KERNELBASE - PHYSICAL_START   * @@ -403,7 +403,7 @@ void arch_free_page(struct page *page, int order);  struct vm_area_struct; -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)  typedef pte_t *pgtable_t;  #else  typedef struct page *pgtable_t; diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 95145a15c70..1b0739bc14b 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -46,11 +46,6 @@ struct pci_dev;  #define pcibios_assign_all_busses() \  	(pci_has_flag(PCI_REASSIGN_ALL_BUS)) -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ -	/* We don't do dynamic PCI IRQ allocation */ -} -  #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ  static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)  { diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 3fd2f1b6f90..b3e936027b2 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -14,6 +14,7 @@  #include <linux/device.h>  #include <uapi/asm/perf_event.h> +/* Update perf_event_print_debug() if this changes */  #define MAX_HWEVENTS		8  #define MAX_EVENT_ALTERNATIVES	8  #define MAX_LIMITED_HWCOUNTERS	2 @@ -60,8 +61,7 @@ struct power_pmu {  #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */  #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */  #define PPMU_HAS_SIER		0x00000040 /* Has SIER */ -#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */ -#define PPMU_EBB		0x00000100 /* supports event based branch */ +#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */  /*   * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 27b2386f738..842846c1b71 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,  static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,  				  unsigned long address)  { -	struct page *page = page_address(table); -  	tlb_flush_pgtable(tlb, address); -	pgtable_page_dtor(page); -	pgtable_free_tlb(tlb, page, 0); +	pgtable_page_dtor(table); +	pgtable_free_tlb(tlb, page_address(table), 0);  }  #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index f65e27b09bd..4b0be20fcbf 100644 --- 
a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -16,6 +16,7 @@ struct vmemmap_backing {  	unsigned long phys;  	unsigned long virt_addr;  }; +extern struct vmemmap_backing *vmemmap_list;  /*   * Functions that deal with pagetables that could be at any level of @@ -91,7 +92,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,  	if (!pte)  		return NULL;  	page = virt_to_page(pte); -	pgtable_page_ctor(page); +	if (!pgtable_page_ctor(page)) { +		__free_page(page); +		return NULL; +	}  	return page;  } @@ -144,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,  static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,  				  unsigned long address)  { -	struct page *page = page_address(table); -  	tlb_flush_pgtable(tlb, address); -	pgtable_page_dtor(page); -	pgtable_free_tlb(tlb, page, 0); +	pgtable_page_dtor(table); +	pgtable_free_tlb(tlb, page_address(table), 0);  }  #else /* if CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 46db09414a1..eb9261024f5 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,  static inline unsigned long pte_update(struct mm_struct *mm,  				       unsigned long addr,  				       pte_t *ptep, unsigned long clr, +				       unsigned long set,  				       int huge)  {  #ifdef PTE_ATOMIC_UPDATES @@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,  	andi.	%1,%0,%6\n\  	bne-	1b \n\  	andc	%1,%0,%4 \n\ +	or	%1,%1,%7\n\  	stdcx.	%1,0,%3 \n\  	bne-	1b"  	: "=&r" (old), "=&r" (tmp), "=m" (*ptep) -	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) +	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)  	: "cc" );  #else  	unsigned long old = pte_val(*ptep); -	*ptep = __pte(old & ~clr); +	*ptep = __pte((old & ~clr) | set);  #endif  	/* huge pages use the old page table lock */  	if (!huge) @@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,  {  	unsigned long old; -       	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) +	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)  		return 0; -	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); +	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);  	return (old & _PAGE_ACCESSED) != 0;  }  #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,  	if ((pte_val(*ptep) & _PAGE_RW) == 0)  		return; -	pte_update(mm, addr, ptep, _PAGE_RW, 0); +	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);  }  static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, @@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,  	if ((pte_val(*ptep) & _PAGE_RW) == 0)  		return; -	pte_update(mm, addr, ptep, _PAGE_RW, 1); +	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);  }  /* @@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,  				       unsigned long addr, pte_t *ptep)  { -	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); +	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);  	return __pte(old);  }  static inline void pte_clear(struct mm_struct *mm, unsigned long addr,  			     pte_t * ptep)  { -	pte_update(mm, addr, 
ptep, ~0UL, 0); +	pte_update(mm, addr, ptep, ~0UL, 0, 0);  } @@ -394,6 +396,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,  	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;  } +struct page *realmode_pfn_to_page(unsigned long pfn); +  static inline char *get_hpte_slot_array(pmd_t *pmdp)  {  	/* @@ -504,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,  extern unsigned long pmd_hugepage_update(struct mm_struct *mm,  					 unsigned long addr, -					 pmd_t *pmdp, unsigned long clr); +					 pmd_t *pmdp, +					 unsigned long clr, +					 unsigned long set);  static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,  					      unsigned long addr, pmd_t *pmdp) @@ -513,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,  	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)  		return 0; -	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); +	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);  	return ((old & _PAGE_ACCESSED) != 0);  } @@ -540,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,  	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)  		return; -	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); +	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);  }  #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH @@ -556,5 +562,19 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);  #define __HAVE_ARCH_PMDP_INVALIDATE  extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,  			    pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +struct spinlock; +static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, +					 struct spinlock *old_pmd_ptl) +{ +	/* +	 * Archs like ppc64 use pgtable to store per pmd +	 * specific information. 
So when we switch the pmd, +	 * we should also withdraw and deposit the pgtable +	 */ +	return true; +} +  #endif /* __ASSEMBLY__ */  #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 7d6eacf249c..d98c1ecc326 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -3,6 +3,7 @@  #ifdef __KERNEL__  #ifndef __ASSEMBLY__ +#include <linux/mmdebug.h>  #include <asm/processor.h>		/* For TASK_SIZE */  #include <asm/mmu.h>  #include <asm/page.h> @@ -33,10 +34,101 @@ static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }  static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }  static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }  static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; } -static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }  static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }  static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } +#ifdef CONFIG_NUMA_BALANCING + +static inline int pte_present(pte_t pte) +{ +	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); +} + +#define pte_present_nonuma pte_present_nonuma +static inline int pte_present_nonuma(pte_t pte) +{ +	return pte_val(pte) & (_PAGE_PRESENT); +} + +#define pte_numa pte_numa +static inline int pte_numa(pte_t pte) +{ +	return (pte_val(pte) & +		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; +} + +#define pte_mknonnuma pte_mknonnuma +static inline pte_t pte_mknonnuma(pte_t pte) +{ +	pte_val(pte) &= ~_PAGE_NUMA; +	pte_val(pte) |=  _PAGE_PRESENT | _PAGE_ACCESSED; +	return pte; +} + +#define pte_mknuma pte_mknuma +static inline pte_t pte_mknuma(pte_t pte) +{ +	/* +	 * We should not set _PAGE_NUMA on non present ptes. Also clear the +	 * present bit so that hash_page will return 1 and we collect this +	 * as numa fault. +	 */ +	if (pte_present(pte)) { +		pte_val(pte) |= _PAGE_NUMA; +		pte_val(pte) &= ~_PAGE_PRESENT; +	} else +		VM_BUG_ON(1); +	return pte; +} + +#define ptep_set_numa ptep_set_numa +static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, +				 pte_t *ptep) +{ +	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) +		VM_BUG_ON(1); + +	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); +	return; +} + +#define pmd_numa pmd_numa +static inline int pmd_numa(pmd_t pmd) +{ +	return pte_numa(pmd_pte(pmd)); +} + +#define pmdp_set_numa pmdp_set_numa +static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, +				 pmd_t *pmdp) +{ +	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) +		VM_BUG_ON(1); + +	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); +	return; +} + +#define pmd_mknonnuma pmd_mknonnuma +static inline pmd_t pmd_mknonnuma(pmd_t pmd) +{ +	return pte_pmd(pte_mknonnuma(pmd_pte(pmd))); +} + +#define pmd_mknuma pmd_mknuma +static inline pmd_t pmd_mknuma(pmd_t pmd) +{ +	return pte_pmd(pte_mknuma(pmd_pte(pmd))); +} + +# else + +static inline int pte_present(pte_t pte) +{ +	return pte_val(pte) & _PAGE_PRESENT; +} +#endif /* CONFIG_NUMA_BALANCING */ +  /* Conversion functions: convert a page and protection to a page entry,   * and a page entry and page directory to the page they refer to.   
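 *
 * Editor's illustration (values as defined above, not new semantics):
 * pte_mknuma() sets _PAGE_NUMA and clears _PAGE_PRESENT, so hash_page()
 * will fault and the fault gets accounted as a NUMA fault, while generic
 * mm code still sees the pte as present:
 *
 *	pte = pte_mknuma(pte);
 *	pte_present(pte);		true  (present-or-NUMA)
 *	pte_present_nonuma(pte);	false (hardware present bit clear)
 *
 * ptep_set_numa() performs the same transition atomically through the
 * new "set" argument of pte_update().
 *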
* @@ -223,6 +315,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,  #endif  pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,  				 unsigned *shift); + +static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva, +				     unsigned long *pte_sizep) +{ +	pte_t *ptep; +	unsigned long ps = *pte_sizep; +	unsigned int shift; + +	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift); +	if (!ptep) +		return NULL; +	if (shift) +		*pte_sizep = 1ul << shift; +	else +		*pte_sizep = PAGE_SIZE; + +	if (ps > *pte_sizep) +		return NULL; + +	return ptep; +}  #endif /* __ASSEMBLY__ */  #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index a63b045e707..12c32c5f533 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -287,6 +287,32 @@ static inline long disable_reloc_on_exceptions(void) {  	return plpar_set_mode(0, 3, 0, 0);  } +/* + * Take exceptions in big endian mode on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long enable_big_endian_exceptions(void) +{ +	/* mflags = 0: big endian exceptions */ +	return plpar_set_mode(0, 4, 0, 0); +} + +/* + * Take exceptions in little endian mode on this partition + * + * Note: this call has a partition wide scope and can take a while to complete. + * If it returns H_LONG_BUSY_* it should be retried periodically until it + * returns H_SUCCESS. + */ +static inline long enable_little_endian_exceptions(void) +{ +	/* mflags = 1: little endian exceptions */ +	return plpar_set_mode(1, 4, 0, 0); +} +  static inline long plapr_set_ciabr(unsigned long ciabr)  {  	return plpar_set_mode(0, 1, ciabr, 0); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index d7fe9f5b46d..3132bb9365f 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -143,6 +143,8 @@  #define PPC_INST_LSWX			0x7c00042a  #define PPC_INST_LWARX			0x7c000028  #define PPC_INST_LWSYNC			0x7c2004ac +#define PPC_INST_SYNC			0x7c0004ac +#define PPC_INST_SYNC_MASK		0xfc0007fe  #define PPC_INST_LXVD2X			0x7c000698  #define PPC_INST_MCRXR			0x7c000400  #define PPC_INST_MCRXR_MASK		0xfc0007fe @@ -181,6 +183,7 @@  #define PPC_INST_TLBIVAX		0x7c000624  #define PPC_INST_TLBSRX_DOT		0x7c0006a5  #define PPC_INST_XXLOR			0xf0000510 +#define PPC_INST_XXSWAPD		0xf0000250  #define PPC_INST_XVCPSGNDP		0xf0000780  #define PPC_INST_TRECHKPT		0x7c0007dd  #define PPC_INST_TRECLAIM		0x7c00075d @@ -200,6 +203,7 @@  /* Misc instructions for BPF compiler */  #define PPC_INST_LD			0xe8000000  #define PPC_INST_LHZ			0xa0000000 +#define PPC_INST_LHBRX			0x7c00062c  #define PPC_INST_LWZ			0x80000000  #define PPC_INST_STD			0xf8000000  #define PPC_INST_STDU			0xf8000001 @@ -218,7 +222,7 @@  #define PPC_INST_MULLW			0x7c0001d6  #define PPC_INST_MULHWU			0x7c000016  #define PPC_INST_MULLI			0x1c000000 -#define PPC_INST_DIVWU			0x7c0003d6 +#define PPC_INST_DIVWU			0x7c000396  #define PPC_INST_RLWINM			0x54000000  #define PPC_INST_RLDICR			0x78000004  #define PPC_INST_SLW			0x7c000030 @@ -344,6 +348,8 @@  					       VSX_XX1((s), a, b))  #define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \  					       VSX_XX3((t), a, b)) +#define XXSWAPD(t, a)		stringify_in_c(.long PPC_INST_XXSWAPD | \ +					       
VSX_XX3((t), a, a))  #define XVCPSGNDP(t, a, b)	stringify_in_c(.long (PPC_INST_XVCPSGNDP | \  					       VSX_XX3((t), (a), (b)))) diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index ed57fa7920c..db1e2b8eff3 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -58,6 +58,7 @@ int rtas_write_config(struct pci_dn *, int where, int size, u32 val);  int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);  void eeh_pe_state_mark(struct eeh_pe *pe, int state);  void eeh_pe_state_clear(struct eeh_pe *pe, int state); +void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);  void eeh_sysfs_add_device(struct pci_dev *pdev);  void eeh_sysfs_remove_device(struct pci_dev *pdev); diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 599545738af..7e461252854 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -4,7 +4,6 @@  #ifndef _ASM_POWERPC_PPC_ASM_H  #define _ASM_POWERPC_PPC_ASM_H -#include <linux/init.h>  #include <linux/stringify.h>  #include <asm/asm-compat.h>  #include <asm/processor.h> @@ -58,7 +57,7 @@ BEGIN_FW_FTR_SECTION;							\  	LDX_BE	r10,0,r10;		/* get log write index */	\  	cmpd	cr1,r11,r10;						\  	beq+	cr1,33f;						\ -	bl	.accumulate_stolen_time;				\ +	bl	accumulate_stolen_time;				\  	ld	r12,_MSR(r1);						\  	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \  33:									\ @@ -98,123 +97,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)  #define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)  #define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base) -#define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) +#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)  #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)  #define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)  #define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)  #define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)  #define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) -#define REST_FPR(n, base)	lfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) +#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)  #define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)  #define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)  #define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)  #define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)  #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base) -#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n));  stvx n,base,b +#define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b  #define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)  #define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)  #define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)  #define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)  #define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) -#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,base,b +#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b  #define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)  #define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)  #define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)  #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); 
REST_8VRS(n+8,b,base)  #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) -/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in - * thread_struct: - */ -#define SAVE_FPR_TRANSACT(n, base)	stfd n,THREAD_TRANSACT_FPR0+	\ -					8*TS_FPRWIDTH*(n)(base) -#define SAVE_2FPRS_TRANSACT(n, base)	SAVE_FPR_TRANSACT(n, base);	\ -					SAVE_FPR_TRANSACT(n+1, base) -#define SAVE_4FPRS_TRANSACT(n, base)	SAVE_2FPRS_TRANSACT(n, base);	\ -					SAVE_2FPRS_TRANSACT(n+2, base) -#define SAVE_8FPRS_TRANSACT(n, base)	SAVE_4FPRS_TRANSACT(n, base);	\ -					SAVE_4FPRS_TRANSACT(n+4, base) -#define SAVE_16FPRS_TRANSACT(n, base)	SAVE_8FPRS_TRANSACT(n, base);	\ -					SAVE_8FPRS_TRANSACT(n+8, base) -#define SAVE_32FPRS_TRANSACT(n, base)	SAVE_16FPRS_TRANSACT(n, base);	\ -					SAVE_16FPRS_TRANSACT(n+16, base) - -#define REST_FPR_TRANSACT(n, base)	lfd	n,THREAD_TRANSACT_FPR0+	\ -					8*TS_FPRWIDTH*(n)(base) -#define REST_2FPRS_TRANSACT(n, base)	REST_FPR_TRANSACT(n, base);	\ -					REST_FPR_TRANSACT(n+1, base) -#define REST_4FPRS_TRANSACT(n, base)	REST_2FPRS_TRANSACT(n, base);	\ -					REST_2FPRS_TRANSACT(n+2, base) -#define REST_8FPRS_TRANSACT(n, base)	REST_4FPRS_TRANSACT(n, base);	\ -					REST_4FPRS_TRANSACT(n+4, base) -#define REST_16FPRS_TRANSACT(n, base)	REST_8FPRS_TRANSACT(n, base);	\ -					REST_8FPRS_TRANSACT(n+8, base) -#define REST_32FPRS_TRANSACT(n, base)	REST_16FPRS_TRANSACT(n, base);	\ -					REST_16FPRS_TRANSACT(n+16, base) - - -#define SAVE_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \ -					stvx n,b,base -#define SAVE_2VRS_TRANSACT(n,b,base)	SAVE_VR_TRANSACT(n,b,base);	\ -					SAVE_VR_TRANSACT(n+1,b,base) -#define SAVE_4VRS_TRANSACT(n,b,base)	SAVE_2VRS_TRANSACT(n,b,base);	\ -					SAVE_2VRS_TRANSACT(n+2,b,base) -#define SAVE_8VRS_TRANSACT(n,b,base)	SAVE_4VRS_TRANSACT(n,b,base);	\ -					SAVE_4VRS_TRANSACT(n+4,b,base) -#define SAVE_16VRS_TRANSACT(n,b,base)	SAVE_8VRS_TRANSACT(n,b,base);	\ -					SAVE_8VRS_TRANSACT(n+8,b,base) -#define SAVE_32VRS_TRANSACT(n,b,base)	SAVE_16VRS_TRANSACT(n,b,base);	\ -					SAVE_16VRS_TRANSACT(n+16,b,base) - -#define REST_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \ -					lvx n,b,base -#define REST_2VRS_TRANSACT(n,b,base)	REST_VR_TRANSACT(n,b,base);	\ -					REST_VR_TRANSACT(n+1,b,base) -#define REST_4VRS_TRANSACT(n,b,base)	REST_2VRS_TRANSACT(n,b,base);	\ -					REST_2VRS_TRANSACT(n+2,b,base) -#define REST_8VRS_TRANSACT(n,b,base)	REST_4VRS_TRANSACT(n,b,base);	\ -					REST_4VRS_TRANSACT(n+4,b,base) -#define REST_16VRS_TRANSACT(n,b,base)	REST_8VRS_TRANSACT(n,b,base);	\ -					REST_8VRS_TRANSACT(n+8,b,base) -#define REST_32VRS_TRANSACT(n,b,base)	REST_16VRS_TRANSACT(n,b,base);	\ -					REST_16VRS_TRANSACT(n+16,b,base) - - -#define SAVE_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \ -					STXVD2X(n,R##base,R##b) -#define SAVE_2VSRS_TRANSACT(n,b,base)	SAVE_VSR_TRANSACT(n,b,base);	\ -	                                SAVE_VSR_TRANSACT(n+1,b,base) -#define SAVE_4VSRS_TRANSACT(n,b,base)	SAVE_2VSRS_TRANSACT(n,b,base);	\ -	                                SAVE_2VSRS_TRANSACT(n+2,b,base) -#define SAVE_8VSRS_TRANSACT(n,b,base)	SAVE_4VSRS_TRANSACT(n,b,base);	\ -	                                SAVE_4VSRS_TRANSACT(n+4,b,base) -#define SAVE_16VSRS_TRANSACT(n,b,base)	SAVE_8VSRS_TRANSACT(n,b,base);	\ -	                                SAVE_8VSRS_TRANSACT(n+8,b,base) -#define SAVE_32VSRS_TRANSACT(n,b,base)	SAVE_16VSRS_TRANSACT(n,b,base);	\ -	                                SAVE_16VSRS_TRANSACT(n+16,b,base) - -#define 
REST_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \ -					LXVD2X(n,R##base,R##b) -#define REST_2VSRS_TRANSACT(n,b,base)	REST_VSR_TRANSACT(n,b,base);    \ -	                                REST_VSR_TRANSACT(n+1,b,base) -#define REST_4VSRS_TRANSACT(n,b,base)	REST_2VSRS_TRANSACT(n,b,base);	\ -	                                REST_2VSRS_TRANSACT(n+2,b,base) -#define REST_8VSRS_TRANSACT(n,b,base)	REST_4VSRS_TRANSACT(n,b,base);	\ -	                                REST_4VSRS_TRANSACT(n+4,b,base) -#define REST_16VSRS_TRANSACT(n,b,base)	REST_8VSRS_TRANSACT(n,b,base);	\ -	                                REST_8VSRS_TRANSACT(n+8,b,base) -#define REST_32VSRS_TRANSACT(n,b,base)	REST_16VSRS_TRANSACT(n,b,base);	\ -	                                REST_16VSRS_TRANSACT(n+16,b,base) +#ifdef __BIG_ENDIAN__ +#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base) +#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base) +#else +#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\ +					STXVD2X(n,b,base);	\ +					XXSWAPD(n,n) +#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\ +					XXSWAPD(n,n) +#endif  /* Save the lower 32 VSRs in the thread VSR region */ -#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n));  STXVD2X(n,R##base,R##b) +#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)  #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)  #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)  #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)  #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)  #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) -#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) +#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)  #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)  #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)  #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) @@ -262,57 +189,53 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)  #define __STK_REG(i)   (112 + ((i)-14)*8)  #define STK_REG(i)     __STK_REG(__REG_##i) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define STK_GOT		24 +#define __STK_PARAM(i)	(32 + ((i)-3)*8) +#else +#define STK_GOT		40  #define __STK_PARAM(i)	(48 + ((i)-3)*8) +#endif  #define STK_PARAM(i)	__STK_PARAM(__REG_##i) -#define XGLUE(a,b) a##b -#define GLUE(a,b) XGLUE(a,b) +#if defined(_CALL_ELF) && _CALL_ELF == 2  #define _GLOBAL(name) \  	.section ".text"; \  	.align 2 ; \ +	.type name,@function; \  	.globl name; \ -	.globl GLUE(.,name); \ -	.section ".opd","aw"; \ -name: \ -	.quad GLUE(.,name); \ -	.quad .TOC.@tocbase; \ -	.quad 0; \ -	.previous; \ -	.type GLUE(.,name),@function; \ -GLUE(.,name): +name: -#define _INIT_GLOBAL(name) \ -	__REF; \ +#define _GLOBAL_TOC(name) \ +	.section ".text"; \  	.align 2 ; \ +	.type name,@function; \  	.globl name; \ -	.globl GLUE(.,name); \ -	.section ".opd","aw"; \  name: \ -	.quad GLUE(.,name); \ -	.quad .TOC.@tocbase; \ -	.quad 0; \ -	.previous; \ -	.type GLUE(.,name),@function; \ -GLUE(.,name): +0:	addis r2,r12,(.TOC.-0b)@ha; \ +	addi r2,r2,(.TOC.-0b)@l; \ +	.localentry name,.-name  #define _KPROBE(name) \  	.section ".kprobes.text","a"; \  	.align 2 ; \ +	.type name,@function; \  	.globl name; \ -	.globl GLUE(.,name); \ -	.section ".opd","aw"; \ -name: \ -	.quad GLUE(.,name); \ -	.quad .TOC.@tocbase; \ -	.quad 0; \ -	.previous; \ -	.type GLUE(.,name),@function; \ -GLUE(.,name): +name: + 
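+
+/*
+ * Editor's sketch (hypothetical routine, not from this patch): under
+ * ELFv2, code that addresses kernel globals is declared with
+ * _GLOBAL_TOC so the global entry point computes r2 from r12; local
+ * callers enter at the .localentry point and skip the TOC setup:
+ *
+ *	_GLOBAL_TOC(my_helper)
+ *		addis	r4,r2,my_var@toc@ha
+ *		ld	r3,my_var@toc@l(r4)
+ *		blr
+ */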
+#define DOTSYM(a)	a -#define _STATIC(name) \ +#else + +#define XGLUE(a,b) a##b +#define GLUE(a,b) XGLUE(a,b) + +#define _GLOBAL(name) \  	.section ".text"; \  	.align 2 ; \ +	.globl name; \ +	.globl GLUE(.,name); \  	.section ".opd","aw"; \  name: \  	.quad GLUE(.,name); \ @@ -322,9 +245,13 @@ name: \  	.type GLUE(.,name),@function; \  GLUE(.,name): -#define _INIT_STATIC(name) \ -	__REF; \ +#define _GLOBAL_TOC(name) _GLOBAL(name) + +#define _KPROBE(name) \ +	.section ".kprobes.text","a"; \  	.align 2 ; \ +	.globl name; \ +	.globl GLUE(.,name); \  	.section ".opd","aw"; \  name: \  	.quad GLUE(.,name); \ @@ -334,6 +261,10 @@ name: \  	.type GLUE(.,name),@function; \  GLUE(.,name): +#define DOTSYM(a)	GLUE(.,a) + +#endif +  #else /* 32-bit */  #define _ENTRY(n)	\ @@ -346,6 +277,8 @@ n:  	.globl n;	\  n: +#define _GLOBAL_TOC(name) _GLOBAL(name) +  #define _KPROBE(n)	\  	.section ".kprobes.text","a";	\  	.globl	n;	\ @@ -367,6 +300,11 @@ n:   *   you want to access various offsets within it).  On ppc32 this is   *   identical to LOAD_REG_IMMEDIATE.   * + * LOAD_REG_ADDR_PIC(rn, name) + *   Loads the address of label 'name' into register 'run'. Use this when + *   the kernel doesn't run at the linked or relocated address. Please + *   note that this macro will clobber the lr register. + *   * LOAD_REG_ADDRBASE(rn, name)   * ADDROFF(name)   *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into @@ -377,12 +315,25 @@ n:   *      LOAD_REG_ADDRBASE(rX, name)   *      ld	rY,ADDROFF(name)(rX)   */ + +/* Be careful, this will clobber the lr register. */ +#define LOAD_REG_ADDR_PIC(reg, name)		\ +	bl	0f;				\ +0:	mflr	reg;				\ +	addis	reg,reg,(name - 0b)@ha;		\ +	addi	reg,reg,(name - 0b)@l; +  #ifdef __powerpc64__ +#ifdef HAVE_AS_ATHIGH +#define __AS_ATHIGH high +#else +#define __AS_ATHIGH h +#endif  #define LOAD_REG_IMMEDIATE(reg,expr)		\  	lis     reg,(expr)@highest;		\  	ori     reg,reg,(expr)@higher;	\  	rldicr  reg,reg,32,31;		\ -	oris    reg,reg,(expr)@h;		\ +	oris    reg,reg,(expr)@__AS_ATHIGH;	\  	ori     reg,reg,(expr)@l;  #define LOAD_REG_ADDR(reg,name)			\ @@ -438,6 +389,8 @@ BEGIN_FTR_SECTION_NESTED(96);		\  	cmpwi dest,0;			\  	beq-  90b;			\  END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) +#elif defined(CONFIG_8xx) +#define MFTB(dest)			mftb dest  #else  #define MFTB(dest)			mfspr dest, SPRN_TBRL  #endif @@ -478,13 +431,6 @@ BEGIN_FTR_SECTION_NESTED(945)						\  	std	ra,TASKTHREADPPR(rb);					\  END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) -#define RESTORE_PPR(ra, rb)						\ -BEGIN_FTR_SECTION_NESTED(946)						\ -	ld	ra,PACACURRENT(r13);					\ -	ld	rb,TASKTHREADPPR(ra);					\ -	mtspr	SPRN_PPR,rb;	/* Restore PPR */			\ -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) -  #endif  /* @@ -832,6 +778,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)  #define N_SLINE	68  #define N_SO	100 -#endif /*  __ASSEMBLY__ */ +/* + * Create an endian fixup trampoline + * + * This starts with a "tdi 0,0,0x48" instruction which is + * essentially a "trap never", and thus akin to a nop. + * + * The opcode for this instruction read with the wrong endian + * however results in a b . + 8 + * + * So essentially we use that trick to execute the following + * trampoline in "reverse endian" if we are running with the + * MSR_LE bit set the "wrong" way for whatever endianness the + * kernel is built for. + */ +#ifdef CONFIG_PPC_BOOK3E +#define FIXUP_ENDIAN +#else +#define FIXUP_ENDIAN						   \ +	tdi   0,0,0x48;	  /* Reverse endian of b . 
+ 8		*/ \ +	b     $+36;	  /* Skip trampoline if endian is good	*/ \ +	.long 0x05009f42; /* bcl 20,31,$+4			*/ \ +	.long 0xa602487d; /* mflr r10				*/ \ +	.long 0x1c004a39; /* addi r10,r10,28			*/ \ +	.long 0xa600607d; /* mfmsr r11				*/ \ +	.long 0x01006b69; /* xori r11,r11,1			*/ \ +	.long 0xa6035a7d; /* mtsrr0 r10				*/ \ +	.long 0xa6037b7d; /* mtsrr1 r11				*/ \ +	.long 0x2400004c  /* rfid				*/ +#endif /* !CONFIG_PPC_BOOK3E */ +#endif /*  __ASSEMBLY__ */  #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index ce4de5aed7b..6d59072e13a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -14,8 +14,18 @@  #ifdef CONFIG_VSX  #define TS_FPRWIDTH 2 + +#ifdef __BIG_ENDIAN__ +#define TS_FPROFFSET 0 +#define TS_VSRLOWOFFSET 1 +#else +#define TS_FPROFFSET 1 +#define TS_VSRLOWOFFSET 0 +#endif +  #else  #define TS_FPRWIDTH 1 +#define TS_FPROFFSET 0  #endif  #ifdef CONFIG_PPC64 @@ -142,26 +152,22 @@ typedef struct {  	unsigned long seg;  } mm_segment_t; -#define TS_FPROFFSET 0 -#define TS_VSRLOWOFFSET 1 -#define TS_FPR(i) fpr[i][TS_FPROFFSET] -#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET] +#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET] +#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET] -struct thread_struct { -	unsigned long	ksp;		/* Kernel stack pointer */ -#ifdef CONFIG_PPC64 -	unsigned long	ksp_vsid; -#endif -	struct pt_regs	*regs;		/* Pointer to saved register state */ -	mm_segment_t	fs;		/* for get_fs() validation */ -#ifdef CONFIG_BOOKE -	/* BookE base exception scratch space; align on cacheline */ -	unsigned long	normsave[8] ____cacheline_aligned; -#endif -#ifdef CONFIG_PPC32 -	void		*pgdir;		/* root of page-table tree */ -	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */ -#endif +/* FP and VSX 0-31 register set */ +struct thread_fp_state { +	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); +	u64	fpscr;		/* Floating point status */ +}; + +/* Complete AltiVec register set including VSCR */ +struct thread_vr_state { +	vector128	vr[32] __attribute__((aligned(16))); +	vector128	vscr __attribute__((aligned(16))); +}; + +struct debug_reg {  #ifdef CONFIG_PPC_ADV_DEBUG_REGS  	/*  	 * The following help to manage the use of Debug Control Registers @@ -198,13 +204,28 @@ struct thread_struct {  	unsigned long	dvc2;  #endif  #endif -	/* FP and VSX 0-31 register set */ -	double		fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); -	struct { +}; + +struct thread_struct { +	unsigned long	ksp;		/* Kernel stack pointer */ -		unsigned int pad; -		unsigned int val;	/* Floating point status */ -	} fpscr; +#ifdef CONFIG_PPC64 +	unsigned long	ksp_vsid; +#endif +	struct pt_regs	*regs;		/* Pointer to saved register state */ +	mm_segment_t	fs;		/* for get_fs() validation */ +#ifdef CONFIG_BOOKE +	/* BookE base exception scratch space; align on cacheline */ +	unsigned long	normsave[8] ____cacheline_aligned; +#endif +#ifdef CONFIG_PPC32 +	void		*pgdir;		/* root of page-table tree */ +	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */ +#endif +	/* Debug Registers */ +	struct debug_reg debug; +	struct thread_fp_state	fp_state; +	struct thread_fp_state	*fp_save_area;  	int		fpexc_mode;	/* floating-point exception mode */  	unsigned int	align_ctl;	/* alignment handling control */  #ifdef CONFIG_PPC64 @@ -222,10 +243,8 @@ struct thread_struct {  	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */  	unsigned long	trap_nr;	/* last 
trap # on this thread */  #ifdef CONFIG_ALTIVEC -	/* Complete AltiVec register set */ -	vector128	vr[32] __attribute__((aligned(16))); -	/* AltiVec status */ -	vector128	vscr __attribute__((aligned(16))); +	struct thread_vr_state vr_state; +	struct thread_vr_state *vr_save_area;  	unsigned long	vrsave;  	int		used_vr;	/* set if process has used altivec */  #endif /* CONFIG_ALTIVEC */ @@ -237,6 +256,8 @@ struct thread_struct {  	unsigned long	evr[32];	/* upper 32-bits of SPE regs */  	u64		acc;		/* Accumulator */  	unsigned long	spefscr;	/* SPE & eFP status */ +	unsigned long	spefscr_last;	/* SPEFSCR value on last prctl +					   call or trap return */  	int		used_spe;	/* set if process has used spe */  #endif /* CONFIG_SPE */  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -262,13 +283,8 @@ struct thread_struct {  	 * transact_fpr[] is the new set of transactional values.  	 * VRs work the same way.  	 */ -	double		transact_fpr[32][TS_FPRWIDTH]; -	struct { -		unsigned int pad; -		unsigned int val;	/* Floating point status */ -	} transact_fpscr; -	vector128	transact_vr[32] __attribute__((aligned(16))); -	vector128	transact_vscr __attribute__((aligned(16))); +	struct thread_fp_state transact_fp; +	struct thread_vr_state transact_vr;  	unsigned long	transact_vrsave;  #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */  #ifdef CONFIG_KVM_BOOK3S_32_HANDLER @@ -303,7 +319,9 @@ struct thread_struct {  	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)  #ifdef CONFIG_SPE -#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, +#define SPEFSCR_INIT \ +	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \ +	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,  #else  #define SPEFSCR_INIT  #endif @@ -322,8 +340,6 @@ struct thread_struct {  	.ksp = INIT_SP, \  	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \  	.fs = KERNEL_DS, \ -	.fpr = {{0}}, \ -	.fpscr = { .val = 0, }, \  	.fpexc_mode = 0, \  	.ppr = INIT_PPR, \  } @@ -361,6 +377,13 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);  extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);  extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +extern void fp_enable(void); +extern void vec_enable(void); +extern void load_fp_state(struct thread_fp_state *fp); +extern void store_fp_state(struct thread_fp_state *fp); +extern void load_vr_state(struct thread_vr_state *vr); +extern void store_vr_state(struct thread_vr_state *vr); +  static inline unsigned int __unpack_fe01(unsigned long msr_bits)  {  	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); @@ -426,14 +449,8 @@ extern unsigned long cpuidle_disable;  enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};  extern int powersave_nap;	/* set if nap mode can be used in idle loop */ -extern void power7_nap(void); - -#ifdef CONFIG_PSERIES_IDLE -extern void update_smt_snooze_delay(int cpu, int residency); -#else -static inline void update_smt_snooze_delay(int cpu, int residency) {} -#endif - +extern void power7_nap(int check_irq); +extern void power7_sleep(void);  extern void flush_instruction_cache(void);  extern void hard_reset_now(void);  extern void poweroff_now(void); diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 7d0c7f3a717..74b79f07f04 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -1,4 +1,3 @@ -#include <linux/of.h>	/* linux/of.h 
gets to determine #include ordering */  #ifndef _POWERPC_PROM_H  #define _POWERPC_PROM_H  #ifdef __KERNEL__ @@ -20,20 +19,55 @@  #include <asm/irq.h>  #include <linux/atomic.h> -#define HAVE_ARCH_DEVTREE_FIXUPS +/* These includes should be removed once implicit includes are cleaned up. */ +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +#define OF_DT_BEGIN_NODE	0x1		/* Start of node, full name */ +#define OF_DT_END_NODE		0x2		/* End node */ +#define OF_DT_PROP		0x3		/* Property: name off, size, +						 * content */ +#define OF_DT_NOP		0x4		/* nop */ +#define OF_DT_END		0x9 + +#define OF_DT_VERSION		0x10  /* - * OF address retreival & translation + * This is what gets passed to the kernel by prom_init or kexec + * + * The dt struct contains the device tree structure, full pathes and + * property contents. The dt strings contain a separate block with just + * the strings for the property names, and is fully page aligned and + * self contained in a page, so that it can be kept around by the kernel, + * each property name appears only once in this page (cheap compression) + * + * the mem_rsvmap contains a map of reserved ranges of physical memory, + * passing it here instead of in the device-tree itself greatly simplifies + * the job of everybody. It's just a list of u64 pairs (base/size) that + * ends when size is 0   */ +struct boot_param_header { +	__be32	magic;			/* magic word OF_DT_HEADER */ +	__be32	totalsize;		/* total size of DT block */ +	__be32	off_dt_struct;		/* offset to structure */ +	__be32	off_dt_strings;		/* offset to strings */ +	__be32	off_mem_rsvmap;		/* offset to memory reserve map */ +	__be32	version;		/* format version */ +	__be32	last_comp_version;	/* last compatible version */ +	/* version 2 fields below */ +	__be32	boot_cpuid_phys;	/* Physical CPU id we're booting on */ +	/* version 3 fields below */ +	__be32	dt_strings_size;	/* size of the DT strings block */ +	/* version 17 fields below */ +	__be32	dt_struct_size;		/* size of the DT structure block */ +}; -/* Translate a DMA address from device space to CPU space */ -extern u64 of_translate_dma_address(struct device_node *dev, -				    const __be32 *in_addr); - -#ifdef CONFIG_PCI -extern unsigned long pci_address_to_pio(phys_addr_t address); -#define pci_address_to_pio pci_address_to_pio -#endif	/* CONFIG_PCI */ +/* + * OF address retreival & translation + */  /* Parse the ibm,dma-window property of an OF node into the busno, phys and   * size parameters. @@ -44,16 +78,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,  extern void kdump_move_device_tree(void); -/* cache lookup */ -struct device_node *of_find_next_cache_node(struct device_node *np); - -#ifdef CONFIG_NUMA -extern int of_node_to_nid(struct device_node *device); -#else -static inline int of_node_to_nid(struct device_node *device) { return 0; } -#endif -#define of_node_to_nid of_node_to_nid -  extern void of_instantiate_rtc(void);  extern int of_get_ibm_chip_id(struct device_node *np); @@ -143,14 +167,5 @@ struct of_drconf_cell {   */  extern unsigned char ibm_architecture_vec[]; -/* These includes are put at the bottom because they may contain things - * that are overridden by this file.  Ideally they shouldn't be included - * by this file, but there are a bunch of .c files that currently depend - * on it.  Eventually they will be cleaned up. 
*/ -#include <linux/of_fdt.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> -  #endif /* __KERNEL__ */  #endif /* _POWERPC_PROM_H */ diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 678a7c1d9cb..a1bc7e75842 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -21,7 +21,6 @@  #if !defined(_ASM_POWERPC_PS3_H)  #define _ASM_POWERPC_PS3_H -#include <linux/init.h>  #include <linux/types.h>  #include <linux/device.h>  #include <asm/cell-pmu.h> diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h index 0156702ba24..576ad88104c 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/pte-book3e.h @@ -40,7 +40,7 @@  #define _PAGE_U1	0x010000  #define _PAGE_U0	0x020000  #define _PAGE_ACCESSED	0x040000 -#define _PAGE_LENDIAN	0x080000 +#define _PAGE_ENDIAN	0x080000  #define _PAGE_GUARDED	0x100000  #define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */  #define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */ diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h index 0419eeb5327..2505d8eab15 100644 --- a/arch/powerpc/include/asm/pte-hash64.h +++ b/arch/powerpc/include/asm/pte-hash64.h @@ -19,7 +19,7 @@  #define _PAGE_FILE		0x0002 /* (!present only) software: pte holds file offset */  #define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */  #define _PAGE_GUARDED		0x0008 -#define _PAGE_COHERENT		0x0010 /* M: enforce memory coherence (SMP systems) */ +/* We can derive Memory coherence from _PAGE_NO_CACHE */  #define _PAGE_NO_CACHE		0x0020 /* I: cache inhibit */  #define _PAGE_WRITETHRU		0x0040 /* W: cache write-through */  #define _PAGE_DIRTY		0x0080 /* C: page changed */ @@ -27,6 +27,12 @@  #define _PAGE_RW		0x0200 /* software: user write access allowed */  #define _PAGE_BUSY		0x0800 /* software: PTE & hash are busy */ +/* + * Used for tracking numa faults + */ +#define _PAGE_NUMA	0x00000010 /* Gather numa placement stats */ + +  /* No separate kernel read-only */  #define _PAGE_KERNEL_RW		(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */  #define _PAGE_KERNEL_RO		 _PAGE_KERNEL_RW diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index becc08e6a65..279b80f3bb2 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -28,11 +28,23 @@  #ifdef __powerpc64__ +/* + * Size of redzone that userspace is allowed to use below the stack + * pointer.  This is 288 in the 64-bit big-endian ELF ABI, and 512 in + * the new ELFv2 little-endian ABI, so we allow the larger amount. + * + * For kernel code we allow a 288-byte redzone, in order to conserve + * kernel stack space; gcc currently only uses 288 bytes, and will + * hopefully allow explicit control of the redzone size in future. + */ +#define USER_REDZONE_SIZE	512 +#define KERNEL_REDZONE_SIZE	288 +  #define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */  #define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */  #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x7265677368657265)  #define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + \ -					STACK_FRAME_OVERHEAD + 288) +				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)  #define STACK_FRAME_MARKER	12  /* Size of dummy stack frame allocated when calling signal handler. 
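 *
 * Editor's note, illustrative arithmetic only (all values from the
 * definitions above): the kernel's interrupt frame reserves just the
 * smaller kernel redzone,
 *
 *	STACK_INT_FRAME_SIZE = sizeof(struct pt_regs)
 *			       + STACK_FRAME_OVERHEAD (112)
 *			       + KERNEL_REDZONE_SIZE  (288)
 *
 * while anything writing below a user stack pointer, signal frames
 * included, must skip the full USER_REDZONE_SIZE (512) so an ELFv2
 * process's redzone is never overwritten.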
*/ @@ -41,6 +53,8 @@  #else /* __powerpc64__ */ +#define USER_REDZONE_SIZE	0 +#define KERNEL_REDZONE_SIZE	0  #define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */  #define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */  #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 10d1ef016bf..bffd89d2730 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -115,7 +115,12 @@  #define MSR_64BIT	MSR_SF  /* Server variant */ -#define MSR_		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#define __MSR		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#ifdef __BIG_ENDIAN__ +#define MSR_		__MSR +#else +#define MSR_		(__MSR | MSR_LE) +#endif  #define MSR_KERNEL	(MSR_ | MSR_64BIT)  #define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)  #define MSR_USER64	(MSR_USER32 | MSR_64BIT) @@ -208,7 +213,9 @@  #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */  #define SPRN_TFIAR	0x81	/* Transaction Failure Inst Addr   */  #define SPRN_TEXASR	0x82	/* Transaction EXception & Summary */ +#define   TEXASR_FS	__MASK(63-36)	/* Transaction Failure Summary */  #define SPRN_TEXASRU	0x83	/* ''	   ''	   ''	 Upper 32  */ +#define   TEXASR_FS     __MASK(63-36) /* TEXASR Failure Summary */  #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */  #define SPRN_CTRLF	0x088  #define SPRN_CTRLT	0x098 @@ -218,17 +225,27 @@  #define   CTRL_TE	0x00c00000	/* thread enable */  #define   CTRL_RUNLATCH	0x1  #define SPRN_DAWR	0xB4 +#define SPRN_RPR	0xBA	/* Relative Priority Register */ +#define SPRN_CIABR	0xBB +#define   CIABR_PRIV		0x3 +#define   CIABR_PRIV_USER	1 +#define   CIABR_PRIV_SUPER	2 +#define   CIABR_PRIV_HYPER	3  #define SPRN_DAWRX	0xBC -#define   DAWRX_USER	(1UL << 0) -#define   DAWRX_KERNEL	(1UL << 1) -#define   DAWRX_HYP	(1UL << 2) +#define   DAWRX_USER	__MASK(0) +#define   DAWRX_KERNEL	__MASK(1) +#define   DAWRX_HYP	__MASK(2) +#define   DAWRX_WTI	__MASK(3) +#define   DAWRX_WT	__MASK(4) +#define   DAWRX_DR	__MASK(5) +#define   DAWRX_DW	__MASK(6)  #define SPRN_DABR	0x3F5	/* Data Address Breakpoint Register */  #define SPRN_DABR2	0x13D	/* e300 */  #define SPRN_DABRX	0x3F7	/* Data Address Breakpoint Register Extension */ -#define   DABRX_USER	(1UL << 0) -#define   DABRX_KERNEL	(1UL << 1) -#define   DABRX_HYP	(1UL << 2) -#define   DABRX_BTI	(1UL << 3) +#define   DABRX_USER	__MASK(0) +#define   DABRX_KERNEL	__MASK(1) +#define   DABRX_HYP	__MASK(2) +#define   DABRX_BTI	__MASK(3)  #define   DABRX_ALL     (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)  #define SPRN_DAR	0x013	/* Data Address Register */  #define SPRN_DBCR	0x136	/* e300 Data Breakpoint Control Reg */ @@ -243,6 +260,7 @@  #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */  #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */  #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */ +#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */  #define SPRN_SPURR	0x134	/* Scaled PURR */  #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */  #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */ @@ -254,6 +272,14 @@  #define SPRN_HRMOR	0x139	/* Real mode offset register */  #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */  #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */ +#define SPRN_IC		0x350	/* Virtual Instruction Count */ +#define SPRN_VTB	0x351	/* Virtual Time Base */ +#define SPRN_LDBAR	0x352	/* LD Base Address Register */ +#define SPRN_PMICR	
0x354   /* Power Management Idle Control Reg */ +#define SPRN_PMSR	0x355   /* Power Management Status Reg */ +#define SPRN_PMMAR	0x356	/* Power Management Memory Activity Register */ +#define SPRN_PMCR	0x374	/* Power Management Control Register */ +  /* HFSCR and FSCR bit numbers are the same */  #define FSCR_TAR_LG	8	/* Enable Target Address Register */  #define FSCR_EBB_LG	7	/* Enable Event Based Branching */ @@ -283,6 +309,7 @@  #define   LPCR_ISL	(1ul << (63-2))  #define   LPCR_VC_SH	(63-2)  #define   LPCR_DPFD_SH	(63-11) +#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)  #define   LPCR_VRMASD	(0x1ful << (63-16))  #define   LPCR_VRMA_L	(1ul << (63-12))  #define   LPCR_VRMA_LP0	(1ul << (63-15)) @@ -291,14 +318,19 @@  #define   LPCR_RMLS    0x1C000000      /* impl dependent rmo limit sel */  #define	  LPCR_RMLS_SH	(63-37)  #define   LPCR_ILE     0x02000000      /* !HV irqs set MSR:LE */ +#define   LPCR_AIL	0x01800000	/* Alternate interrupt location */  #define   LPCR_AIL_0	0x00000000	/* MMU off exception offset 0x0 */  #define   LPCR_AIL_3	0x01800000	/* MMU on exception offset 0xc00...4xxx */ -#define   LPCR_PECE	0x00007000	/* powersave exit cause enable */ +#define   LPCR_ONL	0x00040000	/* online - PURR/SPURR count */ +#define   LPCR_PECE	0x0001f000	/* powersave exit cause enable */ +#define     LPCR_PECEDP	0x00010000	/* directed priv dbells cause exit */ +#define     LPCR_PECEDH	0x00008000	/* directed hyp dbells cause exit */  #define     LPCR_PECE0	0x00004000	/* ext. exceptions can cause exit */  #define     LPCR_PECE1	0x00002000	/* decrementer can cause exit */  #define     LPCR_PECE2	0x00001000	/* machine check etc can cause exit */  #define   LPCR_MER	0x00000800	/* Mediated External Exception */  #define   LPCR_MER_SH	11 +#define   LPCR_TC      0x00000200	/* Translation control */  #define   LPCR_LPES    0x0000000c  #define   LPCR_LPES0   0x00000008      /* LPAR Env selector 0 */  #define   LPCR_LPES1   0x00000004      /* LPAR Env selector 1 */ @@ -311,6 +343,12 @@  #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */  #define	SPRN_HMER	0x150	/* Hardware m? error recovery */  #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */ +#define SPRN_PCR	0x152	/* Processor compatibility register */ +#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */ +#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */ +#define   PCR_TM_DIS	(1ul << (63-2))	/* Trans. memory disable (POWER8) */ +#define   PCR_ARCH_206	0x4		/* Architecture 2.06 */ +#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */  #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */  #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */  #define SPRN_TLBVPNR	0x155	/* P7 TLB control register */ @@ -356,6 +394,8 @@  #define DER_EBRKE	0x00000002	/* External Breakpoint Interrupt */  #define DER_DPIE	0x00000001	/* Dev. Port Nonmaskable Request */  #define SPRN_DMISS	0x3D0		/* Data TLB Miss Register */ +#define SPRN_DHDES	0x0B1		/* Directed Hyp. Doorbell Exc. State */ +#define SPRN_DPDES	0x0B0		/* Directed Priv. Doorbell Exc. State */  #define SPRN_EAR	0x11A		/* External Address Register */  #define SPRN_HASH1	0x3D2		/* Primary Hash Address Register */  #define SPRN_HASH2	0x3D3		/* Secondary Hash Address Resgister */ @@ -397,6 +437,12 @@  #define HID0_BTCD	(1<<1)		/* Branch target cache disable */  #define HID0_NOPDST	(1<<1)		/* No-op dst, dstt, etc. instr. */  #define HID0_NOPTI	(1<<0)		/* No-op dcbt and dcbst instr. 
*/ +/* POWER8 HID0 bits */ +#define HID0_POWER8_4LPARMODE	__MASK(61) +#define HID0_POWER8_2LPARMODE	__MASK(57) +#define HID0_POWER8_1TO2LPAR	__MASK(52) +#define HID0_POWER8_1TO4LPAR	__MASK(51) +#define HID0_POWER8_DYNLPARDIS	__MASK(48)  #define SPRN_HID1	0x3F1		/* Hardware Implementation Register 1 */  #ifdef CONFIG_6xx @@ -415,11 +461,13 @@  #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */  #define SPRN_IABR2	0x3FA		/* 83xx */  #define SPRN_IBCR	0x135		/* 83xx Insn Breakpoint Control Reg */ +#define SPRN_IAMR	0x03D		/* Instr. Authority Mask Reg */  #define SPRN_HID4	0x3F4		/* 970 HID4 */  #define  HID4_LPES0	 (1ul << (63-0)) /* LPAR env. sel. bit 0 */  #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */  #define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */  #define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */ +#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)  #define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */  #define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */  #define	 HID4_LPID1_SH	 0		/* partition ID top 2 bits */ @@ -528,6 +576,7 @@  #define SPRN_PIR	0x3FF	/* Processor Identification Register */  #endif  #define SPRN_TIR	0x1BE	/* Thread Identification Register */ +#define SPRN_PSPB	0x09F	/* Problem State Priority Boost reg */  #define SPRN_PTEHI	0x3D5	/* 981 7450 PTE HI word (S/W TLB load) */  #define SPRN_PTELO	0x3D6	/* 982 7450 PTE LO word (S/W TLB load) */  #define SPRN_PURR	0x135	/* Processor Utilization of Resources Reg */ @@ -543,9 +592,13 @@  #define SPRN_SPRG3	0x113	/* Special Purpose Register General 3 */  #define SPRN_USPRG3	0x103	/* SPRG3 userspace read */  #define SPRN_SPRG4	0x114	/* Special Purpose Register General 4 */ +#define SPRN_USPRG4	0x104	/* SPRG4 userspace read */  #define SPRN_SPRG5	0x115	/* Special Purpose Register General 5 */ +#define SPRN_USPRG5	0x105	/* SPRG5 userspace read */  #define SPRN_SPRG6	0x116	/* Special Purpose Register General 6 */ +#define SPRN_USPRG6	0x106	/* SPRG6 userspace read */  #define SPRN_SPRG7	0x117	/* Special Purpose Register General 7 */ +#define SPRN_USPRG7	0x107	/* SPRG7 userspace read */  #define SPRN_SRR0	0x01A	/* Save/Restore Register 0 */  #define SPRN_SRR1	0x01B	/* Save/Restore Register 1 */  #define   SRR1_ISI_NOPT		0x40000000 /* ISI: Not found in hash */ @@ -627,16 +680,20 @@  #define   MMCR0_PROBLEM_DISABLE MMCR0_FCP  #define   MMCR0_FCM1	0x10000000UL /* freeze counters while MSR mark = 1 */  #define   MMCR0_FCM0	0x08000000UL /* freeze counters while MSR mark = 0 */ -#define   MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */ -#define   MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */ +#define   MMCR0_PMXE	ASM_CONST(0x04000000) /* perf mon exception enable */ +#define   MMCR0_FCECE	ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */  #define   MMCR0_TBEE	0x00400000UL /* time base exception enable */ +#define   MMCR0_BHRBA	0x00200000UL /* BHRB Access allowed in userspace */  #define   MMCR0_EBE	0x00100000UL /* Event based branch enable */  #define   MMCR0_PMCC	0x000c0000UL /* PMC control */  #define   MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */  #define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/ -#define   MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/ +#define   MMCR0_PMCjCE	ASM_CONST(0x00004000) /* PMCj count enable*/  #define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */ -#define   MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 
after handling exception */ +#define   MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */ +#define   MMCR0_C56RUN	ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */ +/* performance monitor alert has occurred, set to 0 after handling exception */ +#define   MMCR0_PMAO	ASM_CONST(0x00000080)  #define   MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */  #define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */  #define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */ @@ -669,6 +726,8 @@  #define SPRN_EBBHR	804	/* Event based branch handler register */  #define SPRN_EBBRR	805	/* Event based branch return register */  #define SPRN_BESCR	806	/* Branch event status and control register */ +#define   BESCR_GE	0x8000000000000000ULL /* Global Enable */ +#define SPRN_WORT	895	/* Workload optimization register - thread */  #define SPRN_PMC1	787  #define SPRN_PMC2	788 @@ -685,6 +744,11 @@  #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */  #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */  #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */ +#define SPRN_TACR	888 +#define SPRN_TCSCR	889 +#define SPRN_CSIGR	890 +#define SPRN_SPMC1	892 +#define SPRN_SPMC2	893  /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */  #define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO) @@ -839,11 +903,10 @@   * 64-bit embedded   *	- SPRG0 generic exception scratch   *	- SPRG2 TLB exception stack - *	- SPRG3 critical exception scratch and - *        CPU and NUMA node for VDSO getcpu (user visible) + *	- SPRG3 critical exception scratch (user visible, sorry!)   *	- SPRG4 unused (user visible)   *	- SPRG6 TLB miss scratch (user visible, sorry !) - *	- SPRG7 critical exception scratch + *	- SPRG7 CPU and NUMA node for VDSO getcpu (user visible)   *	- SPRG8 machine check exception scratch   *	- SPRG9 debug exception scratch   * @@ -900,6 +963,8 @@  #define SPRN_SPRG_SCRATCH0	SPRN_SPRG2  #define SPRN_SPRG_HPACA		SPRN_HSPRG0  #define SPRN_SPRG_HSCRATCH0	SPRN_HSPRG1 +#define SPRN_SPRG_VDSO_READ	SPRN_USPRG3 +#define SPRN_SPRG_VDSO_WRITE	SPRN_SPRG3  #define GET_PACA(rX)					\  	BEGIN_FTR_SECTION_NESTED(66);			\ @@ -943,6 +1008,8 @@  #define SPRN_SPRG_TLB_SCRATCH	SPRN_SPRG6  #define SPRN_SPRG_GEN_SCRATCH	SPRN_SPRG0  #define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH +#define SPRN_SPRG_VDSO_READ	SPRN_USPRG7 +#define SPRN_SPRG_VDSO_WRITE	SPRN_SPRG7  #define SET_PACA(rX)	mtspr	SPRN_SPRG_PACA,rX  #define GET_PACA(rX)	mfspr	rX,SPRN_SPRG_PACA @@ -1062,6 +1129,10 @@  #define PVR_8560	0x80200000  #define PVR_VER_E500V1	0x8020  #define PVR_VER_E500V2	0x8021 +#define PVR_VER_E500MC	0x8023 +#define PVR_VER_E5500	0x8024 +#define PVR_VER_E6500	0x8040 +  /*   * For the 8xx processors, all of them report the same PVR family for   * the PowerPC core. 
The various versions of these processors must be @@ -1102,6 +1173,13 @@  #define PVR_BE		0x0070  #define PVR_PA6T	0x0090 +/* "Logical" PVR values defined in PAPR, representing architecture levels */ +#define PVR_ARCH_204	0x0f000001 +#define PVR_ARCH_205	0x0f000002 +#define PVR_ARCH_206	0x0f000003 +#define PVR_ARCH_206p	0x0f100003 +#define PVR_ARCH_207	0x0f000004 +  /* Macros for setting and retrieving special purpose registers */  #ifndef __ASSEMBLY__  #define mfmsr()		({unsigned long rval; \ @@ -1154,12 +1232,19 @@  #else /* __powerpc64__ */ +#if defined(CONFIG_8xx) +#define mftbl()		({unsigned long rval;	\ +			asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#define mftbu()		({unsigned long rval;	\ +			asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#else  #define mftbl()		({unsigned long rval;	\  			asm volatile("mfspr %0, %1" : "=r" (rval) : \  				"i" (SPRN_TBRL)); rval;})  #define mftbu()		({unsigned long rval;	\  			asm volatile("mfspr %0, %1" : "=r" (rval) : \  				"i" (SPRN_TBRU)); rval;}) +#endif  #endif /* !__powerpc64__ */  #define mttbl(v)	asm volatile("mttbl %0":: "r"(v)) diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h index 3d52a1132f3..3ba9c6f096f 100644 --- a/arch/powerpc/include/asm/reg_a2.h +++ b/arch/powerpc/include/asm/reg_a2.h @@ -110,15 +110,6 @@  #define TLB1_UR			ASM_CONST(0x0000000000000002)  #define TLB1_SR			ASM_CONST(0x0000000000000001) -#ifdef CONFIG_PPC_EARLY_DEBUG_WSP -#define WSP_UART_PHYS	0xffc000c000 -/* This needs to be careful chosen to hit a !0 congruence class - * in the TLB since we bolt it in way 3, which is already occupied - * by our linear mapping primary bolted entry in CC 0. - */ -#define WSP_UART_VIRT	0xf000000000001000 -#endif -  /* A2 erativax attributes definitions */  #define ERATIVAX_RS_IS_ALL		0x000  #define ERATIVAX_RS_IS_TID		0x040 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index ed8f836da09..464f1089b53 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -101,6 +101,7 @@  #define SPRN_IVOR39	0x1B1	/* Interrupt Vector Offset Register 39 */  #define SPRN_IVOR40	0x1B2	/* Interrupt Vector Offset Register 40 */  #define SPRN_IVOR41	0x1B3	/* Interrupt Vector Offset Register 41 */ +#define SPRN_IVOR42	0x1B4	/* Interrupt Vector Offset Register 42 */  #define SPRN_GIVOR2	0x1B8	/* Guest IVOR2 */  #define SPRN_GIVOR3	0x1B9	/* Guest IVOR3 */  #define SPRN_GIVOR4	0x1BA	/* Guest IVOR4 */ @@ -170,6 +171,7 @@  #define SPRN_L2CSR1	0x3FA	/* L2 Data Cache Control and Status Register 1 */  #define SPRN_DCCR	0x3FA	/* Data Cache Cacheability Register */  #define SPRN_ICCR	0x3FB	/* Instruction Cache Cacheability Register */ +#define SPRN_PWRMGTCR0	0x3FB	/* Power management control register 0 */  #define SPRN_SVR	0x3FF	/* System Version Register */  /* @@ -216,6 +218,14 @@  #define	CCR1_DPC	0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */  #define	CCR1_TCS	0x00000080 /* Timer Clock Select */ +/* Bit definitions for PWRMGTCR0. */ +#define PWRMGTCR0_PW20_WAIT		(1 << 14) /* PW20 state enable bit */ +#define PWRMGTCR0_PW20_ENT_SHIFT	8 +#define PWRMGTCR0_PW20_ENT		0x3F00 +#define PWRMGTCR0_AV_IDLE_PD_EN		(1 << 22) /* Altivec idle enable */ +#define PWRMGTCR0_AV_IDLE_CNT_SHIFT	16 +#define PWRMGTCR0_AV_IDLE_CNT		0x3F0000 +  /* Bit definitions for the MCSR. 
*/  #define MCSR_MCS	0x80000000 /* Machine Check Summary */  #define MCSR_IB		0x40000000 /* Instruction PLB Error */ @@ -381,7 +391,7 @@  #define DBCR0_IA34T	0x00004000	/* Instr Addr 3-4 range Toggle */  #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */ -#define dbcr_iac_range(task)	((task)->thread.dbcr0) +#define dbcr_iac_range(task)	((task)->thread.debug.dbcr0)  #define DBCR_IAC12I	DBCR0_IA12			/* Range Inclusive */  #define DBCR_IAC12X	(DBCR0_IA12 | DBCR0_IA12X)	/* Range Exclusive */  #define DBCR_IAC12MODE	(DBCR0_IA12 | DBCR0_IA12X)	/* IAC 1-2 Mode Bits */ @@ -395,7 +405,7 @@  #define DBCR1_DAC1W	0x20000000	/* DAC1 Write Debug Event */  #define DBCR1_DAC2W	0x10000000	/* DAC2 Write Debug Event */ -#define dbcr_dac(task)	((task)->thread.dbcr1) +#define dbcr_dac(task)	((task)->thread.debug.dbcr1)  #define DBCR_DAC1R	DBCR1_DAC1R  #define DBCR_DAC1W	DBCR1_DAC1W  #define DBCR_DAC2R	DBCR1_DAC2R @@ -441,7 +451,7 @@  #define DBCR0_CRET	0x00000020	/* Critical Return Debug Event */  #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */ -#define dbcr_dac(task)	((task)->thread.dbcr0) +#define dbcr_dac(task)	((task)->thread.debug.dbcr0)  #define DBCR_DAC1R	DBCR0_DAC1R  #define DBCR_DAC1W	DBCR0_DAC1W  #define DBCR_DAC2R	DBCR0_DAC2R @@ -475,7 +485,7 @@  #define DBCR1_IAC34MX	0x000000C0	/* Instr Addr 3-4 range eXclusive */  #define DBCR1_IAC34AT	0x00000001	/* Instr Addr 3-4 range Toggle */ -#define dbcr_iac_range(task)	((task)->thread.dbcr1) +#define dbcr_iac_range(task)	((task)->thread.debug.dbcr1)  #define DBCR_IAC12I	DBCR1_IAC12M	/* Range Inclusive */  #define DBCR_IAC12X	DBCR1_IAC12MX	/* Range Exclusive */  #define DBCR_IAC12MODE	DBCR1_IAC12MX	/* IAC 1-2 Mode Bits */ @@ -573,6 +583,7 @@  /* Bit definitions for L1CSR0. */  #define L1CSR0_CPE	0x00010000	/* Data Cache Parity Enable */ +#define L1CSR0_CUL	0x00000400	/* Data Cache Unable to Lock */  #define L1CSR0_CLFC	0x00000100	/* Cache Lock Bits Flash Clear */  #define L1CSR0_DCFI	0x00000002	/* Data Cache Flash Invalidate */  #define L1CSR0_CFI	0x00000002	/* Cache Flash Invalidate */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 9bd52c65e66..b390f55b0df 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -150,19 +150,53 @@ struct rtas_suspend_me_data {  #define RTAS_VECTOR_EXTERNAL_INTERRUPT	0x500  struct rtas_error_log { -	unsigned long version:8;		/* Architectural version */ -	unsigned long severity:3;		/* Severity level of error */ -	unsigned long disposition:2;		/* Degree of recovery */ -	unsigned long extended:1;		/* extended log present? */ -	unsigned long /* reserved */ :2;	/* Reserved for future use */ -	unsigned long initiator:4;		/* Initiator of event */ -	unsigned long target:4;			/* Target of failed operation */ -	unsigned long type:8;			/* General event or error*/ -	unsigned long extended_log_length:32;	/* length in bytes */ -	unsigned char buffer[1];		/* Start of extended log */ +	/* Byte 0 */ +	uint8_t		byte0;			/* Architectural version */ + +	/* Byte 1 */ +	uint8_t		byte1; +	/* XXXXXXXX +	 * XXX		3: Severity level of error +	 *    XX	2: Degree of recovery +	 *      X	1: Extended log present? +	 *       XX	2: Reserved +	 */ + +	/* Byte 2 */ +	uint8_t		byte2; +	/* XXXXXXXX +	 * XXXX		4: Initiator of event +	 *     XXXX	4: Target of failed operation +	 */ +	uint8_t		byte3;			/* General event or error*/ +	__be32		extended_log_length;	/* length in bytes */ +	unsigned char	buffer[1];		/* Start of extended log */  						/* Variable length.      
*/  }; +static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog) +{ +	return (elog->byte1 & 0xE0) >> 5; +} + +static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog) +{ +	return (elog->byte1 & 0x18) >> 3; +} + +static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog) +{ +	return (elog->byte1 & 0x04) >> 2; +} + +#define rtas_error_type(x)	((x)->byte3) + +static inline +uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog) +{ +	return be32_to_cpu(elog->extended_log_length); +} +  #define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG	14  #define RTAS_V6EXT_COMPANY_ID_IBM	(('I' << 24) | ('B' << 16) | ('M' << 8)) @@ -172,32 +206,35 @@ struct rtas_error_log {   */  struct rtas_ext_event_log_v6 {  	/* Byte 0 */ -	uint32_t log_valid:1;		/* 1:Log valid */ -	uint32_t unrecoverable_error:1;	/* 1:Unrecoverable error */ -	uint32_t recoverable_error:1;	/* 1:recoverable (correctable	*/ -					/*   or successfully retried)	*/ -	uint32_t degraded_operation:1;	/* 1:Unrecoverable err, bypassed*/ -					/*   - degraded operation (e.g.	*/ -					/*   CPU or mem taken off-line)	*/ -	uint32_t predictive_error:1; -	uint32_t new_log:1;		/* 1:"New" log (Always 1 for	*/ -					/*   data returned from RTAS	*/ -	uint32_t big_endian:1;		/* 1: Big endian */ -	uint32_t :1;			/* reserved */ +	uint8_t byte0; +	/* XXXXXXXX +	 * X		1: Log valid +	 *  X		1: Unrecoverable error +	 *   X		1: Recoverable (correctable or successfully retried) +	 *    X		1: Bypassed unrecoverable error (degraded operation) +	 *     X	1: Predictive error +	 *      X	1: "New" log (always 1 for data returned from RTAS) +	 *       X	1: Big Endian +	 *        X	1: Reserved +	 */ +  	/* Byte 1 */ -	uint32_t :8;			/* reserved */ +	uint8_t byte1;			/* reserved */ +  	/* Byte 2 */ -	uint32_t powerpc_format:1;	/* Set to 1 (indicating log is	*/ -					/* in PowerPC format		*/ -	uint32_t :3;			/* reserved */ -	uint32_t log_format:4;		/* Log format indicator. Define	*/ -					/* format used for byte 12-2047	*/ +	uint8_t byte2; +	/* XXXXXXXX +	 * X		1: Set to 1 (indicating log is in PowerPC format) +	 *  XXX		3: Reserved +	 *     XXXX	4: Log format used for bytes 12-2047 +	 */ +  	/* Byte 3 */ -	uint32_t :8;			/* reserved */ +	uint8_t byte3;			/* reserved */  	/* Byte 4-11 */  	uint8_t reserved[8];		/* reserved */  	/* Byte 12-15 */ -	uint32_t company_id;		/* Company ID of the company	*/ +	__be32  company_id;		/* Company ID of the company	*/  					/* that defines the format for	*/  					/* the vendor specific log type	*/  	/* Byte 16-end of log */ @@ -205,6 +242,18 @@ struct rtas_ext_event_log_v6 {  					/* Variable length.		
*/  }; +static +inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log) +{ +	return ext_log->byte2 & 0x0F; +} + +static +inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log) +{ +	return be32_to_cpu(ext_log->company_id); +} +  /* pSeries event log format */  /* Two bytes ASCII section IDs */ @@ -227,14 +276,26 @@ struct rtas_ext_event_log_v6 {  /* Vendor specific Platform Event Log Format, Version 6, section header */  struct pseries_errorlog { -	uint16_t id;			/* 0x00 2-byte ASCII section ID	*/ -	uint16_t length;		/* 0x02 Section length in bytes	*/ +	__be16 id;			/* 0x00 2-byte ASCII section ID	*/ +	__be16 length;			/* 0x02 Section length in bytes	*/  	uint8_t version;		/* 0x04 Section version		*/  	uint8_t subtype;		/* 0x05 Section subtype		*/ -	uint16_t creator_component;	/* 0x06 Creator component ID	*/ +	__be16 creator_component;	/* 0x06 Creator component ID	*/  	uint8_t data[];			/* 0x08 Start of section data	*/  }; +static +inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect) +{ +	return be16_to_cpu(sect->id); +} + +static +inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) +{ +	return be16_to_cpu(sect->length); +} +  struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,  					      uint16_t section_id); @@ -283,6 +344,7 @@ extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);  #ifdef CONFIG_PPC_PSERIES  extern int pseries_devicetree_update(s32 scope); +extern void post_mobility_fixup(void);  #endif  #ifdef CONFIG_PPC_RTAS_DAEMON diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h index 0cabfd7bc2d..f5cde45b116 100644 --- a/arch/powerpc/include/asm/scom.h +++ b/arch/powerpc/include/asm/scom.h @@ -54,8 +54,8 @@ struct scom_controller {  	scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);  	void (*unmap)(scom_map_t map); -	u64 (*read)(scom_map_t map, u32 reg); -	void (*write)(scom_map_t map, u32 reg, u64 value); +	int (*read)(scom_map_t map, u64 reg, u64 *value); +	int (*write)(scom_map_t map, u64 reg, u64 value);  };  extern const struct scom_controller *scom_controller; @@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map)   * scom_read - Read a SCOM register   * @map: Result of scom_map   * @reg: Register index within that map + * @value: Updated with the value read + * + * Returns 0 (success) or a negative error code   */ -static inline u64 scom_read(scom_map_t map, u32 reg) +static inline int scom_read(scom_map_t map, u64 reg, u64 *value)  { -	return scom_controller->read(map, reg); +	int rc; + +	rc = scom_controller->read(map, reg, value); +	if (rc) +		*value = 0xfffffffffffffffful; +	return rc;  }  /** @@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg)   * @map: Result of scom_map   * @reg: Register index within that map   * @value: Value to write + * + * Returns 0 (success) or a negative error code   */ -static inline void scom_write(scom_map_t map, u32 reg, u64 value) +static inline int scom_write(scom_map_t map, u64 reg, u64 value)  { -	scom_controller->write(map, reg, value); +	return scom_controller->write(map, reg, value);  } +  #endif /* CONFIG_PPC_SCOM */  #endif /* __ASSEMBLY__ */  #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe15de..a5e930aca80 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -8,6 +8,7 @@  #ifdef __powerpc64__ +extern char 
__start_interrupts[];  extern char __end_interrupts[];  extern char __prom_init_toc_start[]; @@ -21,12 +22,35 @@ static inline int in_kernel_text(unsigned long addr)  	return 0;  } +static inline int overlaps_interrupt_vector_text(unsigned long start, +							unsigned long end) +{ +	unsigned long real_start, real_end; +	real_start = __start_interrupts - _stext; +	real_end = __end_interrupts - _stext; + +	return start < (unsigned long)__va(real_end) && +		(unsigned long)__va(real_start) < end; +} +  static inline int overlaps_kernel_text(unsigned long start, unsigned long end)  {  	return start < (unsigned long)__init_end &&  		(unsigned long)_stext < end;  } +static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end) +{ +#ifdef CONFIG_KVM_GUEST +	extern char kvm_tmp[]; +	return start < (unsigned long)kvm_tmp && +		(unsigned long)&kvm_tmp[1024 * 1024] < end; +#else +	return 0; +#endif +} + +#if !defined(_CALL_ELF) || _CALL_ELF != 2  #undef dereference_function_descriptor  static inline void *dereference_function_descriptor(void *ptr)  { @@ -37,6 +61,7 @@ static inline void *dereference_function_descriptor(void *ptr)  		ptr = p;  	return ptr;  } +#endif  #endif diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d3ca85529b8..11ba86e1763 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,6 +23,11 @@ extern void reloc_got2(unsigned long);  #define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x))) +void check_for_initrd(void); +void do_init_bootmem(void); +void setup_panic(void); +#define ARCH_PANIC_TIMEOUT 180 +  #endif /* !__ASSEMBLY__ */  #endif	/* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index 3a7a67a0d00..d89beaba26f 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -125,7 +125,7 @@  #define FP_EX_DIVZERO         (1 << (31 - 5))  #define FP_EX_INEXACT         (1 << (31 - 6)) -#define __FPU_FPSCR	(current->thread.fpscr.val) +#define __FPU_FPSCR	(current->thread.fp_state.fpscr)  /* We only actually write to the destination register   * if exceptions signalled (if any) will not trap. diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 98da78e0c2c..5a6614a7f0b 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -33,6 +33,7 @@ extern int boot_cpuid;  extern int spinning_secondaries;  extern void cpu_die(void); +extern int cpu_to_chip_id(int cpu);  #ifdef CONFIG_SMP @@ -67,14 +68,6 @@ void generic_mach_cpu_die(void);  void generic_set_cpu_dead(unsigned int cpu);  void generic_set_cpu_up(unsigned int cpu);  int generic_check_cpu_restart(unsigned int cpu); - -extern void inhibit_secondary_onlining(void); -extern void uninhibit_secondary_onlining(void); - -#else /* HOTPLUG_CPU */ -static inline void inhibit_secondary_onlining(void) {} -static inline void uninhibit_secondary_onlining(void) {} -  #endif  #ifdef CONFIG_PPC64 @@ -112,7 +105,6 @@ static inline struct cpumask *cpu_core_mask(int cpu)  }  extern int cpu_to_core_id(int cpu); -extern int cpu_to_chip_id(int cpu);  /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.   * @@ -120,7 +112,7 @@ extern int cpu_to_chip_id(int cpu);   * in /proc/interrupts will be wrong!!! 
--Troy */  #define PPC_MSG_CALL_FUNCTION   0  #define PPC_MSG_RESCHEDULE      1 -#define PPC_MSG_CALL_FUNC_SINGLE	2 +#define PPC_MSG_TICK_BROADCAST	2  #define PPC_MSG_DEBUGGER_BREAK  3  /* for irq controllers that have dedicated ipis per message (4) */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a744dcc..35aa339410b 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@  #include <asm/synch.h>  #include <asm/ppc-opcode.h> -#define arch_spin_is_locked(x)		((x)->slock != 0) +#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */  #ifdef CONFIG_PPC64  /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,6 +54,16 @@  #define SYNC_IO  #endif +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ +	return lock.slock == 0; +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ +	return !arch_spin_value_unlocked(*lock); +} +  /*   * This returns the old value in the lock, so we succeeded   * in getting the lock if the return value is 0. diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 93f280e2327..37b7ca39ec9 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h @@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s);  /* syscalls implemented in spufs */  struct file; +struct coredump_params;  struct spufs_calls {  	long (*create_thread)(const char __user *name,  					unsigned int flags, umode_t mode, @@ -242,7 +243,7 @@ struct spufs_calls {  	long (*spu_run)(struct file *filp, __u32 __user *unpc,  						__u32 __user *ustatus);  	int (*coredump_extra_notes_size)(void); -	int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset); +	int (*coredump_extra_notes_write)(struct coredump_params *cprm);  	void (*notify_spus_active)(void);  	struct module *owner;  }; diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h index b9bd1ca944d..96f59de6185 100644 --- a/arch/powerpc/include/asm/swab.h +++ b/arch/powerpc/include/asm/swab.h @@ -9,10 +9,6 @@  #include <uapi/asm/swab.h> -#ifdef __GNUC__ -#ifndef __powerpc64__ -#endif /* __powerpc64__ */ -  static __inline__ __u16 ld_le16(const volatile __u16 *addr)  {  	__u16 val; @@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)  	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));  	return val;  } -#define __arch_swab16p ld_le16  static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)  {  	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));  } -static inline void __arch_swab16s(__u16 *addr) -{ -	st_le16(addr, *addr); -} -#define __arch_swab16s __arch_swab16s -  static __inline__ __u32 ld_le32(const volatile __u32 *addr)  {  	__u32 val; @@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)  	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));  	return val;  } -#define __arch_swab32p ld_le32  static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)  {  	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));  } -static inline void __arch_swab32s(__u32 *addr) -{ -	st_le32(addr, *addr); -} -#define __arch_swab32s __arch_swab32s - -static inline __attribute_const__ __u16 __arch_swab16(__u16 value) -{ -	__u16 result; - -	__asm__("rlwimi %0,%1,8,16,23" -	    : "=r" (result) -	    : "r" (value), 
"0" (value >> 8)); -	return result; -} -#define __arch_swab16 __arch_swab16 - -static inline __attribute_const__ __u32 __arch_swab32(__u32 value) -{ -	__u32 result; - -	__asm__("rlwimi %0,%1,24,16,23\n\t" -	    "rlwimi %0,%1,8,8,15\n\t" -	    "rlwimi %0,%1,24,0,7" -	    : "=r" (result) -	    : "r" (value), "0" (value >> 24)); -	return result; -} -#define __arch_swab32 __arch_swab32 - -#endif /* __GNUC__ */  #endif /* _ASM_POWERPC_SWAB_H */ diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 2be5618cdec..58abeda64cb 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -16,25 +16,26 @@ struct thread_struct;  extern struct task_struct *_switch(struct thread_struct *prev,  				   struct thread_struct *next);  #ifdef CONFIG_PPC_BOOK3S_64 -static inline void save_tar(struct thread_struct *prev) +static inline void save_early_sprs(struct thread_struct *prev)  {  	if (cpu_has_feature(CPU_FTR_ARCH_207S))  		prev->tar = mfspr(SPRN_TAR); +	if (cpu_has_feature(CPU_FTR_DSCR)) +		prev->dscr = mfspr(SPRN_DSCR);  }  #else -static inline void save_tar(struct thread_struct *prev) {} +static inline void save_early_sprs(struct thread_struct *prev) {}  #endif -extern void load_up_fpu(void);  extern void enable_kernel_fp(void);  extern void enable_kernel_altivec(void); -extern void load_up_altivec(struct task_struct *);  extern int emulate_altivec(struct pt_regs *);  extern void __giveup_vsx(struct task_struct *);  extern void giveup_vsx(struct task_struct *);  extern void enable_kernel_spe(void);  extern void giveup_spe(struct task_struct *);  extern void load_up_spe(struct task_struct *); +extern void switch_booke_debug_regs(struct debug_reg *new_debug);  #ifndef CONFIG_SMP  extern void discard_lazy_cpu_state(void); @@ -85,6 +86,8 @@ static inline void clear_task_ebb(struct task_struct *t)  {  #ifdef CONFIG_PPC_BOOK3S_64      /* EBB perf events are not inherited, so clear all EBB state. 
*/ +    t->thread.ebbrr = 0; +    t->thread.ebbhr = 0;      t->thread.bescr = 0;      t->thread.mmcr2 = 0;      t->thread.mmcr0 = 0; diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 43523fe0d8b..babbeca6850 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -62,7 +62,7 @@ COMPAT_SYS_SPU(fcntl)  SYSCALL(ni_syscall)  SYSCALL_SPU(setpgid)  SYSCALL(ni_syscall) -SYSX(sys_ni_syscall,sys_olduname, sys_olduname) +SYSX(sys_ni_syscall,sys_olduname,sys_olduname)  SYSCALL_SPU(umask)  SYSCALL_SPU(chroot)  COMPAT_SYS(ustat) @@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)  SYSCALL_SPU(capget)  SYSCALL_SPU(capset)  COMPAT_SYS(sigaltstack) -COMPAT_SYS_SPU(sendfile) +SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)  SYSCALL(ni_syscall)  SYSCALL(ni_syscall)  PPC_SYS(vfork) @@ -258,7 +258,7 @@ SYSCALL_SPU(tgkill)  COMPAT_SYS_SPU(utimes)  COMPAT_SYS_SPU(statfs64)  COMPAT_SYS_SPU(fstatfs64) -SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64) +SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)  PPC_SYS_SPU(rtas)  OLDSYS(debug_setcontext)  SYSCALL(ni_syscall) @@ -295,7 +295,7 @@ SYSCALL_SPU(mkdirat)  SYSCALL_SPU(mknodat)  SYSCALL_SPU(fchownat)  COMPAT_SYS_SPU(futimesat) -SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64) +SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)  SYSCALL_SPU(unlinkat)  SYSCALL_SPU(renameat)  SYSCALL_SPU(linkat) @@ -359,3 +359,6 @@ COMPAT_SYS(process_vm_readv)  COMPAT_SYS(process_vm_writev)  SYSCALL(finit_module)  SYSCALL(ni_syscall) /* sys_kcmp */ +SYSCALL_SPU(sched_setattr) +SYSCALL_SPU(sched_getattr) +SYSCALL_SPU(renameat2) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ba7b1973866..b034ecdb7c7 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void)  #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE		0x10000000 -  /*   * thread information flag bit numbers   */ @@ -93,8 +91,7 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling  					   TIF_NEED_RESCHED */  #define TIF_32BIT		4	/* 32 bit binary */ -#define TIF_PERFMON_WORK	5	/* work for pfm_handle_work() */ -#define TIF_PERFMON_CTXSW	6	/* perfmon needs ctxsw calls */ +#define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */  #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */  #define TIF_SINGLESTEP		8	/* singlestepping active */  #define TIF_NOHZ		9	/* in adaptive nohz mode */ @@ -107,6 +104,9 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation  						for stack store? */  #define TIF_MEMDIE		17	/* is terminating due to OOM killer */ +#if defined(CONFIG_PPC64) +#define TIF_ELF2ABI		18	/* function descriptors must die! 
*/ +#endif  /* as above, but as bit values */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE) @@ -114,8 +114,7 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_32BIT		(1<<TIF_32BIT) -#define _TIF_PERFMON_WORK	(1<<TIF_PERFMON_WORK) -#define _TIF_PERFMON_CTXSW	(1<<TIF_PERFMON_CTXSW) +#define _TIF_RESTORE_TM		(1<<TIF_RESTORE_TM)  #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)  #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)  #define _TIF_SECCOMP		(1<<TIF_SECCOMP) @@ -131,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)  				 _TIF_NOHZ)  #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ -				 _TIF_NOTIFY_RESUME | _TIF_UPROBE) +				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ +				 _TIF_RESTORE_TM)  #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)  /* Bits in local_flags */ @@ -185,6 +185,12 @@ static inline bool test_thread_local_flags(unsigned int flags)  #define is_32bit_task()	(1)  #endif +#if defined(CONFIG_PPC64) +#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI)) +#else +#define is_elf2_task() (0) +#endif +  #endif	/* !__ASSEMBLY__ */  #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index c1f267694ac..1d428e6007c 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -28,6 +28,7 @@ extern struct clock_event_device decrementer_clockevent;  struct rtc_time;  extern void to_tm(int tim, struct rtc_time * tm);  extern void GregorianDay(struct rtc_time *tm); +extern void tick_broadcast_ipi_handler(void);  extern void generic_calibrate_decr(void); diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 18908caa1f3..2cf846edb3f 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void)  	ret = 0;  	__asm__ __volatile__( +#ifdef CONFIG_8xx +		"97:	mftb %0\n" +#else  		"97:	mfspr %0, %2\n" +#endif  		"99:\n"  		".section __ftr_fixup,\"a\"\n"  		".align 2\n" @@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void)  		"	.long 0\n"  		"	.long 0\n"  		".previous" +#ifdef CONFIG_8xx +		: "=r" (ret) : "i" (CPU_FTR_601)); +#else  		: "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); +#endif  	return ret;  #endif  } diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h index 9dfbc34bdbf..c22d704b6d4 100644 --- a/arch/powerpc/include/asm/tm.h +++ b/arch/powerpc/include/asm/tm.h @@ -7,6 +7,8 @@  #include <uapi/asm/tm.h> +#ifndef __ASSEMBLY__ +  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM  extern void do_load_up_transact_fpu(struct thread_struct *thread);  extern void do_load_up_transact_altivec(struct thread_struct *thread); @@ -15,8 +17,11 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread);  extern void tm_enable(void);  extern void tm_reclaim(struct thread_struct *thread,  		       unsigned long orig_msr, uint8_t cause); +extern void tm_reclaim_current(uint8_t cause);  extern void tm_recheckpoint(struct thread_struct *thread,  			    unsigned long orig_msr);  extern void tm_abort(uint8_t cause);  extern void tm_save_sprs(struct thread_struct *thread);  extern void tm_restore_sprs(struct thread_struct *thread); + +#endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 89e3ef2496a..5f1048eaa5b 100644 --- 
a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -9,22 +9,13 @@ struct device_node;  #ifdef CONFIG_NUMA  /* - * Before going off node we want the VM to try and reclaim from the local - * node. It does this if the remote distance is larger than RECLAIM_DISTANCE. - * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of - * 20, we never reclaim and go off node straight away. - * - * To fix this we choose a smaller value of RECLAIM_DISTANCE. + * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that + * all zones on all nodes will be eligible for zone_reclaim().   */  #define RECLAIM_DISTANCE 10  #include <asm/mmzone.h> -static inline int cpu_to_node(int cpu) -{ -	return numa_cpu_lookup_table[cpu]; -} -  #define parent_node(node)	(node)  #define cpumask_of_node(node) ((node) == -1 ?				\ @@ -91,7 +82,6 @@ static inline int prrn_is_enabled(void)  #ifdef CONFIG_SMP  #include <asm/cputable.h> -#define smt_capable()		(cpu_has_feature(CPU_FTR_SMT))  #ifdef CONFIG_PPC64  #include <asm/smp.h> diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c213..8296381ae43 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h @@ -4,13 +4,18 @@  #ifdef __KERNEL__  /* - * The PowerPC can do unaligned accesses itself in big endian mode. + * The PowerPC can do unaligned accesses itself based on its endian mode.   */  #include <linux/unaligned/access_ok.h>  #include <linux/unaligned/generic.h> +#ifdef __LITTLE_ENDIAN__ +#define get_unaligned	__get_unaligned_le +#define put_unaligned	__put_unaligned_le +#else  #define get_unaligned	__get_unaligned_be  #define put_unaligned	__put_unaligned_be +#endif  #endif	/* __KERNEL__ */  #endif	/* _ASM_POWERPC_UNALIGNED_H */ diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 3ca819f541b..5ce5552ab9f 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@  #include <uapi/asm/unistd.h> -#define __NR_syscalls		355 +#define __NR_syscalls		358  #define __NR__exit __NR_exit  #define NR_syscalls	__NR_syscalls @@ -29,7 +29,6 @@  #define __ARCH_WANT_SYS_GETHOSTNAME  #define __ARCH_WANT_SYS_IPC  #define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK  #define __ARCH_WANT_SYS_SIGNAL  #define __ARCH_WANT_SYS_TIME  #define __ARCH_WANT_SYS_UTIME diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 23016020915..7422a999a39 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -36,8 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;  struct arch_uprobe {  	union { -		u8	insn[MAX_UINSN_BYTES]; -		u32	ainsn; +		u32	insn; +		u32	ixol;  	};  }; @@ -45,11 +45,4 @@ struct arch_uprobe_task {  	unsigned long	saved_trap_nr;  }; -extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); -extern int  arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern int  arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); -extern int  arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); -extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);  #endif	/* _ASM_UPROBES_H */ diff 
--git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 0d9cecddf8a..c53f5f6d176 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -4,11 +4,11 @@  #ifdef __KERNEL__  /* Default link addresses for the vDSOs */ -#define VDSO32_LBASE	0x100000 -#define VDSO64_LBASE	0x100000 +#define VDSO32_LBASE	0x0 +#define VDSO64_LBASE	0x0  /* Default map addresses for 32bit vDSO */ -#define VDSO32_MBASE	VDSO32_LBASE +#define VDSO32_MBASE	0x100000  #define VDSO_VERSION_STRING	LINUX_2.6.15 diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 68d0cc998b1..4f9b7ca0710 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -15,7 +15,6 @@  #define _ASM_POWERPC_VIO_H  #ifdef __KERNEL__ -#include <linux/init.h>  #include <linux/errno.h>  #include <linux/device.h>  #include <linux/dma-mapping.h> diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index d0b6d4ac6dd..9a5c928bb3c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -8,6 +8,8 @@  #include <linux/kernel.h>  #include <asm/asm-compat.h> +#ifdef __BIG_ENDIAN__ +  struct word_at_a_time {  	const unsigned long high_bits, low_bits;  }; @@ -38,4 +40,80 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct  	return (val + c->high_bits) & ~rhs;  } +#else + +struct word_at_a_time { +	const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* Alan Modra's little-endian strlen tail for 64-bit */ +#define create_zero_mask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ +	unsigned long leading_zero_bits; +	long trailing_zero_bit_mask; + +	asm ("addi %1,%2,-1\n\t" +	     "andc %1,%1,%2\n\t" +	     "popcntd %0,%1" +	     : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) +	     : "r" (mask)); +	return leading_zero_bits >> 3; +} + +#else	/* 32-bit case */ + +/* + * This is largely generic for little-endian machines, but the + * optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. 
+ */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ +	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ +	long a = (0x0ff0001+mask) >> 23; +	/* Fix the 1 for 00 case */ +	return a & mask; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ +	bits = (bits - 1) & ~bits; +	return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ +	return count_masked_bytes(mask); +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ +	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; +	*bits = mask; +	return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ +	return bits; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +#endif +  #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h deleted file mode 100644 index c7dc83088a3..00000000000 --- a/arch/powerpc/include/asm/wsp.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - *  Copyright 2011 Michael Ellerman, IBM Corp. - * - *  This program is free software; you can redistribute it and/or - *  modify it under the terms of the GNU General Public License - *  as published by the Free Software Foundation; either version - *  2 of the License, or (at your option) any later version. - */ -#ifndef __ASM_POWERPC_WSP_H -#define __ASM_POWERPC_WSP_H - -extern int wsp_get_chip_id(struct device_node *dn); - -#endif /* __ASM_POWERPC_WSP_H */ diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h index c82eb12a5b1..0abb97f3be1 100644 --- a/arch/powerpc/include/asm/xor.h +++ b/arch/powerpc/include/asm/xor.h @@ -1 +1,68 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard <anton@au.ibm.com> + */ +#ifndef _ASM_POWERPC_XOR_H +#define _ASM_POWERPC_XOR_H + +#ifdef CONFIG_ALTIVEC + +#include <asm/cputable.h> + +void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, +		   unsigned long *v2_in); +void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, +		   unsigned long *v2_in, unsigned long *v3_in); +void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, +		   unsigned long *v2_in, unsigned long *v3_in, +		   unsigned long *v4_in); +void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, +		   unsigned long *v2_in, unsigned long *v3_in, +		   unsigned long *v4_in, unsigned long *v5_in); + +static struct xor_block_template xor_block_altivec = { +	.name = "altivec", +	.do_2 = xor_altivec_2, +	.do_3 = xor_altivec_3, +	.do_4 = xor_altivec_4, +	.do_5 = xor_altivec_5, +}; + +#define XOR_SPEED_ALTIVEC()				\ +	do {						\ +		if (cpu_has_feature(CPU_FTR_ALTIVEC))	\ +			xor_speed(&xor_block_altivec);	\ +	} while (0) +#else +#define XOR_SPEED_ALTIVEC() +#endif + +/* Also try the generic routines. */  #include <asm-generic/xor.h> + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES				\ +do {							\ +	xor_speed(&xor_block_8regs);			\ +	xor_speed(&xor_block_8regs_p);			\ +	xor_speed(&xor_block_32regs);			\ +	xor_speed(&xor_block_32regs_p);			\ +	XOR_SPEED_ALTIVEC();				\ +} while (0) + +#endif /* _ASM_POWERPC_XOR_H */
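
A note on the rtas.h hunks above: the old C bitfields had compiler- and endian-dependent layout, which breaks once a little-endian kernel reads the big-endian data RTAS hands back, so the structs now carry fixed-offset bytes decoded by mask-and-shift helpers. A minimal userspace sketch of that decoding, with a trimmed struct and an invented byte value, to show how the masks line up with the documented bit layout:

/* Hedged sketch: decode severity/disposition/extended from byte1 of an
 * RTAS error log, mirroring rtas_error_severity() and friends above.
 * The struct is trimmed and the value 0xB4 is invented. */
#include <stdint.h>
#include <stdio.h>

struct rtas_error_log_sketch {
	uint8_t byte0;		/* architectural version */
	uint8_t byte1;		/* sev:3 | disp:2 | extended:1 | rsvd:2 */
};

int main(void)
{
	struct rtas_error_log_sketch log = { .byte0 = 6, .byte1 = 0xB4 };

	/* 0xB4 = 1011 0100: severity 5, disposition 2, extended 1 */
	printf("severity    %d\n", (log.byte1 & 0xE0) >> 5);
	printf("disposition %d\n", (log.byte1 & 0x18) >> 3);
	printf("extended    %d\n", (log.byte1 & 0x04) >> 2);
	return 0;
}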
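
The scom.h change is an API fix rather than a cleanup: the old scom_read() returned the 64-bit value directly and had no way to report a failed access, while the new one returns 0 or a negative error code and passes the value back through a pointer, poisoning it to all-ones on failure. A hedged sketch of a caller migrated to the new signature; the register index and message strings are hypothetical:

/* Hedged sketch of a read on the new error-returning scom_read();
 * the register index 0x1008 is hypothetical. */
#include <asm/scom.h>
#include <linux/printk.h>

static int scom_read_demo(scom_map_t map)
{
	u64 val;
	int rc;

	rc = scom_read(map, 0x1008, &val);
	if (rc) {
		pr_err("SCOM read failed: %d\n", rc);	/* val poisoned to ~0 */
		return rc;
	}
	pr_info("SCOM register: 0x%llx\n", (unsigned long long)val);
	return 0;
}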
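
The new arch_spin_value_unlocked() in spinlock.h takes the lock by value rather than by pointer, which is what generic lock/count ("lockref"-style) code wants: it snapshots a lock and counter in one wide load and tests the copy without touching the live cacheline. A simplified illustration of that pattern under invented type names; the real code completes the operation with a cmpxchg on the whole pair, elided here:

/* Hedged sketch of the snapshot-test pattern enabled by a by-value
 * unlocked check; types are simplified stand-ins, not kernel types. */
typedef struct { unsigned int slock; } spinlock_value_t;

struct lockref_sketch {
	spinlock_value_t lock;
	unsigned int count;
};

static inline int value_unlocked(spinlock_value_t lock)
{
	return lock.slock == 0;		/* 0 means nobody holds it */
}

static int try_lockless_get(struct lockref_sketch *lr)
{
	struct lockref_sketch old = *lr;	/* one wide load in real code */

	if (!value_unlocked(old.lock))
		return 0;	/* contended: fall back to taking the lock */
	/* real code: cmpxchg the whole (lock, count) pair to count + 1 */
	return 1;
}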
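
The little-endian word-at-a-time block merits a worked example. has_zero() computes ((a - 0x01…01) & ~a) & 0x80…80, which leaves the top bit set in each zero byte of a; on 64-bit, find_zero() then isolates the bits below the lowest such bit and counts them with popcntd, yielding 8*n + 7 trailing zeros for a first zero at byte index n. A portable userspace sketch, with __builtin_ctzll standing in for the addi/andc/popcntd asm and a little-endian host assumed:

/* Hedged sketch of the LE has_zero()/find_zero() pair; a builtin
 * replaces the popcntd sequence and the host is assumed little-endian. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

int main(void)
{
	uint64_t a, mask;

	memcpy(&a, "ab\0cdefg", 8);		/* NUL at byte index 2 */

	mask = ((a - ONES) & ~a) & HIGHS;	/* has_zero(): bit 23 set */
	printf("first zero byte at %u\n",
	       (unsigned)(__builtin_ctzll(mask) >> 3));	/* prints 2 */
	return 0;
}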
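
Finally, on the new xor.h: XOR_TRY_TEMPLATES is the hook the generic XOR code expands while benchmarking candidate routines, so the Altivec template only enters the contest when the CPU feature bit is present at runtime, and consumers reach whichever routine won through the generic xor_blocks() interface rather than calling a template directly. A hedged sketch of such a consumer; the function name and buffer setup are invented:

/* Hedged sketch: XOR two buffers through the generic xor_blocks()
 * interface, which dispatches to the template that won the boot-time
 * benchmark (possibly the new Altivec one). Names are invented. */
#include <linux/raid/xor.h>
#include <linux/slab.h>

static int xor_demo(void)
{
	void *dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	void *src  = kzalloc(PAGE_SIZE, GFP_KERNEL);
	void *srcs[1] = { src };
	int rc = -ENOMEM;

	if (dest && src) {
		xor_blocks(1, PAGE_SIZE, dest, srcs);	/* dest ^= src */
		rc = 0;
	}
	kfree(dest);
	kfree(src);
	return rc;
}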
