Diffstat (limited to 'arch/tile/include')
30 files changed, 297 insertions, 298 deletions
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
index 8a33912fd6c..904538e754d 100644
--- a/arch/tile/include/arch/mpipe.h
+++ b/arch/tile/include/arch/mpipe.h
@@ -176,7 +176,18 @@ typedef union
      */
     uint_reg_t stack_idx    : 5;
     /* Reserved. */
-    uint_reg_t __reserved_2 : 5;
+    uint_reg_t __reserved_2 : 3;
+    /*
+     * Instance ID.  For devices that support automatic buffer return between
+     * mPIPE instances, this field indicates the buffer owner.  If the INST
+     * field does not match the mPIPE's instance number when a packet is
+     * egressed, buffers with HWB set will be returned to the other mPIPE
+     * instance.  Note that not all devices support multi-mPIPE buffer
+     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+     * whether the INST field in the buffer descriptor is populated by iDMA
+     * hardware. This field is ignored on writes.
+     */
+    uint_reg_t inst         : 2;
     /*
      * Reads as one to indicate that this is a hardware managed buffer.
      * Ignored on writes since all buffers on a given stack are the same size.
@@ -205,7 +216,8 @@ typedef union
     uint_reg_t c            : 2;
     uint_reg_t size         : 3;
     uint_reg_t hwb          : 1;
-    uint_reg_t __reserved_2 : 5;
+    uint_reg_t inst         : 2;
+    uint_reg_t __reserved_2 : 3;
     uint_reg_t stack_idx    : 5;
     uint_reg_t __reserved_1 : 6;
     int_reg_t va           : 35;
@@ -231,9 +243,9 @@ typedef union
     /* Reserved. */
     uint_reg_t __reserved_0 : 3;
     /* eDMA ring being accessed */
-    uint_reg_t ring         : 5;
+    uint_reg_t ring         : 6;
     /* Reserved. */
-    uint_reg_t __reserved_1 : 18;
+    uint_reg_t __reserved_1 : 17;
     /*
      * This field of the address selects the region (address space) to be
      * accessed.  For the egress DMA post region, this field must be 5.
@@ -250,8 +262,8 @@ typedef union
     uint_reg_t svc_dom      : 5;
     uint_reg_t __reserved_2 : 6;
     uint_reg_t region       : 3;
-    uint_reg_t __reserved_1 : 18;
-    uint_reg_t ring         : 5;
+    uint_reg_t __reserved_1 : 17;
+    uint_reg_t ring         : 6;
     uint_reg_t __reserved_0 : 3;
 #endif
   };
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
index 410a0400e05..84022ac5fe8 100644
--- a/arch/tile/include/arch/mpipe_constants.h
+++ b/arch/tile/include/arch/mpipe_constants.h
@@ -16,13 +16,13 @@
 #ifndef __ARCH_MPIPE_CONSTANTS_H__
 #define __ARCH_MPIPE_CONSTANTS_H__
 
-#define MPIPE_NUM_CLASSIFIERS 10
+#define MPIPE_NUM_CLASSIFIERS 16
 #define MPIPE_CLS_MHZ 1200
 
-#define MPIPE_NUM_EDMA_RINGS 32
+#define MPIPE_NUM_EDMA_RINGS 64
 
 #define MPIPE_NUM_SGMII_MACS 16
-#define MPIPE_NUM_XAUI_MACS 4
+#define MPIPE_NUM_XAUI_MACS 16
 #define MPIPE_NUM_LOOPBACK_CHANNELS 4
 #define MPIPE_NUM_NON_LB_CHANNELS 28
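The mpipe.h hunks above repurpose two reserved descriptor bits as an INST field. Below is a minimal sketch of how egress software might populate it; the struct mirrors the little-endian field order shown above, while remote_buff_rtn_supported() is a hypothetical stand-in for reading MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT, which this patch only names.

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t uint_reg_t;

struct bd_word {
	/* ... earlier fields elided ... */
	uint_reg_t stack_idx    : 5;
	uint_reg_t __reserved_2 : 3;	/* shrunk from 5 bits */
	uint_reg_t inst         : 2;	/* new: owning mPIPE instance */
	uint_reg_t hwb          : 1;	/* hardware-managed buffer */
	uint_reg_t size         : 3;
	uint_reg_t c            : 2;
};

extern bool remote_buff_rtn_supported(void);	/* hypothetical accessor */

static void bd_set_owner(struct bd_word *bd, unsigned int my_instance)
{
	/* Only parts with multi-mPIPE buffer return honor INST; on other
	 * parts leave it zero so buffers with HWB set stay local. */
	bd->inst = remote_buff_rtn_supported() ? (my_instance & 0x3) : 0;
}
```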
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
index f2e9e122818..13b3c4300e5 100644
--- a/arch/tile/include/arch/mpipe_shm.h
+++ b/arch/tile/include/arch/mpipe_shm.h
@@ -44,8 +44,14 @@ typedef union
      * descriptors toggles each time the ring tail pointer wraps.
      */
     uint_reg_t gen        : 1;
+    /**
+     * For devices with EDMA reorder support, this field allows the
+     * descriptor to select the egress FIFO.  The associated DMA ring must
+     * have ALLOW_EFIFO_SEL enabled.
+     */
+    uint_reg_t efifo_sel  : 6;
     /** Reserved.  Must be zero. */
-    uint_reg_t r0         : 7;
+    uint_reg_t r0         : 1;
     /** Checksum generation enabled for this transfer. */
     uint_reg_t csum       : 1;
     /**
@@ -110,7 +116,8 @@ typedef union
     uint_reg_t notif      : 1;
     uint_reg_t ns         : 1;
     uint_reg_t csum       : 1;
-    uint_reg_t r0         : 7;
+    uint_reg_t r0         : 1;
+    uint_reg_t efifo_sel  : 6;
     uint_reg_t gen        : 1;
 #endif
 
@@ -126,14 +133,16 @@ typedef union
     /** Reserved. */
     uint_reg_t __reserved_1 : 3;
     /**
-     * Instance ID.  For devices that support more than one mPIPE instance,
-     * this field indicates the buffer owner.  If the INST field does not
-     * match the mPIPE's instance number when a packet is egressed, buffers
-     * with HWB set will be returned to the other mPIPE instance.
+     * Instance ID.  For devices that support automatic buffer return between
+     * mPIPE instances, this field indicates the buffer owner.  If the INST
+     * field does not match the mPIPE's instance number when a packet is
+     * egressed, buffers with HWB set will be returned to the other mPIPE
+     * instance.  Note that not all devices support multi-mPIPE buffer
+     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+     * whether the INST field in the buffer descriptor is populated by iDMA
+     * hardware.
      */
-    uint_reg_t inst         : 1;
-    /** Reserved. */
-    uint_reg_t __reserved_2 : 1;
+    uint_reg_t inst         : 2;
     /**
      * Always set to one by hardware in iDMA packet descriptors.  For eDMA,
      * indicates whether the buffer will be released to the buffer stack
@@ -166,8 +175,7 @@ typedef union
     uint_reg_t c            : 2;
     uint_reg_t size         : 3;
     uint_reg_t hwb          : 1;
-    uint_reg_t __reserved_2 : 1;
-    uint_reg_t inst         : 1;
+    uint_reg_t inst         : 2;
     uint_reg_t __reserved_1 : 3;
     uint_reg_t stack_idx    : 5;
     uint_reg_t __reserved_0 : 6;
@@ -408,7 +416,10 @@ typedef union
     /**
      * Sequence number applied when packet is distributed.   Classifier
      * selects which sequence number is to be applied by writing the 13-bit
-     * SQN-selector into this field.
+     * SQN-selector into this field.  For devices that support EXT_SQN (as
+     * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to
+     * 32-bits via the IDMA_CTL.EXT_SQN register.  In this case the
+     * PACKET_SQN will be reduced to 32 bits.
      */
     uint_reg_t gp_sqn     : 16;
     /**
@@ -451,14 +462,16 @@ typedef union
     /** Reserved. */
     uint_reg_t __reserved_5 : 3;
     /**
-     * Instance ID.  For devices that support more than one mPIPE instance,
-     * this field indicates the buffer owner.  If the INST field does not
-     * match the mPIPE's instance number when a packet is egressed, buffers
-     * with HWB set will be returned to the other mPIPE instance.
+     * Instance ID.  For devices that support automatic buffer return between
+     * mPIPE instances, this field indicates the buffer owner.  If the INST
+     * field does not match the mPIPE's instance number when a packet is
+     * egressed, buffers with HWB set will be returned to the other mPIPE
+     * instance.  Note that not all devices support multi-mPIPE buffer
+     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+     * whether the INST field in the buffer descriptor is populated by iDMA
+     * hardware.
      */
-    uint_reg_t inst         : 1;
-    /** Reserved. */
-    uint_reg_t __reserved_6 : 1;
+    uint_reg_t inst         : 2;
     /**
      * Always set to one by hardware in iDMA packet descriptors.  For eDMA,
      * indicates whether the buffer will be released to the buffer stack
@@ -491,8 +504,7 @@ typedef union
     uint_reg_t c            : 2;
     uint_reg_t size         : 3;
     uint_reg_t hwb          : 1;
-    uint_reg_t __reserved_6 : 1;
-    uint_reg_t inst         : 1;
+    uint_reg_t inst         : 2;
     uint_reg_t __reserved_5 : 3;
     uint_reg_t stack_idx    : 5;
     uint_reg_t __reserved_4 : 6;
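The eDMA descriptor gains an EFIFO_SEL field in the same patch. A hedged sketch of using it, with the field widths taken from mpipe_shm.h above; configuring ALLOW_EFIFO_SEL on the ring is a separate step that this header only mentions.

```c
#include <stdint.h>

typedef uint64_t uint_reg_t;

/* First word of the eDMA descriptor, little-endian order, remaining
 * fields elided.  r0 shrinks from 7 reserved bits to 1 to make room. */
struct edesc_word {
	uint_reg_t gen       : 1;	/* generation bit */
	uint_reg_t efifo_sel : 6;	/* egress FIFO select (new) */
	uint_reg_t r0        : 1;	/* reserved, must be zero */
	uint_reg_t csum      : 1;	/* checksum offload enable */
	/* ... */
};

static void edesc_pick_fifo(struct edesc_word *d, unsigned int fifo)
{
	/* Meaningful only if the DMA ring has ALLOW_EFIFO_SEL enabled;
	 * otherwise this field must stay zero like the rest of old r0. */
	d->efifo_sel = fifo & 0x3f;
}
```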
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h
index 628b045436b..85647e91a45 100644
--- a/arch/tile/include/arch/trio_constants.h
+++ b/arch/tile/include/arch/trio_constants.h
@@ -16,21 +16,21 @@
 #ifndef __ARCH_TRIO_CONSTANTS_H__
 #define __ARCH_TRIO_CONSTANTS_H__
 
-#define TRIO_NUM_ASIDS 16
+#define TRIO_NUM_ASIDS 32
 #define TRIO_NUM_TLBS_PER_ASID 16
 
 #define TRIO_NUM_TPIO_REGIONS 8
 #define TRIO_LOG2_NUM_TPIO_REGIONS 3
 
-#define TRIO_NUM_MAP_MEM_REGIONS 16
-#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4
+#define TRIO_NUM_MAP_MEM_REGIONS 32
+#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5
 #define TRIO_NUM_MAP_SQ_REGIONS 8
 #define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3
 
 #define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6
 
-#define TRIO_NUM_PUSH_DMA_RINGS 32
+#define TRIO_NUM_PUSH_DMA_RINGS 64
 
-#define TRIO_NUM_PULL_DMA_RINGS 32
+#define TRIO_NUM_PULL_DMA_RINGS 64
 
 #endif /* __ARCH_TRIO_CONSTANTS_H__ */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 664d6ad23f8..0aa5675e702 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += fb.h
 generic-y += fcntl.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -18,12 +19,14 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece..70979846076 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b15..1b109fad9ff 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 
 /* A 64bit atomic type */
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -168,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
 
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_dec()	do { } while (0)
-#define smp_mb__after_atomic_inc()	do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
@@ -236,11 +227,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+					long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+					long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
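The u64-to-long-long conversion in atomic.h and atomic_32.h is more than style: atomic64_t holds a signed counter, and the old unsigned return types made sign-sensitive callers silently misbehave on 32-bit tile. A small illustrative caller (not from the tree) that depends on the signed result:

```c
#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t budget = ATOMIC64_INIT(0);

static bool charge(long long amount)
{
	/*
	 * With the old u64 return type this comparison was always false;
	 * with long long it detects overdraw like on other architectures.
	 */
	if (atomic64_add_return(-amount, &budget) < 0) {
		atomic64_add(amount, &budget);	/* roll back */
		return false;
	}
	return true;
}
```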
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index ad220eed05f..7b11c5fadd4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
-/* Atomic dec and inc don't implement barrier, so provide them if needed. */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 /* Define this to indicate that cmpxchg is an efficient operation. */
 #define __HAVE_ARCH_CMPXCHG
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index a9a73da5865..96a42ae79f4 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -22,59 +22,6 @@
 #include <arch/spr_def.h>
 #include <asm/timex.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends()	do { } while (0)
-
 #define __sync()	__insn_mf()
 
 #include <hv/syscall_public.h>
@@ -125,20 +72,21 @@ mb_incoherent(void)
 #define mb()		fast_mb()
 #define iob()		fast_iob()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
+#ifndef __tilegx__ /* 32 bit */
+/*
+ * We need to barrier before modifying the word, since the _atomic_xxx()
+ * routines just tns the lock and then read/modify/write of the word.
+ * But after the word is updated, the routine issues an "mf" before returning,
+ * and since it's a function call, we don't even need a compiler barrier.
+ */
+#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__after_atomic()	do { } while (0)
+#else /* 64 bit */
+#define smp_mb__before_atomic()	smp_mb()
+#define smp_mb__after_atomic()	smp_mb()
 #endif
 
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_TILE_BARRIER_H */
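barrier.h now defines the consolidated smp_mb__before_atomic()/smp_mb__after_atomic() pair that replaces the per-operation _dec/_inc (and, below, _clear_bit) variants, and pulls in <asm-generic/barrier.h> for everything else. A sketch of the call-site pattern these macros serve; on tilepro only the "before" barrier costs anything, while on tilegx both are full barriers:

```c
#include <linux/bitops.h>
#include <asm/barrier.h>

#define IO_BUSY 0

static unsigned long io_flags;

static void io_finish(void)
{
	/* Order the buffer writes before publishing "not busy": clear_bit()
	 * itself carries no barrier on tile. */
	smp_mb__before_atomic();
	clear_bit(IO_BUSY, &io_flags);
}
```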
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index d5a20686503..20caa346ac0 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #define _ASM_TILE_BITOPS_H
 
 #include <linux/types.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 386865ad2f5..bbf7b666f21 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 * restricted to acting on a single-word quantity.
 *
 * clear_bit() may not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_clear_bit() and/or
- * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
+ * locking purposes, you should call smp_mb__before_atomic() and/or
+ * smp_mb__after_atomic() to ensure changes are visible on other cpus.
 */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
@@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,
 	return (_atomic_xor(addr, mask) & mask) != 0;
 }
 
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do {} while (0)
-
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index ad34cd05608..bb1a29221fc 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
 }
 
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-
-
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4b..0ccda3c425b 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n)							\
 	({								\
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 4)				\
 			__cmpxchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,	\
+						(int)n);		\
 	})
 
 #define xchg64(ptr, n)							\
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8)				\
 			__xchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n));	\
+		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),	\
+						(long long)(n));	\
 	})
 
 #define cmpxchg64(ptr, o, n)						\
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8)				\
 			__cmpxchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,	\
+					(long long)o, (long long)n);	\
 	})
 
 #else
 
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) {				\
 		case 4:							\
 			__x = (typeof(__x))(unsigned long)		\
-				__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_exch4((ptr),			\
+					(u32)(unsigned long)(n));	\
 			break;						\
 		case 8:							\
-			__x = (typeof(__x))			\
+			__x = (typeof(__x))				\
 				__insn_exch((ptr), (unsigned long)(n));	\
 			break;						\
 		default:						\
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) {				\
 		case 4:							\
 			__x = (typeof(__x))(unsigned long)		\
-				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_cmpexch4((ptr),			\
+					(u32)(unsigned long)(n));	\
 			break;						\
 		case 8:							\
-			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+			__x = (typeof(__x))__insn_cmpexch((ptr),	\
+						(long long)(n));	\
 			break;						\
 		default:						\
 			__cmpxchg_called_with_bad_pointer();		\
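One visible effect of the cmpxchg.h retyping, sketched below on a hypothetical lock word: a signed 64-bit value now round-trips through cmpxchg64() without the old (u64) casts, so comparisons against negative sentinels behave as written.

```c
#include <linux/types.h>
#include <asm/cmpxchg.h>

static long long owner = -1LL;	/* -1 means unowned */

static bool try_claim(long long me)
{
	/* cmpxchg64() returns the old value; the claim succeeds only if
	 * this caller replaced the -1 sentinel. */
	return cmpxchg64(&owner, -1LL, me) == -1LL;
}
```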
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 78f1f2ded86..ffd4493efc7 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
 			u32 dummy, u32 low, u32 high);
 long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
 			 u32 dummy, u32 low, u32 high);
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);
 long compat_sys_sync_file_range2(int fd, unsigned int flags,
 				 u32 offset_lo, u32 offset_hi,
 				 u32 nbytes_lo, u32 nbytes_hi);
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
index c6b9c1b38fd..ffe2637aeb3 100644
--- a/arch/tile/include/asm/fixmap.h
+++ b/arch/tile/include/asm/fixmap.h
@@ -25,9 +25,6 @@
 #include <asm/kmap_types.h>
 #endif
 
-#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
 /*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
@@ -83,35 +80,7 @@ enum fixed_addresses {
 #define FIXADDR_START		(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
 #define FIXADDR_BOOT_START	(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
 
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	/*
-	 * this branch gets completely eliminated after inlining,
-	 * except when someone tries to use fixaddr indices in an
-	 * illegal way. (such as mixing up address types or using
-	 * out-of-range indices).
-	 *
-	 * If it doesn't get removed, the linker will complain
-	 * loudly with a reasonably clear error message..
-	 */
-	if (idx >= __end_of_fixed_addresses)
-		__this_fixmap_does_not_exist();
-
-	return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
+#include <asm-generic/fixmap.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h
index 822390f9a15..54110af2398 100644
--- a/arch/tile/include/asm/hardirq.h
+++ b/arch/tile/include/asm/hardirq.h
@@ -42,6 +42,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#define HARDIRQ_BITS	8
-
 #endif /* _ASM_TILE_HARDIRQ_H */
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 33cff9a3058..1fe86911838 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -18,10 +18,12 @@
 #include <linux/hardirq.h>
 
 /* The hypervisor interface provides 32 IRQs. */
-#define NR_IRQS 32
+#define NR_IRQS			32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 0
+#define IRQ_RESCHEDULE	0
+/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */
+#define NR_IRQS_LEGACY	1
 
 #define irq_canonicalize(irq)   (irq)
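The fixmap.h change above swaps the tile-private helpers for <asm-generic/fixmap.h>. Roughly what the generic header supplies in their place (paraphrased, not copied from the generic implementation): the same index-to-address arithmetic, with the linker-error trick replaced by a compile-time check.

```c
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
	return __fix_to_virt(idx);
}

static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
	return __virt_to_fix(vaddr);
}
```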
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 6346888f7bd..67276800861 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -182,10 +182,9 @@ static inline __attribute_const__ int get_order(unsigned long size)
 #define PAGE_OFFSET		(-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
 #define KERNEL_HIGH_VADDR	_AC(0xfffffff800000000, UL)  /* high 32GB */
 
-#define FIXADDR_BASE		(KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
-#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
+#define FIXADDR_BASE		(KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
+#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
 #define _VMALLOC_START		FIXADDR_TOP
-#define HUGE_VMAP_BASE		(KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
 #define MEM_SV_START		(KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
 #define MEM_MODULE_START	(MEM_SV_START + (256*1024*1024)) /* 256 MB */
 #define MEM_MODULE_END		(MEM_MODULE_START + (256*1024*1024))
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8ef..4f7ae39fa20 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
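The reworked percpu.h deserves a usage sketch. Under CONFIG_PREEMPT the fake stack-memory operand makes every __my_cpu_offset read subject to barrier()'s memory clobber, so a percpu address cannot be cached across a preemption point; my_counter below is a hypothetical variable.

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_counter);	/* hypothetical per-cpu variable */

static void bump(void)
{
	preempt_disable();
	__this_cpu_add(my_counter, 1);	/* address derived from "tp" */
	preempt_enable();	/* implies barrier(): any later percpu access
				 * must re-derive its address from "tp" */
}
```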
diff --git a/arch/tile/include/asm/perf_event.h b/arch/tile/include/asm/perf_event.h
new file mode 100644
index 00000000000..59c5b164e5b
--- /dev/null
+++ b/arch/tile/include/asm/perf_event.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2014 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_PERF_EVENT_H
+#define _ASM_TILE_PERF_EVENT_H
+
+#include <linux/percpu.h>
+DECLARE_PER_CPU(u64, perf_irqs);
+
+unsigned long handle_syscall_link_address(void);
+#endif /* _ASM_TILE_PERF_EVENT_H */
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 63142ab3b3d..d26a4227903 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -55,17 +55,9 @@
 #define PKMAP_BASE   ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
 
 #ifdef CONFIG_HIGHMEM
-# define __VMAPPING_END	(PKMAP_BASE & ~(HPAGE_SIZE-1))
+# define _VMALLOC_END	(PKMAP_BASE & ~(HPAGE_SIZE-1))
 #else
-# define __VMAPPING_END	(FIXADDR_START & ~(HPAGE_SIZE-1))
-#endif
-
-#ifdef CONFIG_HUGEVMAP
-#define HUGE_VMAP_END	__VMAPPING_END
-#define HUGE_VMAP_BASE	(HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
-#define _VMALLOC_END	HUGE_VMAP_BASE
-#else
-#define _VMALLOC_END	__VMAPPING_END
+# define _VMALLOC_END	(FIXADDR_START & ~(HPAGE_SIZE-1))
 #endif
 
 /*
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 3421177f737..2c8a9cd102d 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -52,12 +52,10 @@
 * memory allocation code).  The vmalloc code puts in an internal
 * guard page between each allocation.
 */
-#define _VMALLOC_END	HUGE_VMAP_BASE
+#define _VMALLOC_END	MEM_SV_START
 #define VMALLOC_END	_VMALLOC_END
 #define VMALLOC_START	_VMALLOC_START
 
-#define HUGE_VMAP_END	(HUGE_VMAP_BASE + PGDIR_SIZE)
-
 #ifndef __ASSEMBLY__
 
 /* We have no pud since we are a three-level page table. */
diff --git a/arch/tile/include/asm/pmc.h b/arch/tile/include/asm/pmc.h
new file mode 100644
index 00000000000..7ae3956d900
--- /dev/null
+++ b/arch/tile/include/asm/pmc.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_PMC_H
+#define _ASM_TILE_PMC_H
+
+#include <linux/ptrace.h>
+
+#define TILE_BASE_COUNTERS	2
+
+/* Bitfields below are derived from SPR PERF_COUNT_CTL*/
+#ifndef __tilegx__
+/* PERF_COUNT_CTL on TILEPro */
+#define TILE_CTL_EXCL_USER	(1 << 7) /* exclude user level */
+#define TILE_CTL_EXCL_KERNEL	(1 << 8) /* exclude kernel level */
+#define TILE_CTL_EXCL_HV	(1 << 9) /* exclude hypervisor level */
+
+#define TILE_SEL_MASK		0x7f	/* 7 bits for event SEL,
+					COUNT_0_SEL */
+#define TILE_PLM_MASK		0x780	/* 4 bits priv level msks,
+					COUNT_0_MASK*/
+#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_PLM_MASK)
+
+#else /* __tilegx__*/
+/* PERF_COUNT_CTL on TILEGx*/
+#define TILE_CTL_EXCL_USER	(1 << 10) /* exclude user level */
+#define TILE_CTL_EXCL_KERNEL	(1 << 11) /* exclude kernel level */
+#define TILE_CTL_EXCL_HV	(1 << 12) /* exclude hypervisor level */
+
+#define TILE_SEL_MASK		0x3f	/* 6 bits for event SEL,
+					COUNT_0_SEL*/
+#define TILE_BOX_MASK		0x1c0	/* 3 bits box msks,
+					COUNT_0_BOX */
+#define TILE_PLM_MASK		0x3c00	/* 4 bits priv level msks,
+					COUNT_0_MASK */
+#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK)
+#endif /* __tilegx__*/
+
+/* Takes register and fault number.  Returns error to disable the interrupt. */
+typedef int (*perf_irq_t)(struct pt_regs *, int);
+
+int userspace_perf_handler(struct pt_regs *regs, int fault);
+
+perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq);
+void release_pmc_hardware(void);
+
+unsigned long pmc_get_overflow(void);
+void pmc_ack_overflow(unsigned long status);
+
+void unmask_pmc_interrupts(void);
+void mask_pmc_interrupts(void);
+
+#endif /* _ASM_TILE_PMC_H */
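A hedged sketch of how a perf backend might combine the new pmc.h masks; the actual SEL encodings come from the chip documentation, and the header's conditional definitions let the same code serve both TILEPro and TILE-Gx.

```c
#include <asm/pmc.h>

static unsigned long make_ctl(unsigned long sel)
{
	/* Keep only the event-select bits, then exclude hypervisor-level
	 * counting; TILE_SEL_MASK differs per chip but the code does not. */
	return (sel & TILE_SEL_MASK) | TILE_CTL_EXCL_HV;
}
```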
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index b8aa6df3e10..48e4fd0f38e 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -94,7 +94,7 @@ register unsigned long stack_pointer __asm__("sp");
 /* Sit on a nap instruction until interrupted. */
 extern void smp_nap(void);
 
-/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
+/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
 extern void _cpu_idle(void);
 
 #else /* __ASSEMBLY__ */
@@ -113,8 +113,6 @@ extern void _cpu_idle(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define PREEMPT_ACTIVE		0x10000000
-
 /*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particular since it makes
@@ -131,6 +129,7 @@ extern void _cpu_idle(void);
 #define TIF_MEMDIE		7	/* OOM killer at work */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */
+#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */
 
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
@@ -142,6 +141,7 @@ extern void _cpu_idle(void);
 #define _TIF_MEMDIE		(1<<TIF_MEMDIE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
@@ -164,7 +164,6 @@ extern void _cpu_idle(void);
 #ifdef __tilegx__
 #define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
 #endif
-#define TS_POLLING		0x0004	/* in idle loop but not sleeping */
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index d15c0d8d550..93831184423 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
 /* For now, use numa node -1 for global allocation. */
 #define pcibus_to_node(bus)		((void)(bus), -1)
 
-/*
- * TILE architecture has many cores integrated in one processor, so we need
- * setup bigger balance_interval for both CPU/NODE scheduling domains to
- * reduce process scheduling costs.
- */
-
-/* sched_domains SD_CPU_INIT for TILE architecture */
-#define SD_CPU_INIT (struct sched_domain) {				\
-	.min_interval		= 4,					\
-	.max_interval		= 128,					\
-	.busy_factor		= 64,					\
-	.imbalance_pct		= 125,					\
-	.cache_nice_tries	= 1,					\
-	.busy_idx		= 2,					\
-	.idle_idx		= 1,					\
-	.newidle_idx		= 0,					\
-	.wake_idx		= 0,					\
-	.forkexec_idx		= 0,					\
-									\
-	.flags			= 1*SD_LOAD_BALANCE			\
-				| 1*SD_BALANCE_NEWIDLE			\
-				| 1*SD_BALANCE_EXEC			\
-				| 1*SD_BALANCE_FORK			\
-				| 0*SD_BALANCE_WAKE			\
-				| 0*SD_WAKE_AFFINE			\
-				| 0*SD_SHARE_CPUPOWER			\
-				| 0*SD_SHARE_PKG_RESOURCES		\
-				| 0*SD_SERIALIZE			\
-				,					\
-	.last_balance		= jiffies,				\
-	.balance_interval	= 32,					\
-}
-
 /* By definition, we create nodes based on online memory. */
 #define node_has_online_mem(nid) 1
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index fdd07f88cfd..4cda03de734 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -56,89 +56,89 @@
 #define GXIO_MPIPE_OP_GET_MMIO_BASE    IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
-int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
 				   unsigned int count, unsigned int first,
 				   unsigned int flags);
 
-int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
 				     void *mem_va, size_t mem_size,
 				     unsigned int mem_flags, unsigned int stack,
 				     unsigned int buffer_size_enum);
 
-int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
 				 unsigned int count, unsigned int first,
 				 unsigned int flags);
 
-int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
 				   size_t mem_size, unsigned int mem_flags,
 				   unsigned int ring);
 
-int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
 					    int inter_x, int inter_y,
 					    int inter_ipi, int inter_event,
 					    unsigned int ring);
 
-int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
 					   unsigned int ring);
 
-int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
 				  unsigned int count, unsigned int first,
 				  unsigned int flags);
 
-int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
 				unsigned int group,
 				gxio_mpipe_notif_group_bits_t bits);
 
-int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
 			     unsigned int first, unsigned int flags);
 
-int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
 			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
 
-int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
 				unsigned int count, unsigned int first,
 				unsigned int flags);
 
-int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
 				  size_t mem_size, unsigned int mem_flags,
 				  unsigned int ring, unsigned int channel);
 
-int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
 			    size_t blob_size);
 
-int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
 				      unsigned int iotlb, HV_PTE pte,
 				      unsigned int flags);
 
-int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
 			     _gxio_mpipe_link_name_t name, unsigned int flags);
 
-int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac);
 
-int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
 				 uint32_t attr, int64_t val);
 
-int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
-				 uint64_t * nsec, uint64_t * cycles);
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
+				 uint64_t *nsec, uint64_t *cycles);
 
-int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
 				 uint64_t nsec, uint64_t cycles);
 
-int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,
 				    int64_t nsec);
 
-int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
 				     int32_t ppb);
 
-int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
 
-int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
 
-int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base);
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base);
 
-int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
 				 unsigned long offset, unsigned long size);
 
 #endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 476c5e5ca22..f0b04284468 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -33,18 +33,18 @@
 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
 
-int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
 				 _gxio_mpipe_link_name_t name);
 
-int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
 				  unsigned int idx,
-				  _gxio_mpipe_link_name_t * name,
-				  _gxio_mpipe_link_mac_t * mac);
+				  _gxio_mpipe_link_name_t *name,
+				  _gxio_mpipe_link_mac_t *mac);
 
-int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
 				  HV_PTE *base);
 
-int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
 				      unsigned long offset, unsigned long size);
 
 #endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index d95b96fd6c9..376a4f77116 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -46,59 +46,59 @@
 #define GXIO_TRIO_OP_GET_MMIO_BASE     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
-int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count,
+int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
 			  unsigned int first, unsigned int flags);
 
 
-int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
+int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
 				unsigned int count, unsigned int first,
 				unsigned int flags);
 
 
-int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
+int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
 				   unsigned int count, unsigned int first,
 				   unsigned int flags);
 
-int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
+int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
 				unsigned int count, unsigned int first,
 				unsigned int flags);
 
-int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context,
+int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
 				  unsigned int pio_region, unsigned int mac,
 				  uint32_t bus_address_hi, unsigned int flags);
 
 
-int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context,
+int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
 				      unsigned int map, unsigned long va,
 				      uint64_t size, unsigned int asid,
 				      unsigned int mac, uint64_t bus_address,
 				      unsigned int node,
 				      unsigned int order_mode);
 
-int gxio_trio_get_port_property(gxio_trio_context_t * context,
+int gxio_trio_get_port_property(gxio_trio_context_t *context,
 				struct pcie_trio_ports_property *trio_ports);
 
-int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
 				 int inter_y, int inter_ipi, int inter_event,
 				 unsigned int mac, unsigned int intx);
 
-int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
 			      int inter_y, int inter_ipi, int inter_event,
 			      unsigned int mac, unsigned int mem_map,
 			      uint64_t mem_map_base, uint64_t mem_map_limit,
 			      unsigned int asid);
 
 
-int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps,
+int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
 			  uint16_t mrs, unsigned int mac);
 
-int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac);
+int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac);
 
-int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac);
+int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac);
 
-int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base);
+int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base);
 
-int gxio_trio_check_mmio_offset(gxio_trio_context_t * context,
+int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
 				unsigned long offset, unsigned long size);
 
 #endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h
index 8622e7d126a..79962a97de8 100644
--- a/arch/tile/include/gxio/iorpc_usb_host.h
+++ b/arch/tile/include/gxio/iorpc_usb_host.h
@@ -31,16 +31,16 @@
 #define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
-int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x,
+int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
 				int inter_y, int inter_ipi, int inter_event);
 
-int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context,
+int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
 					 HV_PTE pte, unsigned int flags);
 
-int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context,
+int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,
 				HV_PTE *base);
 
-int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context,
+int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
 				    unsigned long offset, unsigned long size);
 
 #endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h
index 5eedec0e988..93c9636d2dd 100644
--- a/arch/tile/include/gxio/usb_host.h
+++ b/arch/tile/include/gxio/usb_host.h
@@ -53,7 +53,7 @@ typedef struct {
 * @return Zero if the context was successfully initialized, else a
 *  GXIO_ERR_xxx error code.
 */
-extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
+extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
 			      int is_ehci);
 
 /* Destroy a USB context.
@@ -68,20 +68,20 @@ extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
 * @return Zero if the context was successfully destroyed, else a
 *  GXIO_ERR_xxx error code.
 */
-extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context);
+extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);
 
 /* Retrieve the address of the shim's MMIO registers.
 *
 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
 * @return The address of the shim's MMIO registers.
 */
-extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context);
+extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);
 
 /* Retrieve the length of the shim's MMIO registers.
 *
 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
 * @return The length of the shim's MMIO registers.
 */
-extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context);
+extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
 
 #endif /* _GXIO_USB_H_ */
