author | Paul Mundt <lethal@linux-sh.org> | 2011-05-31 13:10:26 +0900
committer | Paul Mundt <lethal@linux-sh.org> | 2011-05-31 13:10:26 +0900
commit | 8181d3ef26ed1d9eb21e2cdcac374e1f457fdc06 (patch)
tree | 1a081f09ebcf2a84de899ddeadd0e4c5e48b50d2 /arch/tile/include/asm
parent | 54525552c6ccfd867e819845da14be994e303218 (diff)
parent | 55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh-fixes-for-linus
Diffstat (limited to 'arch/tile/include/asm')
33 files changed, 2053 insertions, 1471 deletions
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 75a16028a95..739cfe0499d 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -130,17 +130,52 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_inc_not_zero(v)      atomic_add_unless((v), 1, 0)
 
-
-/*
- * We define xchg() and cmpxchg() in the included headers.
- * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
- * that cmpxchg() is an efficient operation, which is not particularly true.
- */
-
 /* Nonexistent functions intended to cause link errors. */
 extern unsigned long __xchg_called_with_bad_pointer(void);
 extern unsigned long __cmpxchg_called_with_bad_pointer(void);
 
+#define xchg(ptr, x)                                            \
+    ({                                                          \
+        typeof(*(ptr)) __x;                                     \
+        switch (sizeof(*(ptr))) {                               \
+        case 4:                                                 \
+            __x = (typeof(__x))(typeof(__x-__x))atomic_xchg(    \
+                (atomic_t *)(ptr),                              \
+                (u32)(typeof((x)-(x)))(x));                     \
+            break;                                              \
+        case 8:                                                 \
+            __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg(  \
+                (atomic64_t *)(ptr),                            \
+                (u64)(typeof((x)-(x)))(x));                     \
+            break;                                              \
+        default:                                                \
+            __xchg_called_with_bad_pointer();                   \
+        }                                                       \
+        __x;                                                    \
+    })
+
+#define cmpxchg(ptr, o, n)                                      \
+    ({                                                          \
+        typeof(*(ptr)) __x;                                     \
+        switch (sizeof(*(ptr))) {                               \
+        case 4:                                                 \
+            __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+                (atomic_t *)(ptr),                              \
+                (u32)(typeof((o)-(o)))(o),                      \
+                (u32)(typeof((n)-(n)))(n));                     \
+            break;                                              \
+        case 8:                                                 \
+            __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+                (atomic64_t *)(ptr),                            \
+                (u64)(typeof((o)-(o)))(o),                      \
+                (u64)(typeof((n)-(n)))(n));                     \
+            break;                                              \
+        default:                                                \
+            __cmpxchg_called_with_bad_pointer();                \
+        }                                                       \
+        __x;                                                    \
+    })
+
 #define tas(ptr) (xchg((ptr), 1))
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index ed359aee883..92a8bee3231 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -110,16 +110,6 @@ static inline void atomic_set(atomic_t *v, int n)
     _atomic_xchg(v, n);
 }
 
-#define xchg(ptr, x) ((typeof(*(ptr)))                          \
-    ((sizeof(*(ptr)) == sizeof(atomic_t)) ?                     \
-     atomic_xchg((atomic_t *)(ptr), (long)(x)) :                \
-     __xchg_called_with_bad_pointer()))
-
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))                    \
-    ((sizeof(*(ptr)) == sizeof(atomic_t)) ?                     \
-     atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) :  \
-     __cmpxchg_called_with_bad_pointer()))
-
 /* A 64bit atomic type */
 
 typedef struct {
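The rewritten xchg()/cmpxchg() dispatch on sizeof(*(ptr)), so a single definition now serves both word sizes and the old atomic_32.h versions above become redundant. A minimal usage sketch (illustrative, not part of the commit; names invented, and it assumes a TILE-Gx build where unsigned long is 8 bytes):

```c
#include <asm/atomic.h>

static unsigned int flags32;        /* 4-byte object */
static unsigned long counter64;     /* 8-byte object */

static void xchg_example(void)
{
	/* sizeof() == 4: the switch routes this to atomic_xchg(). */
	unsigned int old32 = xchg(&flags32, 1u);

	/* sizeof() == 8: routed to atomic64_cmpxchg(). */
	unsigned long old64 = cmpxchg(&counter64, 0UL, 100UL);

	/* Any other size hits the default: case and fails to link
	 * via __cmpxchg_called_with_bad_pointer(). */
	(void)old32;
	(void)old64;
}
```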
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 00000000000..1c1e60d8ccb
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ * Do not include directly; use <asm/atomic.h>.
+ */
+
+#ifndef _ASM_TILE_ATOMIC_64_H
+#define _ASM_TILE_ATOMIC_64_H
+
+#ifndef __ASSEMBLY__
+
+#include <arch/spr_def.h>
+
+/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
+
+#define atomic_set(v, i) ((v)->counter = (i))
+
+/*
+ * The smp_mb() operations throughout are to support the fact that
+ * Linux requires memory barriers before and after the operation,
+ * on any routine which updates memory and returns a value.
+ */
+
+static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
+{
+	int val;
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_cmpexch4((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline int atomic_xchg(atomic_t *v, int n)
+{
+	int val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_exch4((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__insn_fetchadd4((void *)&v->counter, i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_fetchadd4((void *)&v->counter, i) + i;
+	barrier();  /* the "+ i" above will wait on memory */
+	return val;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+/* Now the true 64-bit operations. */
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v, i)	((v)->counter = (i))
+
+static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
+{
+	long val;
+	smp_mb();  /* barrier for proper semantics */
+	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
+	val = __insn_cmpexch((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long n)
+{
+	long val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_exch((void *)&v->counter, n);
+	smp_mb();  /* barrier for proper semantics */
+	return val;
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	__insn_fetchadd((void *)&v->counter, i);
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	int val;
+	smp_mb();  /* barrier for proper semantics */
+	val = __insn_fetchadd((void *)&v->counter, i) + i;
+	barrier();  /* the "+ i" above will wait on memory */
+	return val;
+}
+
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long guess, oldval = v->counter;
+	do {
+		if (oldval == u)
+			break;
+		guess = oldval;
+		oldval = atomic64_cmpxchg(v, guess, guess + a);
+	} while (guess != oldval);
+	return oldval != u;
+}
+
+#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
+#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+
+/* Atomic dec and inc don't implement barrier, so provide them if needed. */
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+/* Define this to indicate that cmpxchg is an efficient operation. */
+#define __HAVE_ARCH_CMPXCHG
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_TILE_ATOMIC_64_H */
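atomic64_add_unless() above shows the compare-exchange retry idiom this header leans on: read the counter, guess it is unchanged, attempt the exchange, and retry until the guess matches. A hedged sketch of a typical caller (names invented for illustration):

```c
#include <asm/atomic.h>

static atomic64_t example_refs = ATOMIC64_INIT(1);

/* Take a reference only if the object is still live (count != 0).
 * atomic64_add_unless() returns nonzero iff the add happened. */
static int example_get_ref(void)
{
	return atomic64_add_unless(&example_refs, 1, 0);
}

static void example_put_ref(void)
{
	if (atomic64_dec_and_test(&example_refs))
		;	/* last reference dropped: free the object here */
}
```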
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index f18887d8239..bd5399a69ed 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -12,80 +12,41 @@
  * more details.
  */
 
-#ifndef _TILE_BACKTRACE_H
-#define _TILE_BACKTRACE_H
-
-
+#ifndef _ASM_TILE_BACKTRACE_H
+#define _ASM_TILE_BACKTRACE_H
 
 #include <linux/types.h>
 
-#include <arch/chip.h>
-
-#if defined(__tile__)
-typedef unsigned long VirtualAddress;
-#elif CHIP_VA_WIDTH() > 32
-typedef unsigned long long VirtualAddress;
-#else
-typedef unsigned int VirtualAddress;
-#endif
-
-
-/** Reads 'size' bytes from 'address' and writes the data to 'result'.
+/* Reads 'size' bytes from 'address' and writes the data to 'result'.
  * Returns true if successful, else false (e.g. memory not readable).
  */
 typedef bool (*BacktraceMemoryReader)(void *result,
-				      VirtualAddress address,
+				      unsigned long address,
				      unsigned int size,
				      void *extra);
 
 typedef struct {
-	/** Current PC. */
-	VirtualAddress pc;
+	/* Current PC. */
+	unsigned long pc;
 
-	/** Current stack pointer value. */
-	VirtualAddress sp;
+	/* Current stack pointer value. */
+	unsigned long sp;
 
-	/** Current frame pointer value (i.e. caller's stack pointer) */
-	VirtualAddress fp;
+	/* Current frame pointer value (i.e. caller's stack pointer) */
+	unsigned long fp;
 
-	/** Internal use only: caller's PC for first frame. */
-	VirtualAddress initial_frame_caller_pc;
+	/* Internal use only: caller's PC for first frame. */
+	unsigned long initial_frame_caller_pc;
 
-	/** Internal use only: callback to read memory. */
+	/* Internal use only: callback to read memory. */
	BacktraceMemoryReader read_memory_func;
 
-	/** Internal use only: arbitrary argument to read_memory_func. */
+	/* Internal use only: arbitrary argument to read_memory_func. */
	void *read_memory_func_extra;
 
 } BacktraceIterator;
 
-/** Initializes a backtracer to start from the given location.
- *
- * If the frame pointer cannot be determined it is set to -1.
- *
- * @param state The state to be filled in.
- * @param read_memory_func A callback that reads memory. If NULL, a default
- *        value is provided.
- * @param read_memory_func_extra An arbitrary argument to read_memory_func.
- * @param pc The current PC.
- * @param lr The current value of the 'lr' register.
- * @param sp The current value of the 'sp' register.
- * @param r52 The current value of the 'r52' register.
- */
-extern void backtrace_init(BacktraceIterator *state,
-			   BacktraceMemoryReader read_memory_func,
-			   void *read_memory_func_extra,
-			   VirtualAddress pc, VirtualAddress lr,
-			   VirtualAddress sp, VirtualAddress r52);
-
-
-/** Advances the backtracing state to the calling frame, returning
- * true iff successful.
- */
-extern bool backtrace_next(BacktraceIterator *state);
-
-
 typedef enum {
 
	/* We have no idea what the caller's pc is. */
@@ -138,7 +99,7 @@
 };
 
-/** Internal constants used to define 'info' operands. */
+/* Internal constants used to define 'info' operands. */
 enum {
 
	/* 0 and 1 are reserved, as are all negative numbers. */
 
@@ -147,13 +108,10 @@ enum {
	CALLER_SP_IN_R52_BASE = 4,
 
	CALLER_SP_OFFSET_BASE = 8,
-
-	/* Marks the entry point of certain functions. */
-	ENTRY_POINT_INFO_OP = 16
 };
 
-/** Current backtracer state describing where it thinks the caller is. */
+/* Current backtracer state describing where it thinks the caller is. */
 typedef struct {
	/*
	 * Public fields
@@ -192,7 +150,13 @@ typedef struct {
 
 } CallerLocation;
 
+extern void backtrace_init(BacktraceIterator *state,
+			   BacktraceMemoryReader read_memory_func,
+			   void *read_memory_func_extra,
+			   unsigned long pc, unsigned long lr,
+			   unsigned long sp, unsigned long r52);
+
+extern bool backtrace_next(BacktraceIterator *state);
 
-#endif /* _TILE_BACKTRACE_H */
+#endif /* _ASM_TILE_BACKTRACE_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 132e6bbd07e..16f1fa51fea 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -122,6 +122,7 @@ static inline unsigned long __arch_hweight64(__u64 w)
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
 
 #endif /* _ASM_TILE_BITOPS_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 2638be51a16..d31ab905cfa 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -126,7 +126,6 @@ static inline int test_and_change_bit(unsigned nr,
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	do {} while (0)
 
-#include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */
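Hoisting asm-generic/bitops/non-atomic.h into the common bitops.h gives the 32-bit and 64-bit builds the double-underscore variants from one place. For reference, the two families differ only in their concurrency guarantee (sketch; names invented):

```c
#include <linux/bitops.h>

static unsigned long example_mask[BITS_TO_LONGS(64)];

static void bitops_example(void)
{
	/* Atomic: safe against concurrent updates of the same word. */
	set_bit(3, example_mask);

	/* Non-atomic (from bitops/non-atomic.h): cheaper, but the caller
	 * must guarantee exclusive access, e.g. during init or under a
	 * lock. */
	__set_bit(4, example_mask);
}
```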
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
new file mode 100644
index 00000000000..99615e8d2d8
--- /dev/null
+++ b/arch/tile/include/asm/bitops_64.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_BITOPS_64_H
+#define _ASM_TILE_BITOPS_64_H
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/* See <asm/bitops.h> for API comments. */
+
+static inline void set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
+}
+
+static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
+}
+
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+
+static inline void change_bit(unsigned nr, volatile unsigned long *addr)
+{
+	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
+	long guess, oldval;
+	addr += nr / BITS_PER_LONG;
+	old = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+}
+
+
+/*
+ * The test_and_xxx_bit() routines require a memory fence before we
+ * start the operation, and after the operation completes.  We use
+ * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
+ * barrier(), to block until the atomic op is complete.
+ */
+
+static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
+{
+	int val;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	smp_mb();  /* barrier for proper semantics */
+	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
+	       & mask) != 0;
+	barrier();
+	return val;
+}
+
+
+static inline int test_and_change_bit(unsigned nr,
+				      volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	long guess, oldval = *addr;
+	addr += nr / BITS_PER_LONG;
+	oldval = *addr;
+	do {
+		guess = oldval;
+		oldval = atomic64_cmpxchg((atomic64_t *)addr,
+					  guess, guess ^ mask);
+	} while (guess != oldval);
+	return (oldval & mask) != 0;
+}
+
+#define ext2_set_bit_atomic(lock, nr, addr)			\
+	test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)			\
+	test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#endif /* _ASM_TILE_BITOPS_64_H */
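The fetch-or/fetch-and instructions make set_bit() and clear_bit() single operations, while the test_and_*() forms add the fences described in the comment above. A hedged sketch of the usual try-lock pattern built on them (flag word and bit number invented):

```c
#include <linux/types.h>
#include <linux/bitops.h>

#define EXAMPLE_BUSY_BIT 0

static unsigned long example_flags;

static bool example_try_claim(void)
{
	/* Returns the old bit: 0 means we won the race and own the slot. */
	return !test_and_set_bit(EXAMPLE_BUSY_BIT, &example_flags);
}

static void example_release(void)
{
	smp_mb__before_clear_bit();	/* order prior stores before unlock */
	clear_bit(EXAMPLE_BUSY_BIT, &example_flags);
}
```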
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 12fb0fb330e..e925f4bb498 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -116,22 +116,28 @@ static inline void __finv_buffer(void *buffer, size_t size)
 }
 
-/* Invalidate a VA range, then memory fence. */
+/* Invalidate a VA range and wait for it to be complete. */
 static inline void inv_buffer(void *buffer, size_t size)
 {
	__inv_buffer(buffer, size);
-	mb_incoherent();
+	mb();
 }
 
-/* Flush a VA range, then memory fence. */
-static inline void flush_buffer(void *buffer, size_t size)
+/*
+ * Flush a locally-homecached VA range and wait for the evicted
+ * cachelines to hit memory.
+ */
+static inline void flush_buffer_local(void *buffer, size_t size)
 {
	__flush_buffer(buffer, size);
	mb_incoherent();
 }
 
-/* Flush & invalidate a VA range, then memory fence. */
-static inline void finv_buffer(void *buffer, size_t size)
+/*
+ * Flush and invalidate a locally-homecached VA range and wait for the
+ * evicted cachelines to hit memory.
+ */
+static inline void finv_buffer_local(void *buffer, size_t size)
 {
	__finv_buffer(buffer, size);
	mb_incoherent();
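The rename from flush_buffer()/finv_buffer() to the *_local() forms makes the homecaching assumption explicit in the name. A hedged sketch of the intended use, e.g. before handing a locally-homed buffer to a device (function and parameter names invented):

```c
#include <asm/cacheflush.h>

static void example_prepare_buffer(void *buf, size_t len)
{
	/* Push dirty lines of a locally-homecached buffer out to memory
	 * and drop them from the cache, so the device reads fresh data
	 * and our next read observes the device's writes. */
	finv_buffer_local(buf, len);
}
```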
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c3ae570c0a5..bf95f55b82b 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -215,8 +215,8 @@ struct compat_sigaction;
 struct compat_siginfo;
 struct compat_sigaltstack;
 long compat_sys_execve(const char __user *path,
-		       const compat_uptr_t __user *argv,
-		       const compat_uptr_t __user *envp, struct pt_regs *);
+		       compat_uptr_t __user *argv,
+		       compat_uptr_t __user *envp, struct pt_regs *);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
			     struct compat_sigaction __user *oact,
			     size_t sigsetsize);
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 15e1dceecc6..eaa06d175b3 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -65,7 +65,8 @@ extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
 extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
					     unsigned long offset, size_t,
					     enum dma_data_direction);
-extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
+			   enum dma_data_direction);
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/tile/include/asm/fb.h b/arch/tile/include/asm/fb.h
new file mode 100644
index 00000000000..3a4988e8df4
--- /dev/null
+++ b/arch/tile/include/asm/fb.h
@@ -0,0 +1 @@
+#include <asm-generic/fb.h>
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 0bed3ec7b42..2ac422848c7 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -40,6 +40,10 @@
 #define HARDWALL_DEACTIVATE \
	_IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE)
 
+#define _HARDWALL_GET_ID 4
+#define HARDWALL_GET_ID \
+	_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
+
 #ifndef __KERNEL__
 
 /* This is the canonical name expected by userspace. */
@@ -47,9 +51,14 @@
 
 #else
 
-/* Hook for /proc/tile/hardwall. */
-struct seq_file;
-int proc_tile_hardwall_show(struct seq_file *sf, void *v);
+/* /proc hooks for hardwall. */
+struct proc_dir_entry;
+#ifdef CONFIG_HARDWALL
+void proc_tile_hardwall_init(struct proc_dir_entry *root);
+int proc_pid_hardwall(struct task_struct *task, char *buffer);
+#else
+static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
+#endif
 
 #endif
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d3cbb9b14cb..c9ea1652af0 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -52,6 +52,7 @@ extern void iounmap(volatile void __iomem *addr);
 #endif
 
 #define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
+#define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_writethrough(physaddr, size)	ioremap(physaddr, size)
 #define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
 
@@ -161,6 +162,15 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
+static inline void memset_io(void *dst, int val, size_t len)
+{
+	int x;
+	BUG_ON((unsigned long)dst & 0x3);
+	val = (val & 0xff) * 0x01010101;
+	for (x = 0; x < len; x += 4)
+		writel(val, dst + x);
+}
+
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 size_t len)
 {
@@ -269,6 +279,11 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
	ioport_panic();
 }
 
+#define ioread16be(addr)	be16_to_cpu(ioread16(addr))
+#define ioread32be(addr)	be32_to_cpu(ioread32(addr))
+#define iowrite16be(v, addr)	iowrite16(be16_to_cpu(v), (addr))
+#define iowrite32be(v, addr)	iowrite32(be32_to_cpu(v), (addr))
+
 #define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
 #define ioread16_rep(p, dst, count) \
@@ -283,4 +298,7 @@
 #define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))
 
+#define virt_to_bus     virt_to_phys
+#define bus_to_virt     phys_to_virt
+
 #endif /* _ASM_TILE_IO_H */
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 572fd3ef1d7..94e9a511de8 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -23,6 +23,8 @@
 /* IRQ numbers used for linux IPIs. */
 #define IRQ_RESCHEDULE 1
 
+#define irq_canonicalize(irq)   (irq)
+
 void ack_bad_irq(unsigned int irq);
 
 /*
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 9bc0d0725c2..15fb2464112 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -100,8 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	__get_cpu_var(current_asid) = asid;
 
	/* Clear cpu from the old mm, and set it in the new one. */
-	cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
-	cpumask_set_cpu(cpu, &next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
	/* Re-load page tables */
	install_page_table(next->pgd, asid);
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h
index eda60ecbae3..03df7b1e77b 100644
--- a/arch/tile/include/asm/opcode-tile_32.h
+++ b/arch/tile/include/asm/opcode-tile_32.h
@@ -1502,5 +1502,12 @@ extern int parse_insn_tile(tile_bundle_bits bits,
			   decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
 
+/* Given a set of bundle bits and a specific pipe, returns which
+ * instruction the bundle contains in that pipe.
+ */
+extern const struct tile_opcode *
+find_opcode(tile_bundle_bits bits, tile_pipeline pipe);
+
+
 #endif /* opcode_tile_h */
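The io.h helpers added earlier in this patch fill in generics that drivers expect; memset_io() works a 32-bit word at a time, which is why it insists on 4-byte alignment and fans the byte value out with the 0x01010101 multiply. An illustrative (hypothetical driver) use:

```c
#include <linux/types.h>
#include <asm/io.h>

/* base would come from ioremap(); len is assumed to be a multiple
 * of 4, matching memset_io()'s word loop. */
static void example_clear_window(void __iomem *base, size_t len)
{
	memset_io((void __force *)base, 0, len);
}

static u32 example_read_be_reg(void __iomem *reg)
{
	/* For a device with big-endian registers: ioread32be()
	 * byte-swaps the value on this little-endian CPU. */
	return ioread32be(reg);
}
```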
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h
index eda60ecbae3..c0633466cd5 100644
--- a/arch/tile/include/asm/opcode-tile_64.h
+++ b/arch/tile/include/asm/opcode-tile_64.h
@@ -5,863 +5,711 @@ #ifndef opcode_tile_h #define opcode_tile_h -typedef unsigned long long tile_bundle_bits; +typedef unsigned long long tilegx_bundle_bits; enum { - TILE_MAX_OPERANDS = 5 /* mm */ + TILEGX_MAX_OPERANDS = 4 /* bfexts */ }; typedef enum { - TILE_OPC_BPT, - TILE_OPC_INFO, - TILE_OPC_INFOL, - TILE_OPC_J, - TILE_OPC_JAL, - TILE_OPC_MOVE, - TILE_OPC_MOVE_SN, - TILE_OPC_MOVEI, - TILE_OPC_MOVEI_SN, - TILE_OPC_MOVELI, - TILE_OPC_MOVELI_SN, - TILE_OPC_MOVELIS, - TILE_OPC_PREFETCH, - TILE_OPC_RAISE, - TILE_OPC_ADD, - TILE_OPC_ADD_SN, - TILE_OPC_ADDB, - TILE_OPC_ADDB_SN, - TILE_OPC_ADDBS_U, - TILE_OPC_ADDBS_U_SN, - TILE_OPC_ADDH, - TILE_OPC_ADDH_SN, - TILE_OPC_ADDHS, - TILE_OPC_ADDHS_SN, - TILE_OPC_ADDI, - TILE_OPC_ADDI_SN, - TILE_OPC_ADDIB, - TILE_OPC_ADDIB_SN, - TILE_OPC_ADDIH, - TILE_OPC_ADDIH_SN, - TILE_OPC_ADDLI, - TILE_OPC_ADDLI_SN, - TILE_OPC_ADDLIS, - TILE_OPC_ADDS, - TILE_OPC_ADDS_SN, - TILE_OPC_ADIFFB_U, - TILE_OPC_ADIFFB_U_SN, - TILE_OPC_ADIFFH, - TILE_OPC_ADIFFH_SN, - TILE_OPC_AND, - TILE_OPC_AND_SN, - TILE_OPC_ANDI, - TILE_OPC_ANDI_SN, - TILE_OPC_AULI, - TILE_OPC_AVGB_U, - TILE_OPC_AVGB_U_SN, - TILE_OPC_AVGH, - TILE_OPC_AVGH_SN, - TILE_OPC_BBNS, - TILE_OPC_BBNS_SN, - TILE_OPC_BBNST, - TILE_OPC_BBNST_SN, - TILE_OPC_BBS, - TILE_OPC_BBS_SN, - TILE_OPC_BBST, - TILE_OPC_BBST_SN, - TILE_OPC_BGEZ, - TILE_OPC_BGEZ_SN, - TILE_OPC_BGEZT, - TILE_OPC_BGEZT_SN, - TILE_OPC_BGZ, - TILE_OPC_BGZ_SN, - TILE_OPC_BGZT, - TILE_OPC_BGZT_SN, - TILE_OPC_BITX, - TILE_OPC_BITX_SN, - TILE_OPC_BLEZ, - TILE_OPC_BLEZ_SN, - TILE_OPC_BLEZT, - TILE_OPC_BLEZT_SN, - TILE_OPC_BLZ, - TILE_OPC_BLZ_SN, - TILE_OPC_BLZT, - TILE_OPC_BLZT_SN, - TILE_OPC_BNZ, - TILE_OPC_BNZ_SN, - TILE_OPC_BNZT, - TILE_OPC_BNZT_SN, - TILE_OPC_BYTEX, - TILE_OPC_BYTEX_SN, - TILE_OPC_BZ, - TILE_OPC_BZ_SN, - TILE_OPC_BZT, - TILE_OPC_BZT_SN, - TILE_OPC_CLZ, - TILE_OPC_CLZ_SN, - TILE_OPC_CRC32_32, - TILE_OPC_CRC32_32_SN, - TILE_OPC_CRC32_8, - TILE_OPC_CRC32_8_SN, - TILE_OPC_CTZ, - TILE_OPC_CTZ_SN, - TILE_OPC_DRAIN, - TILE_OPC_DTLBPR, - TILE_OPC_DWORD_ALIGN, - TILE_OPC_DWORD_ALIGN_SN, - TILE_OPC_FINV, - TILE_OPC_FLUSH, - TILE_OPC_FNOP, - TILE_OPC_ICOH, - TILE_OPC_ILL, - TILE_OPC_INTHB, - TILE_OPC_INTHB_SN, - TILE_OPC_INTHH, - TILE_OPC_INTHH_SN, - TILE_OPC_INTLB, - TILE_OPC_INTLB_SN, - TILE_OPC_INTLH, - TILE_OPC_INTLH_SN, - TILE_OPC_INV, - TILE_OPC_IRET, - TILE_OPC_JALB, - TILE_OPC_JALF, - TILE_OPC_JALR, - TILE_OPC_JALRP, - TILE_OPC_JB, - TILE_OPC_JF, - TILE_OPC_JR, - TILE_OPC_JRP, - TILE_OPC_LB, - TILE_OPC_LB_SN, - TILE_OPC_LB_U, - TILE_OPC_LB_U_SN, - TILE_OPC_LBADD, - TILE_OPC_LBADD_SN, - TILE_OPC_LBADD_U, - TILE_OPC_LBADD_U_SN, - TILE_OPC_LH, - TILE_OPC_LH_SN, - TILE_OPC_LH_U, - TILE_OPC_LH_U_SN, - TILE_OPC_LHADD, - TILE_OPC_LHADD_SN, - TILE_OPC_LHADD_U, - TILE_OPC_LHADD_U_SN, - TILE_OPC_LNK, - TILE_OPC_LNK_SN, - TILE_OPC_LW, - TILE_OPC_LW_SN, - TILE_OPC_LW_NA, - TILE_OPC_LW_NA_SN, - TILE_OPC_LWADD, - TILE_OPC_LWADD_SN, - TILE_OPC_LWADD_NA, - TILE_OPC_LWADD_NA_SN, - TILE_OPC_MAXB_U, - TILE_OPC_MAXB_U_SN, - TILE_OPC_MAXH, - TILE_OPC_MAXH_SN, - TILE_OPC_MAXIB_U, - TILE_OPC_MAXIB_U_SN, - TILE_OPC_MAXIH, - TILE_OPC_MAXIH_SN, - TILE_OPC_MF, - TILE_OPC_MFSPR, - TILE_OPC_MINB_U, - TILE_OPC_MINB_U_SN,
- TILE_OPC_MINH, - TILE_OPC_MINH_SN, - TILE_OPC_MINIB_U, - TILE_OPC_MINIB_U_SN, - TILE_OPC_MINIH, - TILE_OPC_MINIH_SN, - TILE_OPC_MM, - TILE_OPC_MNZ, - TILE_OPC_MNZ_SN, - TILE_OPC_MNZB, - TILE_OPC_MNZB_SN, - TILE_OPC_MNZH, - TILE_OPC_MNZH_SN, - TILE_OPC_MTSPR, - TILE_OPC_MULHH_SS, - TILE_OPC_MULHH_SS_SN, - TILE_OPC_MULHH_SU, - TILE_OPC_MULHH_SU_SN, - TILE_OPC_MULHH_UU, - TILE_OPC_MULHH_UU_SN, - TILE_OPC_MULHHA_SS, - TILE_OPC_MULHHA_SS_SN, - TILE_OPC_MULHHA_SU, - TILE_OPC_MULHHA_SU_SN, - TILE_OPC_MULHHA_UU, - TILE_OPC_MULHHA_UU_SN, - TILE_OPC_MULHHSA_UU, - TILE_OPC_MULHHSA_UU_SN, - TILE_OPC_MULHL_SS, - TILE_OPC_MULHL_SS_SN, - TILE_OPC_MULHL_SU, - TILE_OPC_MULHL_SU_SN, - TILE_OPC_MULHL_US, - TILE_OPC_MULHL_US_SN, - TILE_OPC_MULHL_UU, - TILE_OPC_MULHL_UU_SN, - TILE_OPC_MULHLA_SS, - TILE_OPC_MULHLA_SS_SN, - TILE_OPC_MULHLA_SU, - TILE_OPC_MULHLA_SU_SN, - TILE_OPC_MULHLA_US, - TILE_OPC_MULHLA_US_SN, - TILE_OPC_MULHLA_UU, - TILE_OPC_MULHLA_UU_SN, - TILE_OPC_MULHLSA_UU, - TILE_OPC_MULHLSA_UU_SN, - TILE_OPC_MULLL_SS, - TILE_OPC_MULLL_SS_SN, - TILE_OPC_MULLL_SU, - TILE_OPC_MULLL_SU_SN, - TILE_OPC_MULLL_UU, - TILE_OPC_MULLL_UU_SN, - TILE_OPC_MULLLA_SS, - TILE_OPC_MULLLA_SS_SN, - TILE_OPC_MULLLA_SU, - TILE_OPC_MULLLA_SU_SN, - TILE_OPC_MULLLA_UU, - TILE_OPC_MULLLA_UU_SN, - TILE_OPC_MULLLSA_UU, - TILE_OPC_MULLLSA_UU_SN, - TILE_OPC_MVNZ, - TILE_OPC_MVNZ_SN, - TILE_OPC_MVZ, - TILE_OPC_MVZ_SN, - TILE_OPC_MZ, - TILE_OPC_MZ_SN, - TILE_OPC_MZB, - TILE_OPC_MZB_SN, - TILE_OPC_MZH, - TILE_OPC_MZH_SN, - TILE_OPC_NAP, - TILE_OPC_NOP, - TILE_OPC_NOR, - TILE_OPC_NOR_SN, - TILE_OPC_OR, - TILE_OPC_OR_SN, - TILE_OPC_ORI, - TILE_OPC_ORI_SN, - TILE_OPC_PACKBS_U, - TILE_OPC_PACKBS_U_SN, - TILE_OPC_PACKHB, - TILE_OPC_PACKHB_SN, - TILE_OPC_PACKHS, - TILE_OPC_PACKHS_SN, - TILE_OPC_PACKLB, - TILE_OPC_PACKLB_SN, - TILE_OPC_PCNT, - TILE_OPC_PCNT_SN, - TILE_OPC_RL, - TILE_OPC_RL_SN, - TILE_OPC_RLI, - TILE_OPC_RLI_SN, - TILE_OPC_S1A, - TILE_OPC_S1A_SN, - TILE_OPC_S2A, - TILE_OPC_S2A_SN, - TILE_OPC_S3A, - TILE_OPC_S3A_SN, - TILE_OPC_SADAB_U, - TILE_OPC_SADAB_U_SN, - TILE_OPC_SADAH, - TILE_OPC_SADAH_SN, - TILE_OPC_SADAH_U, - TILE_OPC_SADAH_U_SN, - TILE_OPC_SADB_U, - TILE_OPC_SADB_U_SN, - TILE_OPC_SADH, - TILE_OPC_SADH_SN, - TILE_OPC_SADH_U, - TILE_OPC_SADH_U_SN, - TILE_OPC_SB, - TILE_OPC_SBADD, - TILE_OPC_SEQ, - TILE_OPC_SEQ_SN, - TILE_OPC_SEQB, - TILE_OPC_SEQB_SN, - TILE_OPC_SEQH, - TILE_OPC_SEQH_SN, - TILE_OPC_SEQI, - TILE_OPC_SEQI_SN, - TILE_OPC_SEQIB, - TILE_OPC_SEQIB_SN, - TILE_OPC_SEQIH, - TILE_OPC_SEQIH_SN, - TILE_OPC_SH, - TILE_OPC_SHADD, - TILE_OPC_SHL, - TILE_OPC_SHL_SN, - TILE_OPC_SHLB, - TILE_OPC_SHLB_SN, - TILE_OPC_SHLH, - TILE_OPC_SHLH_SN, - TILE_OPC_SHLI, - TILE_OPC_SHLI_SN, - TILE_OPC_SHLIB, - TILE_OPC_SHLIB_SN, - TILE_OPC_SHLIH, - TILE_OPC_SHLIH_SN, - TILE_OPC_SHR, - TILE_OPC_SHR_SN, - TILE_OPC_SHRB, - TILE_OPC_SHRB_SN, - TILE_OPC_SHRH, - TILE_OPC_SHRH_SN, - TILE_OPC_SHRI, - TILE_OPC_SHRI_SN, - TILE_OPC_SHRIB, - TILE_OPC_SHRIB_SN, - TILE_OPC_SHRIH, - TILE_OPC_SHRIH_SN, - TILE_OPC_SLT, - TILE_OPC_SLT_SN, - TILE_OPC_SLT_U, - TILE_OPC_SLT_U_SN, - TILE_OPC_SLTB, - TILE_OPC_SLTB_SN, - TILE_OPC_SLTB_U, - TILE_OPC_SLTB_U_SN, - TILE_OPC_SLTE, - TILE_OPC_SLTE_SN, - TILE_OPC_SLTE_U, - TILE_OPC_SLTE_U_SN, - TILE_OPC_SLTEB, - TILE_OPC_SLTEB_SN, - TILE_OPC_SLTEB_U, - TILE_OPC_SLTEB_U_SN, - TILE_OPC_SLTEH, - TILE_OPC_SLTEH_SN, - TILE_OPC_SLTEH_U, - TILE_OPC_SLTEH_U_SN, - TILE_OPC_SLTH, - TILE_OPC_SLTH_SN, - TILE_OPC_SLTH_U, - TILE_OPC_SLTH_U_SN, - TILE_OPC_SLTI, - TILE_OPC_SLTI_SN, - TILE_OPC_SLTI_U, - TILE_OPC_SLTI_U_SN, - 
TILE_OPC_SLTIB, - TILE_OPC_SLTIB_SN, - TILE_OPC_SLTIB_U, - TILE_OPC_SLTIB_U_SN, - TILE_OPC_SLTIH, - TILE_OPC_SLTIH_SN, - TILE_OPC_SLTIH_U, - TILE_OPC_SLTIH_U_SN, - TILE_OPC_SNE, - TILE_OPC_SNE_SN, - TILE_OPC_SNEB, - TILE_OPC_SNEB_SN, - TILE_OPC_SNEH, - TILE_OPC_SNEH_SN, - TILE_OPC_SRA, - TILE_OPC_SRA_SN, - TILE_OPC_SRAB, - TILE_OPC_SRAB_SN, - TILE_OPC_SRAH, - TILE_OPC_SRAH_SN, - TILE_OPC_SRAI, - TILE_OPC_SRAI_SN, - TILE_OPC_SRAIB, - TILE_OPC_SRAIB_SN, - TILE_OPC_SRAIH, - TILE_OPC_SRAIH_SN, - TILE_OPC_SUB, - TILE_OPC_SUB_SN, - TILE_OPC_SUBB, - TILE_OPC_SUBB_SN, - TILE_OPC_SUBBS_U, - TILE_OPC_SUBBS_U_SN, - TILE_OPC_SUBH, - TILE_OPC_SUBH_SN, - TILE_OPC_SUBHS, - TILE_OPC_SUBHS_SN, - TILE_OPC_SUBS, - TILE_OPC_SUBS_SN, - TILE_OPC_SW, - TILE_OPC_SWADD, - TILE_OPC_SWINT0, - TILE_OPC_SWINT1, - TILE_OPC_SWINT2, - TILE_OPC_SWINT3, - TILE_OPC_TBLIDXB0, - TILE_OPC_TBLIDXB0_SN, - TILE_OPC_TBLIDXB1, - TILE_OPC_TBLIDXB1_SN, - TILE_OPC_TBLIDXB2, - TILE_OPC_TBLIDXB2_SN, - TILE_OPC_TBLIDXB3, - TILE_OPC_TBLIDXB3_SN, - TILE_OPC_TNS, - TILE_OPC_TNS_SN, - TILE_OPC_WH64, - TILE_OPC_XOR, - TILE_OPC_XOR_SN, - TILE_OPC_XORI, - TILE_OPC_XORI_SN, - TILE_OPC_NONE -} tile_mnemonic; + TILEGX_OPC_BPT, + TILEGX_OPC_INFO, + TILEGX_OPC_INFOL, + TILEGX_OPC_MOVE, + TILEGX_OPC_MOVEI, + TILEGX_OPC_MOVELI, + TILEGX_OPC_PREFETCH, + TILEGX_OPC_PREFETCH_ADD_L1, + TILEGX_OPC_PREFETCH_ADD_L1_FAULT, + TILEGX_OPC_PREFETCH_ADD_L2, + TILEGX_OPC_PREFETCH_ADD_L2_FAULT, + TILEGX_OPC_PREFETCH_ADD_L3, + TILEGX_OPC_PREFETCH_ADD_L3_FAULT, + TILEGX_OPC_PREFETCH_L1, + TILEGX_OPC_PREFETCH_L1_FAULT, + TILEGX_OPC_PREFETCH_L2, + TILEGX_OPC_PREFETCH_L2_FAULT, + TILEGX_OPC_PREFETCH_L3, + TILEGX_OPC_PREFETCH_L3_FAULT, + TILEGX_OPC_RAISE, + TILEGX_OPC_ADD, + TILEGX_OPC_ADDI, + TILEGX_OPC_ADDLI, + TILEGX_OPC_ADDX, + TILEGX_OPC_ADDXI, + TILEGX_OPC_ADDXLI, + TILEGX_OPC_ADDXSC, + TILEGX_OPC_AND, + TILEGX_OPC_ANDI, + TILEGX_OPC_BEQZ, + TILEGX_OPC_BEQZT, + TILEGX_OPC_BFEXTS, + TILEGX_OPC_BFEXTU, + TILEGX_OPC_BFINS, + TILEGX_OPC_BGEZ, + TILEGX_OPC_BGEZT, + TILEGX_OPC_BGTZ, + TILEGX_OPC_BGTZT, + TILEGX_OPC_BLBC, + TILEGX_OPC_BLBCT, + TILEGX_OPC_BLBS, + TILEGX_OPC_BLBST, + TILEGX_OPC_BLEZ, + TILEGX_OPC_BLEZT, + TILEGX_OPC_BLTZ, + TILEGX_OPC_BLTZT, + TILEGX_OPC_BNEZ, + TILEGX_OPC_BNEZT, + TILEGX_OPC_CLZ, + TILEGX_OPC_CMOVEQZ, + TILEGX_OPC_CMOVNEZ, + TILEGX_OPC_CMPEQ, + TILEGX_OPC_CMPEQI, + TILEGX_OPC_CMPEXCH, + TILEGX_OPC_CMPEXCH4, + TILEGX_OPC_CMPLES, + TILEGX_OPC_CMPLEU, + TILEGX_OPC_CMPLTS, + TILEGX_OPC_CMPLTSI, + TILEGX_OPC_CMPLTU, + TILEGX_OPC_CMPLTUI, + TILEGX_OPC_CMPNE, + TILEGX_OPC_CMUL, + TILEGX_OPC_CMULA, + TILEGX_OPC_CMULAF, + TILEGX_OPC_CMULF, + TILEGX_OPC_CMULFR, + TILEGX_OPC_CMULH, + TILEGX_OPC_CMULHR, + TILEGX_OPC_CRC32_32, + TILEGX_OPC_CRC32_8, + TILEGX_OPC_CTZ, + TILEGX_OPC_DBLALIGN, + TILEGX_OPC_DBLALIGN2, + TILEGX_OPC_DBLALIGN4, + TILEGX_OPC_DBLALIGN6, + TILEGX_OPC_DRAIN, + TILEGX_OPC_DTLBPR, + TILEGX_OPC_EXCH, + TILEGX_OPC_EXCH4, + TILEGX_OPC_FDOUBLE_ADD_FLAGS, + TILEGX_OPC_FDOUBLE_ADDSUB, + TILEGX_OPC_FDOUBLE_MUL_FLAGS, + TILEGX_OPC_FDOUBLE_PACK1, + TILEGX_OPC_FDOUBLE_PACK2, + TILEGX_OPC_FDOUBLE_SUB_FLAGS, + TILEGX_OPC_FDOUBLE_UNPACK_MAX, + TILEGX_OPC_FDOUBLE_UNPACK_MIN, + TILEGX_OPC_FETCHADD, + TILEGX_OPC_FETCHADD4, + TILEGX_OPC_FETCHADDGEZ, + TILEGX_OPC_FETCHADDGEZ4, + TILEGX_OPC_FETCHAND, + TILEGX_OPC_FETCHAND4, + TILEGX_OPC_FETCHOR, + TILEGX_OPC_FETCHOR4, + TILEGX_OPC_FINV, + TILEGX_OPC_FLUSH, + TILEGX_OPC_FLUSHWB, + TILEGX_OPC_FNOP, + TILEGX_OPC_FSINGLE_ADD1, + TILEGX_OPC_FSINGLE_ADDSUB2, + TILEGX_OPC_FSINGLE_MUL1, + 
TILEGX_OPC_FSINGLE_MUL2, + TILEGX_OPC_FSINGLE_PACK1, + TILEGX_OPC_FSINGLE_PACK2, + TILEGX_OPC_FSINGLE_SUB1, + TILEGX_OPC_ICOH, + TILEGX_OPC_ILL, + TILEGX_OPC_INV, + TILEGX_OPC_IRET, + TILEGX_OPC_J, + TILEGX_OPC_JAL, + TILEGX_OPC_JALR, + TILEGX_OPC_JALRP, + TILEGX_OPC_JR, + TILEGX_OPC_JRP, + TILEGX_OPC_LD, + TILEGX_OPC_LD1S, + TILEGX_OPC_LD1S_ADD, + TILEGX_OPC_LD1U, + TILEGX_OPC_LD1U_ADD, + TILEGX_OPC_LD2S, + TILEGX_OPC_LD2S_ADD, + TILEGX_OPC_LD2U, + TILEGX_OPC_LD2U_ADD, + TILEGX_OPC_LD4S, + TILEGX_OPC_LD4S_ADD, + TILEGX_OPC_LD4U, + TILEGX_OPC_LD4U_ADD, + TILEGX_OPC_LD_ADD, + TILEGX_OPC_LDNA, + TILEGX_OPC_LDNA_ADD, + TILEGX_OPC_LDNT, + TILEGX_OPC_LDNT1S, + TILEGX_OPC_LDNT1S_ADD, + TILEGX_OPC_LDNT1U, + TILEGX_OPC_LDNT1U_ADD, + TILEGX_OPC_LDNT2S, + TILEGX_OPC_LDNT2S_ADD, + TILEGX_OPC_LDNT2U, + TILEGX_OPC_LDNT2U_ADD, + TILEGX_OPC_LDNT4S, + TILEGX_OPC_LDNT4S_ADD, + TILEGX_OPC_LDNT4U, + TILEGX_OPC_LDNT4U_ADD, + TILEGX_OPC_LDNT_ADD, + TILEGX_OPC_LNK, + TILEGX_OPC_MF, + TILEGX_OPC_MFSPR, + TILEGX_OPC_MM, + TILEGX_OPC_MNZ, + TILEGX_OPC_MTSPR, + TILEGX_OPC_MUL_HS_HS, + TILEGX_OPC_MUL_HS_HU, + TILEGX_OPC_MUL_HS_LS, + TILEGX_OPC_MUL_HS_LU, + TILEGX_OPC_MUL_HU_HU, + TILEGX_OPC_MUL_HU_LS, + TILEGX_OPC_MUL_HU_LU, + TILEGX_OPC_MUL_LS_LS, + TILEGX_OPC_MUL_LS_LU, + TILEGX_OPC_MUL_LU_LU, + TILEGX_OPC_MULA_HS_HS, + TILEGX_OPC_MULA_HS_HU, + TILEGX_OPC_MULA_HS_LS, + TILEGX_OPC_MULA_HS_LU, + TILEGX_OPC_MULA_HU_HU, + TILEGX_OPC_MULA_HU_LS, + TILEGX_OPC_MULA_HU_LU, + TILEGX_OPC_MULA_LS_LS, + TILEGX_OPC_MULA_LS_LU, + TILEGX_OPC_MULA_LU_LU, + TILEGX_OPC_MULAX, + TILEGX_OPC_MULX, + TILEGX_OPC_MZ, + TILEGX_OPC_NAP, + TILEGX_OPC_NOP, + TILEGX_OPC_NOR, + TILEGX_OPC_OR, + TILEGX_OPC_ORI, + TILEGX_OPC_PCNT, + TILEGX_OPC_REVBITS, + TILEGX_OPC_REVBYTES, + TILEGX_OPC_ROTL, + TILEGX_OPC_ROTLI, + TILEGX_OPC_SHL, + TILEGX_OPC_SHL16INSLI, + TILEGX_OPC_SHL1ADD, + TILEGX_OPC_SHL1ADDX, + TILEGX_OPC_SHL2ADD, + TILEGX_OPC_SHL2ADDX, + TILEGX_OPC_SHL3ADD, + TILEGX_OPC_SHL3ADDX, + TILEGX_OPC_SHLI, + TILEGX_OPC_SHLX, + TILEGX_OPC_SHLXI, + TILEGX_OPC_SHRS, + TILEGX_OPC_SHRSI, + TILEGX_OPC_SHRU, + TILEGX_OPC_SHRUI, + TILEGX_OPC_SHRUX, + TILEGX_OPC_SHRUXI, + TILEGX_OPC_SHUFFLEBYTES, + TILEGX_OPC_ST, + TILEGX_OPC_ST1, + TILEGX_OPC_ST1_ADD, + TILEGX_OPC_ST2, + TILEGX_OPC_ST2_ADD, + TILEGX_OPC_ST4, + TILEGX_OPC_ST4_ADD, + TILEGX_OPC_ST_ADD, + TILEGX_OPC_STNT, + TILEGX_OPC_STNT1, + TILEGX_OPC_STNT1_ADD, + TILEGX_OPC_STNT2, + TILEGX_OPC_STNT2_ADD, + TILEGX_OPC_STNT4, + TILEGX_OPC_STNT4_ADD, + TILEGX_OPC_STNT_ADD, + TILEGX_OPC_SUB, + TILEGX_OPC_SUBX, + TILEGX_OPC_SUBXSC, + TILEGX_OPC_SWINT0, + TILEGX_OPC_SWINT1, + TILEGX_OPC_SWINT2, + TILEGX_OPC_SWINT3, + TILEGX_OPC_TBLIDXB0, + TILEGX_OPC_TBLIDXB1, + TILEGX_OPC_TBLIDXB2, + TILEGX_OPC_TBLIDXB3, + TILEGX_OPC_V1ADD, + TILEGX_OPC_V1ADDI, + TILEGX_OPC_V1ADDUC, + TILEGX_OPC_V1ADIFFU, + TILEGX_OPC_V1AVGU, + TILEGX_OPC_V1CMPEQ, + TILEGX_OPC_V1CMPEQI, + TILEGX_OPC_V1CMPLES, + TILEGX_OPC_V1CMPLEU, + TILEGX_OPC_V1CMPLTS, + TILEGX_OPC_V1CMPLTSI, + TILEGX_OPC_V1CMPLTU, + TILEGX_OPC_V1CMPLTUI, + TILEGX_OPC_V1CMPNE, + TILEGX_OPC_V1DDOTPU, + TILEGX_OPC_V1DDOTPUA, + TILEGX_OPC_V1DDOTPUS, + TILEGX_OPC_V1DDOTPUSA, + TILEGX_OPC_V1DOTP, + TILEGX_OPC_V1DOTPA, + TILEGX_OPC_V1DOTPU, + TILEGX_OPC_V1DOTPUA, + TILEGX_OPC_V1DOTPUS, + TILEGX_OPC_V1DOTPUSA, + TILEGX_OPC_V1INT_H, + TILEGX_OPC_V1INT_L, + TILEGX_OPC_V1MAXU, + TILEGX_OPC_V1MAXUI, + TILEGX_OPC_V1MINU, + TILEGX_OPC_V1MINUI, + TILEGX_OPC_V1MNZ, + TILEGX_OPC_V1MULTU, + TILEGX_OPC_V1MULU, + TILEGX_OPC_V1MULUS, + TILEGX_OPC_V1MZ, + TILEGX_OPC_V1SADAU, + 
TILEGX_OPC_V1SADU, + TILEGX_OPC_V1SHL, + TILEGX_OPC_V1SHLI, + TILEGX_OPC_V1SHRS, + TILEGX_OPC_V1SHRSI, + TILEGX_OPC_V1SHRU, + TILEGX_OPC_V1SHRUI, + TILEGX_OPC_V1SUB, + TILEGX_OPC_V1SUBUC, + TILEGX_OPC_V2ADD, + TILEGX_OPC_V2ADDI, + TILEGX_OPC_V2ADDSC, + TILEGX_OPC_V2ADIFFS, + TILEGX_OPC_V2AVGS, + TILEGX_OPC_V2CMPEQ, + TILEGX_OPC_V2CMPEQI, + TILEGX_OPC_V2CMPLES, + TILEGX_OPC_V2CMPLEU, + TILEGX_OPC_V2CMPLTS, + TILEGX_OPC_V2CMPLTSI, + TILEGX_OPC_V2CMPLTU, + TILEGX_OPC_V2CMPLTUI, + TILEGX_OPC_V2CMPNE, + TILEGX_OPC_V2DOTP, + TILEGX_OPC_V2DOTPA, + TILEGX_OPC_V2INT_H, + TILEGX_OPC_V2INT_L, + TILEGX_OPC_V2MAXS, + TILEGX_OPC_V2MAXSI, + TILEGX_OPC_V2MINS, + TILEGX_OPC_V2MINSI, + TILEGX_OPC_V2MNZ, + TILEGX_OPC_V2MULFSC, + TILEGX_OPC_V2MULS, + TILEGX_OPC_V2MULTS, + TILEGX_OPC_V2MZ, + TILEGX_OPC_V2PACKH, + TILEGX_OPC_V2PACKL, + TILEGX_OPC_V2PACKUC, + TILEGX_OPC_V2SADAS, + TILEGX_OPC_V2SADAU, + TILEGX_OPC_V2SADS, + TILEGX_OPC_V2SADU, + TILEGX_OPC_V2SHL, + TILEGX_OPC_V2SHLI, + TILEGX_OPC_V2SHLSC, + TILEGX_OPC_V2SHRS, + TILEGX_OPC_V2SHRSI, + TILEGX_OPC_V2SHRU, + TILEGX_OPC_V2SHRUI, + TILEGX_OPC_V2SUB, + TILEGX_OPC_V2SUBSC, + TILEGX_OPC_V4ADD, + TILEGX_OPC_V4ADDSC, + TILEGX_OPC_V4INT_H, + TILEGX_OPC_V4INT_L, + TILEGX_OPC_V4PACKSC, + TILEGX_OPC_V4SHL, + TILEGX_OPC_V4SHLSC, + TILEGX_OPC_V4SHRS, + TILEGX_OPC_V4SHRU, + TILEGX_OPC_V4SUB, + TILEGX_OPC_V4SUBSC, + TILEGX_OPC_WH64, + TILEGX_OPC_XOR, + TILEGX_OPC_XORI, + TILEGX_OPC_NONE +} tilegx_mnemonic; /* 64-bit pattern for a { bpt ; nop } bundle. */ -#define TILE_BPT_BUNDLE 0x400b3cae70166000ULL +#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL -#define TILE_ELF_MACHINE_CODE EM_TILEPRO +#define TILE_ELF_MACHINE_CODE EM_TILE64 -#define TILE_ELF_NAME "elf32-tilepro" +#define TILE_ELF_NAME "elf32-tile64" static __inline unsigned int -get_BrOff_SN(tile_bundle_bits num) +get_BFEnd_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 0)) & 0x3ff); + return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_BrOff_X1(tile_bundle_bits n) +get_BFOpcodeExtension_X0(tilegx_bundle_bits num) { - return (((unsigned int)(n >> 43)) & 0x00007fff) | - (((unsigned int)(n >> 20)) & 0x00018000); + const unsigned int n = (unsigned int)num; + return (((n >> 24)) & 0xf); } static __inline unsigned int -get_BrType_X1(tile_bundle_bits n) +get_BFStart_X0(tilegx_bundle_bits num) { - return (((unsigned int)(n >> 31)) & 0xf); + const unsigned int n = (unsigned int)num; + return (((n >> 18)) & 0x3f); } static __inline unsigned int -get_Dest_Imm8_X1(tile_bundle_bits n) +get_BrOff_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 31)) & 0x0000003f) | - (((unsigned int)(n >> 43)) & 0x000000c0); + (((unsigned int)(n >> 37)) & 0x0001ffc0); } static __inline unsigned int -get_Dest_SN(tile_bundle_bits num) +get_BrType_X1(tilegx_bundle_bits n) { - const unsigned int n = (unsigned int)num; - return (((n >> 2)) & 0x3); + return (((unsigned int)(n >> 54)) & 0x1f); } static __inline unsigned int -get_Dest_X0(tile_bundle_bits num) +get_Dest_Imm8_X1(tilegx_bundle_bits n) +{ + return (((unsigned int)(n >> 31)) & 0x0000003f) | + (((unsigned int)(n >> 43)) & 0x000000c0); +} + +static __inline unsigned int +get_Dest_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 0)) & 0x3f); } static __inline unsigned int -get_Dest_X1(tile_bundle_bits n) +get_Dest_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 31)) & 0x3f); } static __inline unsigned int -get_Dest_Y0(tile_bundle_bits num) +get_Dest_Y0(tilegx_bundle_bits num) { const 
unsigned int n = (unsigned int)num; return (((n >> 0)) & 0x3f); } static __inline unsigned int -get_Dest_Y1(tile_bundle_bits n) +get_Dest_Y1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 31)) & 0x3f); } static __inline unsigned int -get_Imm16_X0(tile_bundle_bits num) +get_Imm16_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 12)) & 0xffff); } static __inline unsigned int -get_Imm16_X1(tile_bundle_bits n) +get_Imm16_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 43)) & 0xffff); } static __inline unsigned int -get_Imm8_SN(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 0)) & 0xff); -} - -static __inline unsigned int -get_Imm8_X0(tile_bundle_bits num) +get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0xff); + return (((n >> 20)) & 0xff); } static __inline unsigned int -get_Imm8_X1(tile_bundle_bits n) +get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 43)) & 0xff); + return (((unsigned int)(n >> 51)) & 0xff); } static __inline unsigned int -get_Imm8_Y0(tile_bundle_bits num) +get_Imm8_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 12)) & 0xff); } static __inline unsigned int -get_Imm8_Y1(tile_bundle_bits n) +get_Imm8_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 43)) & 0xff); } static __inline unsigned int -get_ImmOpcodeExtension_X0(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 20)) & 0x7f); -} - -static __inline unsigned int -get_ImmOpcodeExtension_X1(tile_bundle_bits n) -{ - return (((unsigned int)(n >> 51)) & 0x7f); -} - -static __inline unsigned int -get_ImmRROpcodeExtension_SN(tile_bundle_bits num) +get_Imm8_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 8)) & 0x3); -} - -static __inline unsigned int -get_JOffLong_X1(tile_bundle_bits n) -{ - return (((unsigned int)(n >> 43)) & 0x00007fff) | - (((unsigned int)(n >> 20)) & 0x00018000) | - (((unsigned int)(n >> 14)) & 0x001e0000) | - (((unsigned int)(n >> 16)) & 0x07e00000) | - (((unsigned int)(n >> 31)) & 0x18000000); -} - -static __inline unsigned int -get_JOff_X1(tile_bundle_bits n) -{ - return (((unsigned int)(n >> 43)) & 0x00007fff) | - (((unsigned int)(n >> 20)) & 0x00018000) | - (((unsigned int)(n >> 14)) & 0x001e0000) | - (((unsigned int)(n >> 16)) & 0x07e00000) | - (((unsigned int)(n >> 31)) & 0x08000000); -} - -static __inline unsigned int -get_MF_Imm15_X1(tile_bundle_bits n) -{ - return (((unsigned int)(n >> 37)) & 0x00003fff) | - (((unsigned int)(n >> 44)) & 0x00004000); + return (((n >> 12)) & 0xff); } static __inline unsigned int -get_MMEnd_X0(tile_bundle_bits num) +get_Imm8_Y1(tilegx_bundle_bits n) { - const unsigned int n = (unsigned int)num; - return (((n >> 18)) & 0x1f); + return (((unsigned int)(n >> 43)) & 0xff); } static __inline unsigned int -get_MMEnd_X1(tile_bundle_bits n) +get_JumpOff_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 49)) & 0x1f); + return (((unsigned int)(n >> 31)) & 0x7ffffff); } static __inline unsigned int -get_MMStart_X0(tile_bundle_bits num) +get_JumpOpcodeExtension_X1(tilegx_bundle_bits n) { - const unsigned int n = (unsigned int)num; - return (((n >> 23)) & 0x1f); + return (((unsigned int)(n >> 58)) & 0x1); } static __inline unsigned int -get_MMStart_X1(tile_bundle_bits n) +get_MF_Imm14_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 54)) & 0x1f); + return 
(((unsigned int)(n >> 37)) & 0x3fff); } static __inline unsigned int -get_MT_Imm15_X1(tile_bundle_bits n) +get_MT_Imm14_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 31)) & 0x0000003f) | - (((unsigned int)(n >> 37)) & 0x00003fc0) | - (((unsigned int)(n >> 44)) & 0x00004000); + (((unsigned int)(n >> 37)) & 0x00003fc0); } static __inline unsigned int -get_Mode(tile_bundle_bits n) +get_Mode(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 63)) & 0x1); + return (((unsigned int)(n >> 62)) & 0x3); } static __inline unsigned int -get_NoRegOpcodeExtension_SN(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 0)) & 0xf); -} - -static __inline unsigned int -get_Opcode_SN(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 10)) & 0x3f); -} - -static __inline unsigned int -get_Opcode_X0(tile_bundle_bits num) +get_Opcode_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 28)) & 0x7); } static __inline unsigned int -get_Opcode_X1(tile_bundle_bits n) +get_Opcode_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 59)) & 0xf); + return (((unsigned int)(n >> 59)) & 0x7); } static __inline unsigned int -get_Opcode_Y0(tile_bundle_bits num) +get_Opcode_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 27)) & 0xf); } static __inline unsigned int -get_Opcode_Y1(tile_bundle_bits n) +get_Opcode_Y1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 59)) & 0xf); + return (((unsigned int)(n >> 58)) & 0xf); } static __inline unsigned int -get_Opcode_Y2(tile_bundle_bits n) +get_Opcode_Y2(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 56)) & 0x7); -} - -static __inline unsigned int -get_RROpcodeExtension_SN(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 4)) & 0xf); + return (((n >> 26)) & 0x00000001) | + (((unsigned int)(n >> 56)) & 0x00000002); } static __inline unsigned int -get_RRROpcodeExtension_X0(tile_bundle_bits num) +get_RRROpcodeExtension_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 18)) & 0x1ff); + return (((n >> 18)) & 0x3ff); } static __inline unsigned int -get_RRROpcodeExtension_X1(tile_bundle_bits n) +get_RRROpcodeExtension_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 49)) & 0x1ff); + return (((unsigned int)(n >> 49)) & 0x3ff); } static __inline unsigned int -get_RRROpcodeExtension_Y0(tile_bundle_bits num) +get_RRROpcodeExtension_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 18)) & 0x3); } static __inline unsigned int -get_RRROpcodeExtension_Y1(tile_bundle_bits n) +get_RRROpcodeExtension_Y1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 49)) & 0x3); } static __inline unsigned int -get_RouteOpcodeExtension_SN(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 0)) & 0x3ff); -} - -static __inline unsigned int -get_S_X0(tile_bundle_bits num) +get_ShAmt_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 27)) & 0x1); + return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_S_X1(tile_bundle_bits n) +get_ShAmt_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 58)) & 0x1); + return (((unsigned int)(n >> 43)) & 0x3f); } static __inline unsigned int -get_ShAmt_X0(tile_bundle_bits num) +get_ShAmt_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0x1f); + return (((n >> 12)) & 
0x3f); } static __inline unsigned int -get_ShAmt_X1(tile_bundle_bits n) +get_ShAmt_Y1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 43)) & 0x1f); + return (((unsigned int)(n >> 43)) & 0x3f); } static __inline unsigned int -get_ShAmt_Y0(tile_bundle_bits num) +get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0x1f); + return (((n >> 18)) & 0x3ff); } static __inline unsigned int -get_ShAmt_Y1(tile_bundle_bits n) +get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 43)) & 0x1f); + return (((unsigned int)(n >> 49)) & 0x3ff); } static __inline unsigned int -get_SrcA_X0(tile_bundle_bits num) +get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 6)) & 0x3f); + return (((n >> 18)) & 0x3); } static __inline unsigned int -get_SrcA_X1(tile_bundle_bits n) +get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 37)) & 0x3f); + return (((unsigned int)(n >> 49)) & 0x3); } static __inline unsigned int -get_SrcA_Y0(tile_bundle_bits num) +get_SrcA_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 6)) & 0x3f); } static __inline unsigned int -get_SrcA_Y1(tile_bundle_bits n) +get_SrcA_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 37)) & 0x3f); } static __inline unsigned int -get_SrcA_Y2(tile_bundle_bits n) +get_SrcA_Y0(tilegx_bundle_bits num) { - return (((n >> 26)) & 0x00000001) | - (((unsigned int)(n >> 50)) & 0x0000003e); + const unsigned int n = (unsigned int)num; + return (((n >> 6)) & 0x3f); } static __inline unsigned int -get_SrcBDest_Y2(tile_bundle_bits num) +get_SrcA_Y1(tilegx_bundle_bits n) { - const unsigned int n = (unsigned int)num; - return (((n >> 20)) & 0x3f); + return (((unsigned int)(n >> 37)) & 0x3f); } static __inline unsigned int -get_SrcB_X0(tile_bundle_bits num) +get_SrcA_Y2(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0x3f); + return (((n >> 20)) & 0x3f); } static __inline unsigned int -get_SrcB_X1(tile_bundle_bits n) +get_SrcBDest_Y2(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 43)) & 0x3f); + return (((unsigned int)(n >> 51)) & 0x3f); } static __inline unsigned int -get_SrcB_Y0(tile_bundle_bits num) +get_SrcB_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_SrcB_Y1(tile_bundle_bits n) +get_SrcB_X1(tilegx_bundle_bits n) { return (((unsigned int)(n >> 43)) & 0x3f); } static __inline unsigned int -get_Src_SN(tile_bundle_bits num) +get_SrcB_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 0)) & 0x3); -} - -static __inline unsigned int -get_UnOpcodeExtension_X0(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0x1f); -} - -static __inline unsigned int -get_UnOpcodeExtension_X1(tile_bundle_bits n) -{ - return (((unsigned int)(n >> 43)) & 0x1f); -} - -static __inline unsigned int -get_UnOpcodeExtension_Y0(tile_bundle_bits num) -{ - const unsigned int n = (unsigned int)num; - return (((n >> 12)) & 0x1f); + return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_UnOpcodeExtension_Y1(tile_bundle_bits n) +get_SrcB_Y1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 43)) & 0x1f); + return (((unsigned int)(n >> 43)) & 0x3f); } static __inline unsigned int -get_UnShOpcodeExtension_X0(tile_bundle_bits num) 
+get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 17)) & 0x3ff); + return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_UnShOpcodeExtension_X1(tile_bundle_bits n) +get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 48)) & 0x3ff); + return (((unsigned int)(n >> 43)) & 0x3f); } static __inline unsigned int -get_UnShOpcodeExtension_Y0(tile_bundle_bits num) +get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num) { const unsigned int n = (unsigned int)num; - return (((n >> 17)) & 0x7); + return (((n >> 12)) & 0x3f); } static __inline unsigned int -get_UnShOpcodeExtension_Y1(tile_bundle_bits n) +get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n) { - return (((unsigned int)(n >> 48)) & 0x7); + return (((unsigned int)(n >> 43)) & 0x3f); } @@ -874,546 +722,441 @@ sign_extend(int n, int num_bits) -static __inline tile_bundle_bits -create_BrOff_SN(int num) +static __inline tilegx_bundle_bits +create_BFEnd_X0(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x3ff) << 0); + return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits -create_BrOff_X1(int num) +static __inline tilegx_bundle_bits +create_BFOpcodeExtension_X0(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | - (((tile_bundle_bits)(n & 0x00018000)) << 20); + return ((n & 0xf) << 24); } -static __inline tile_bundle_bits -create_BrType_X1(int num) +static __inline tilegx_bundle_bits +create_BFStart_X0(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xf)) << 31); + return ((n & 0x3f) << 18); } -static __inline tile_bundle_bits -create_Dest_Imm8_X1(int num) +static __inline tilegx_bundle_bits +create_BrOff_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | - (((tile_bundle_bits)(n & 0x000000c0)) << 43); + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37); } -static __inline tile_bundle_bits -create_Dest_SN(int num) +static __inline tilegx_bundle_bits +create_BrType_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x1f)) << 54); +} + +static __inline tilegx_bundle_bits +create_Dest_Imm8_X1(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x3) << 2); + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x000000c0)) << 43); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Dest_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 0); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Dest_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 31); + return (((tilegx_bundle_bits)(n & 0x3f)) << 31); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Dest_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 0); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Dest_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 31); + return (((tilegx_bundle_bits)(n & 0x3f)) << 31); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Imm16_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0xffff) << 12); } -static __inline 
tile_bundle_bits +static __inline tilegx_bundle_bits create_Imm16_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xffff)) << 43); + return (((tilegx_bundle_bits)(n & 0xffff)) << 43); } -static __inline tile_bundle_bits -create_Imm8_SN(int num) +static __inline tilegx_bundle_bits +create_Imm8OpcodeExtension_X0(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0xff) << 0); + return ((n & 0xff) << 20); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits +create_Imm8OpcodeExtension_X1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0xff)) << 51); +} + +static __inline tilegx_bundle_bits create_Imm8_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0xff) << 12); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Imm8_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xff)) << 43); + return (((tilegx_bundle_bits)(n & 0xff)) << 43); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Imm8_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0xff) << 12); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Imm8_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xff)) << 43); -} - -static __inline tile_bundle_bits -create_ImmOpcodeExtension_X0(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x7f) << 20); -} - -static __inline tile_bundle_bits -create_ImmOpcodeExtension_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x7f)) << 51); -} - -static __inline tile_bundle_bits -create_ImmRROpcodeExtension_SN(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x3) << 8); -} - -static __inline tile_bundle_bits -create_JOffLong_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | - (((tile_bundle_bits)(n & 0x00018000)) << 20) | - (((tile_bundle_bits)(n & 0x001e0000)) << 14) | - (((tile_bundle_bits)(n & 0x07e00000)) << 16) | - (((tile_bundle_bits)(n & 0x18000000)) << 31); -} - -static __inline tile_bundle_bits -create_JOff_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | - (((tile_bundle_bits)(n & 0x00018000)) << 20) | - (((tile_bundle_bits)(n & 0x001e0000)) << 14) | - (((tile_bundle_bits)(n & 0x07e00000)) << 16) | - (((tile_bundle_bits)(n & 0x08000000)) << 31); -} - -static __inline tile_bundle_bits -create_MF_Imm15_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | - (((tile_bundle_bits)(n & 0x00004000)) << 44); + return (((tilegx_bundle_bits)(n & 0xff)) << 43); } -static __inline tile_bundle_bits -create_MMEnd_X0(int num) +static __inline tilegx_bundle_bits +create_JumpOff_X1(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 18); + return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31); } -static __inline tile_bundle_bits -create_MMEnd_X1(int num) +static __inline tilegx_bundle_bits +create_JumpOpcodeExtension_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 49); + return (((tilegx_bundle_bits)(n & 0x1)) << 58); } -static __inline tile_bundle_bits -create_MMStart_X0(int num) +static __inline tilegx_bundle_bits +create_MF_Imm14_X1(int num) { 
const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 23); + return (((tilegx_bundle_bits)(n & 0x3fff)) << 37); } -static __inline tile_bundle_bits -create_MMStart_X1(int num) +static __inline tilegx_bundle_bits +create_MT_Imm14_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 54); + return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) | + (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37); } -static __inline tile_bundle_bits -create_MT_Imm15_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | - (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | - (((tile_bundle_bits)(n & 0x00004000)) << 44); -} - -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Mode(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1)) << 63); + return (((tilegx_bundle_bits)(n & 0x3)) << 62); } -static __inline tile_bundle_bits -create_NoRegOpcodeExtension_SN(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0xf) << 0); -} - -static __inline tile_bundle_bits -create_Opcode_SN(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x3f) << 10); -} - -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Opcode_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x7) << 28); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Opcode_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xf)) << 59); + return (((tilegx_bundle_bits)(n & 0x7)) << 59); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Opcode_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0xf) << 27); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Opcode_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0xf)) << 59); + return (((tilegx_bundle_bits)(n & 0xf)) << 58); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_Opcode_Y2(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x7)) << 56); -} - -static __inline tile_bundle_bits -create_RROpcodeExtension_SN(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0xf) << 4); + return ((n & 0x00000001) << 26) | + (((tilegx_bundle_bits)(n & 0x00000002)) << 56); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_RRROpcodeExtension_X0(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x1ff) << 18); + return ((n & 0x3ff) << 18); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_RRROpcodeExtension_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1ff)) << 49); + return (((tilegx_bundle_bits)(n & 0x3ff)) << 49); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_RRROpcodeExtension_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3) << 18); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_RRROpcodeExtension_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3)) << 49); + return (((tilegx_bundle_bits)(n & 0x3)) << 49); } -static __inline tile_bundle_bits -create_RouteOpcodeExtension_SN(int num) +static __inline tilegx_bundle_bits +create_ShAmt_X0(int num) { const unsigned 
int n = (unsigned int)num; - return ((n & 0x3ff) << 0); + return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits -create_S_X0(int num) +static __inline tilegx_bundle_bits +create_ShAmt_X1(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x1) << 27); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } -static __inline tile_bundle_bits -create_S_X1(int num) +static __inline tilegx_bundle_bits +create_ShAmt_Y0(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1)) << 58); + return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits -create_ShAmt_X0(int num) +static __inline tilegx_bundle_bits +create_ShAmt_Y1(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 12); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } -static __inline tile_bundle_bits -create_ShAmt_X1(int num) +static __inline tilegx_bundle_bits +create_ShiftOpcodeExtension_X0(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 43); + return ((n & 0x3ff) << 18); } -static __inline tile_bundle_bits -create_ShAmt_Y0(int num) +static __inline tilegx_bundle_bits +create_ShiftOpcodeExtension_X1(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 12); + return (((tilegx_bundle_bits)(n & 0x3ff)) << 49); } -static __inline tile_bundle_bits -create_ShAmt_Y1(int num) +static __inline tilegx_bundle_bits +create_ShiftOpcodeExtension_Y0(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 43); + return ((n & 0x3) << 18); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits +create_ShiftOpcodeExtension_Y1(int num) +{ + const unsigned int n = (unsigned int)num; + return (((tilegx_bundle_bits)(n & 0x3)) << 49); +} + +static __inline tilegx_bundle_bits create_SrcA_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 6); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcA_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 37); + return (((tilegx_bundle_bits)(n & 0x3f)) << 37); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcA_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 6); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcA_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 37); + return (((tilegx_bundle_bits)(n & 0x3f)) << 37); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcA_Y2(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x00000001) << 26) | - (((tile_bundle_bits)(n & 0x0000003e)) << 50); + return ((n & 0x3f) << 20); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcBDest_Y2(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x3f) << 20); + return (((tilegx_bundle_bits)(n & 0x3f)) << 51); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcB_X0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcB_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 43); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits 
create_SrcB_Y0(int num) { const unsigned int n = (unsigned int)num; return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits +static __inline tilegx_bundle_bits create_SrcB_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3f)) << 43); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } -static __inline tile_bundle_bits -create_Src_SN(int num) +static __inline tilegx_bundle_bits +create_UnaryOpcodeExtension_X0(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x3) << 0); -} - -static __inline tile_bundle_bits -create_UnOpcodeExtension_X0(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 12); -} - -static __inline tile_bundle_bits -create_UnOpcodeExtension_X1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 43); -} - -static __inline tile_bundle_bits -create_UnOpcodeExtension_Y0(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x1f) << 12); -} - -static __inline tile_bundle_bits -create_UnOpcodeExtension_Y1(int num) -{ - const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x1f)) << 43); -} - -static __inline tile_bundle_bits -create_UnShOpcodeExtension_X0(int num) -{ - const unsigned int n = (unsigned int)num; - return ((n & 0x3ff) << 17); + return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits -create_UnShOpcodeExtension_X1(int num) +static __inline tilegx_bundle_bits +create_UnaryOpcodeExtension_X1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x3ff)) << 48); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } -static __inline tile_bundle_bits -create_UnShOpcodeExtension_Y0(int num) +static __inline tilegx_bundle_bits +create_UnaryOpcodeExtension_Y0(int num) { const unsigned int n = (unsigned int)num; - return ((n & 0x7) << 17); + return ((n & 0x3f) << 12); } -static __inline tile_bundle_bits -create_UnShOpcodeExtension_Y1(int num) +static __inline tilegx_bundle_bits +create_UnaryOpcodeExtension_Y1(int num) { const unsigned int n = (unsigned int)num; - return (((tile_bundle_bits)(n & 0x7)) << 48); + return (((tilegx_bundle_bits)(n & 0x3f)) << 43); } - typedef enum { - TILE_PIPELINE_X0, - TILE_PIPELINE_X1, - TILE_PIPELINE_Y0, - TILE_PIPELINE_Y1, - TILE_PIPELINE_Y2, -} tile_pipeline; + TILEGX_PIPELINE_X0, + TILEGX_PIPELINE_X1, + TILEGX_PIPELINE_Y0, + TILEGX_PIPELINE_Y1, + TILEGX_PIPELINE_Y2, +} tilegx_pipeline; -#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) +#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1) typedef enum { - TILE_OP_TYPE_REGISTER, - TILE_OP_TYPE_IMMEDIATE, - TILE_OP_TYPE_ADDRESS, - TILE_OP_TYPE_SPR -} tile_operand_type; + TILEGX_OP_TYPE_REGISTER, + TILEGX_OP_TYPE_IMMEDIATE, + TILEGX_OP_TYPE_ADDRESS, + TILEGX_OP_TYPE_SPR +} tilegx_operand_type; -/* This is the bit that determines if a bundle is in the Y encoding. */ -#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) +/* These are the bits that determine if a bundle is in the X encoding. */ +#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62) enum { /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ - TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, + TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3, /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ - TILE_NUM_PIPELINE_ENCODINGS = 5, + TILEGX_NUM_PIPELINE_ENCODINGS = 5, - /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. 
*/ - TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, + /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */ + TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3, /* Instructions take this many bytes. */ - TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, + TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES, - /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ - TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, + /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */ + TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, /* Bundles should be aligned modulo this number of bytes. */ - TILE_BUNDLE_ALIGNMENT_IN_BYTES = - (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), - - /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ - TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, - - /* Static network instructions take this many bytes. */ - TILE_SN_INSTRUCTION_SIZE_IN_BYTES = - (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), + TILEGX_BUNDLE_ALIGNMENT_IN_BYTES = + (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), /* Number of registers (some are magic, such as network I/O). */ - TILE_NUM_REGISTERS = 64, - - /* Number of static network registers. */ - TILE_NUM_SN_REGISTERS = 4 + TILEGX_NUM_REGISTERS = 64, }; -struct tile_operand +struct tilegx_operand { /* Is this operand a register, immediate or address? */ - tile_operand_type type; + tilegx_operand_type type; /* The default relocation type for this operand. */ signed int default_reloc : 16; @@ -1437,27 +1180,27 @@ struct tile_operand unsigned int rightshift : 2; /* Return the bits for this operand to be ORed into an existing bundle. */ - tile_bundle_bits (*insert) (int op); + tilegx_bundle_bits (*insert) (int op); /* Extract this operand and return it. */ - unsigned int (*extract) (tile_bundle_bits bundle); + unsigned int (*extract) (tilegx_bundle_bits bundle); }; -extern const struct tile_operand tile_operands[]; +extern const struct tilegx_operand tilegx_operands[]; /* One finite-state machine per pipe for rapid instruction decoding. */ extern const unsigned short * const -tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; +tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS]; -struct tile_opcode +struct tilegx_opcode { /* The opcode mnemonic, e.g. "add" */ const char *name; /* The enum value for this mnemonic. */ - tile_mnemonic mnemonic; + tilegx_mnemonic mnemonic; /* A bit mask of which of the five pipes this instruction is compatible with: @@ -1478,29 +1221,28 @@ struct tile_opcode unsigned char can_bundle; /* The description of the operands. Each of these is an - * index into the tile_operands[] table. */ - unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; + * index into the tilegx_operands[] table. */ + unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS]; }; -extern const struct tile_opcode tile_opcodes[]; - +extern const struct tilegx_opcode tilegx_opcodes[]; /* Used for non-textual disassembly into structs. */ -struct tile_decoded_instruction +struct tilegx_decoded_instruction { - const struct tile_opcode *opcode; - const struct tile_operand *operands[TILE_MAX_OPERANDS]; - int operand_values[TILE_MAX_OPERANDS]; + const struct tilegx_opcode *opcode; + const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS]; + long long operand_values[TILEGX_MAX_OPERANDS]; }; /* Disassemble a bundle into a struct for machine processing. 
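/*
 * A minimal, self-contained sketch of how the accessor pairs in this
 * header fit together: each create_ helper packs a field into the
 * 64-bit bundle word and the matching get_ helper unpacks it.  Only
 * the SrcA_X1 layout shown above (6 bits at bit position 37) is
 * assumed; the sketch_ names are illustrative, not part of the patch.
 */
#include <stdio.h>

typedef unsigned long long sketch_bundle_bits;

static sketch_bundle_bits sketch_create_SrcA_X1(int num)
{
	const unsigned int n = (unsigned int)num;
	return ((sketch_bundle_bits)(n & 0x3f)) << 37;	/* pack 6 bits at bit 37 */
}

static unsigned int sketch_get_SrcA_X1(sketch_bundle_bits n)
{
	return ((unsigned int)(n >> 37)) & 0x3f;	/* unpack the same field */
}

int main(void)
{
	sketch_bundle_bits bundle = 0;

	bundle |= sketch_create_SrcA_X1(17);
	printf("SrcA_X1 = %u\n", sketch_get_SrcA_X1(bundle));	/* prints 17 */
	return 0;
}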
*/ -extern int parse_insn_tile(tile_bundle_bits bits, - unsigned int pc, - struct tile_decoded_instruction - decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); +extern int parse_insn_tilegx(tilegx_bundle_bits bits, + unsigned long long pc, + struct tilegx_decoded_instruction + decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]); -#endif /* opcode_tile_h */ +#endif /* opcode_tilegx_h */ diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h index 227d033b180..71019286947 100644 --- a/arch/tile/include/asm/opcode_constants_64.h +++ b/arch/tile/include/asm/opcode_constants_64.h @@ -1,5 +1,5 @@ /* - * Copyright 2010 Tilera Corporation. All Rights Reserved. + * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -19,462 +19,591 @@ #define _TILE_OPCODE_CONSTANTS_H enum { - ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, - ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, - ADDB_SPECIAL_0_OPCODE_X0 = 1, - ADDB_SPECIAL_0_OPCODE_X1 = 1, - ADDHS_SPECIAL_0_OPCODE_X0 = 99, - ADDHS_SPECIAL_0_OPCODE_X1 = 69, - ADDH_SPECIAL_0_OPCODE_X0 = 2, - ADDH_SPECIAL_0_OPCODE_X1 = 2, - ADDIB_IMM_0_OPCODE_X0 = 1, - ADDIB_IMM_0_OPCODE_X1 = 1, - ADDIH_IMM_0_OPCODE_X0 = 2, - ADDIH_IMM_0_OPCODE_X1 = 2, - ADDI_IMM_0_OPCODE_X0 = 3, - ADDI_IMM_0_OPCODE_X1 = 3, - ADDI_IMM_1_OPCODE_SN = 1, - ADDI_OPCODE_Y0 = 9, - ADDI_OPCODE_Y1 = 7, - ADDLIS_OPCODE_X0 = 1, - ADDLIS_OPCODE_X1 = 2, - ADDLI_OPCODE_X0 = 2, - ADDLI_OPCODE_X1 = 3, - ADDS_SPECIAL_0_OPCODE_X0 = 96, - ADDS_SPECIAL_0_OPCODE_X1 = 66, - ADD_SPECIAL_0_OPCODE_X0 = 3, - ADD_SPECIAL_0_OPCODE_X1 = 3, - ADD_SPECIAL_0_OPCODE_Y0 = 0, - ADD_SPECIAL_0_OPCODE_Y1 = 0, - ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, - ADIFFH_SPECIAL_0_OPCODE_X0 = 5, - ANDI_IMM_0_OPCODE_X0 = 1, - ANDI_IMM_0_OPCODE_X1 = 4, - ANDI_OPCODE_Y0 = 10, - ANDI_OPCODE_Y1 = 8, - AND_SPECIAL_0_OPCODE_X0 = 6, - AND_SPECIAL_0_OPCODE_X1 = 4, - AND_SPECIAL_2_OPCODE_Y0 = 0, - AND_SPECIAL_2_OPCODE_Y1 = 0, - AULI_OPCODE_X0 = 3, - AULI_OPCODE_X1 = 4, - AVGB_U_SPECIAL_0_OPCODE_X0 = 7, - AVGH_SPECIAL_0_OPCODE_X0 = 8, - BBNST_BRANCH_OPCODE_X1 = 15, - BBNS_BRANCH_OPCODE_X1 = 14, - BBNS_OPCODE_SN = 63, - BBST_BRANCH_OPCODE_X1 = 13, - BBS_BRANCH_OPCODE_X1 = 12, - BBS_OPCODE_SN = 62, - BGEZT_BRANCH_OPCODE_X1 = 7, - BGEZ_BRANCH_OPCODE_X1 = 6, - BGEZ_OPCODE_SN = 61, - BGZT_BRANCH_OPCODE_X1 = 5, - BGZ_BRANCH_OPCODE_X1 = 4, - BGZ_OPCODE_SN = 58, - BITX_UN_0_SHUN_0_OPCODE_X0 = 1, - BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, - BLEZT_BRANCH_OPCODE_X1 = 11, - BLEZ_BRANCH_OPCODE_X1 = 10, - BLEZ_OPCODE_SN = 59, - BLZT_BRANCH_OPCODE_X1 = 9, - BLZ_BRANCH_OPCODE_X1 = 8, - BLZ_OPCODE_SN = 60, - BNZT_BRANCH_OPCODE_X1 = 3, - BNZ_BRANCH_OPCODE_X1 = 2, - BNZ_OPCODE_SN = 57, - BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, - BRANCH_OPCODE_X1 = 5, - BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, - BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, - BZT_BRANCH_OPCODE_X1 = 1, - BZ_BRANCH_OPCODE_X1 = 0, - BZ_OPCODE_SN = 56, - CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, - CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, - CRC32_32_SPECIAL_0_OPCODE_X0 = 9, - CRC32_8_SPECIAL_0_OPCODE_X0 = 10, - CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, - CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, - DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, - DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, - DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, - FINV_UN_0_SHUN_0_OPCODE_X1 = 3, - FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, - FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, - FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, - FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, - FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, - FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, - 
HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, - ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, - ILL_UN_0_SHUN_0_OPCODE_X1 = 7, - ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, - IMM_0_OPCODE_SN = 0, - IMM_0_OPCODE_X0 = 4, - IMM_0_OPCODE_X1 = 6, - IMM_1_OPCODE_SN = 1, - IMM_OPCODE_0_X0 = 5, - INTHB_SPECIAL_0_OPCODE_X0 = 11, - INTHB_SPECIAL_0_OPCODE_X1 = 5, - INTHH_SPECIAL_0_OPCODE_X0 = 12, - INTHH_SPECIAL_0_OPCODE_X1 = 6, - INTLB_SPECIAL_0_OPCODE_X0 = 13, - INTLB_SPECIAL_0_OPCODE_X1 = 7, - INTLH_SPECIAL_0_OPCODE_X0 = 14, - INTLH_SPECIAL_0_OPCODE_X1 = 8, - INV_UN_0_SHUN_0_OPCODE_X1 = 8, - IRET_UN_0_SHUN_0_OPCODE_X1 = 9, - JALB_OPCODE_X1 = 13, - JALF_OPCODE_X1 = 12, - JALRP_SPECIAL_0_OPCODE_X1 = 9, - JALRR_IMM_1_OPCODE_SN = 3, - JALR_RR_IMM_0_OPCODE_SN = 5, - JALR_SPECIAL_0_OPCODE_X1 = 10, - JB_OPCODE_X1 = 11, - JF_OPCODE_X1 = 10, - JRP_SPECIAL_0_OPCODE_X1 = 11, - JRR_IMM_1_OPCODE_SN = 2, - JR_RR_IMM_0_OPCODE_SN = 4, - JR_SPECIAL_0_OPCODE_X1 = 12, - LBADD_IMM_0_OPCODE_X1 = 22, - LBADD_U_IMM_0_OPCODE_X1 = 23, - LB_OPCODE_Y2 = 0, - LB_UN_0_SHUN_0_OPCODE_X1 = 10, - LB_U_OPCODE_Y2 = 1, - LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, - LHADD_IMM_0_OPCODE_X1 = 24, - LHADD_U_IMM_0_OPCODE_X1 = 25, - LH_OPCODE_Y2 = 2, - LH_UN_0_SHUN_0_OPCODE_X1 = 12, - LH_U_OPCODE_Y2 = 3, - LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, - LNK_SPECIAL_0_OPCODE_X1 = 13, - LWADD_IMM_0_OPCODE_X1 = 26, - LWADD_NA_IMM_0_OPCODE_X1 = 27, - LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, - LW_OPCODE_Y2 = 4, - LW_UN_0_SHUN_0_OPCODE_X1 = 14, - MAXB_U_SPECIAL_0_OPCODE_X0 = 15, - MAXB_U_SPECIAL_0_OPCODE_X1 = 14, - MAXH_SPECIAL_0_OPCODE_X0 = 16, - MAXH_SPECIAL_0_OPCODE_X1 = 15, - MAXIB_U_IMM_0_OPCODE_X0 = 4, - MAXIB_U_IMM_0_OPCODE_X1 = 5, - MAXIH_IMM_0_OPCODE_X0 = 5, - MAXIH_IMM_0_OPCODE_X1 = 6, - MFSPR_IMM_0_OPCODE_X1 = 7, - MF_UN_0_SHUN_0_OPCODE_X1 = 15, - MINB_U_SPECIAL_0_OPCODE_X0 = 17, - MINB_U_SPECIAL_0_OPCODE_X1 = 16, - MINH_SPECIAL_0_OPCODE_X0 = 18, - MINH_SPECIAL_0_OPCODE_X1 = 17, - MINIB_U_IMM_0_OPCODE_X0 = 6, - MINIB_U_IMM_0_OPCODE_X1 = 8, - MINIH_IMM_0_OPCODE_X0 = 7, - MINIH_IMM_0_OPCODE_X1 = 9, - MM_OPCODE_X0 = 6, - MM_OPCODE_X1 = 7, - MNZB_SPECIAL_0_OPCODE_X0 = 19, - MNZB_SPECIAL_0_OPCODE_X1 = 18, - MNZH_SPECIAL_0_OPCODE_X0 = 20, - MNZH_SPECIAL_0_OPCODE_X1 = 19, - MNZ_SPECIAL_0_OPCODE_X0 = 21, - MNZ_SPECIAL_0_OPCODE_X1 = 20, - MNZ_SPECIAL_1_OPCODE_Y0 = 0, - MNZ_SPECIAL_1_OPCODE_Y1 = 1, - MOVEI_IMM_1_OPCODE_SN = 0, - MOVE_RR_IMM_0_OPCODE_SN = 8, - MTSPR_IMM_0_OPCODE_X1 = 10, - MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, - MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, - MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, - MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, - MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, - MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, - MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, - MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, - MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, - MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, - MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, - MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, - MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, - MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, - MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, - MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, - MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, - MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, - MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, - MULHL_US_SPECIAL_0_OPCODE_X0 = 36, - MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, - MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, - MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, - MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, - MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, - MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, - MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, - MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, - MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, - MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, - 
MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, - MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, - MVNZ_SPECIAL_0_OPCODE_X0 = 45, - MVNZ_SPECIAL_1_OPCODE_Y0 = 1, - MVZ_SPECIAL_0_OPCODE_X0 = 46, - MVZ_SPECIAL_1_OPCODE_Y0 = 2, - MZB_SPECIAL_0_OPCODE_X0 = 47, - MZB_SPECIAL_0_OPCODE_X1 = 21, - MZH_SPECIAL_0_OPCODE_X0 = 48, - MZH_SPECIAL_0_OPCODE_X1 = 22, - MZ_SPECIAL_0_OPCODE_X0 = 49, - MZ_SPECIAL_0_OPCODE_X1 = 23, - MZ_SPECIAL_1_OPCODE_Y0 = 3, - MZ_SPECIAL_1_OPCODE_Y1 = 2, - NAP_UN_0_SHUN_0_OPCODE_X1 = 16, - NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, - NOP_UN_0_SHUN_0_OPCODE_X0 = 6, - NOP_UN_0_SHUN_0_OPCODE_X1 = 17, - NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, - NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, - NOREG_RR_IMM_0_OPCODE_SN = 0, - NOR_SPECIAL_0_OPCODE_X0 = 50, - NOR_SPECIAL_0_OPCODE_X1 = 24, - NOR_SPECIAL_2_OPCODE_Y0 = 1, - NOR_SPECIAL_2_OPCODE_Y1 = 1, - ORI_IMM_0_OPCODE_X0 = 8, - ORI_IMM_0_OPCODE_X1 = 11, - ORI_OPCODE_Y0 = 11, - ORI_OPCODE_Y1 = 9, - OR_SPECIAL_0_OPCODE_X0 = 51, - OR_SPECIAL_0_OPCODE_X1 = 25, - OR_SPECIAL_2_OPCODE_Y0 = 2, - OR_SPECIAL_2_OPCODE_Y1 = 2, - PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, - PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, - PACKHB_SPECIAL_0_OPCODE_X0 = 52, - PACKHB_SPECIAL_0_OPCODE_X1 = 26, - PACKHS_SPECIAL_0_OPCODE_X0 = 102, - PACKHS_SPECIAL_0_OPCODE_X1 = 72, - PACKLB_SPECIAL_0_OPCODE_X0 = 53, - PACKLB_SPECIAL_0_OPCODE_X1 = 27, - PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, - PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, - RLI_SHUN_0_OPCODE_X0 = 1, - RLI_SHUN_0_OPCODE_X1 = 1, - RLI_SHUN_0_OPCODE_Y0 = 1, - RLI_SHUN_0_OPCODE_Y1 = 1, - RL_SPECIAL_0_OPCODE_X0 = 54, - RL_SPECIAL_0_OPCODE_X1 = 28, - RL_SPECIAL_3_OPCODE_Y0 = 0, - RL_SPECIAL_3_OPCODE_Y1 = 0, - RR_IMM_0_OPCODE_SN = 0, - S1A_SPECIAL_0_OPCODE_X0 = 55, - S1A_SPECIAL_0_OPCODE_X1 = 29, - S1A_SPECIAL_0_OPCODE_Y0 = 1, - S1A_SPECIAL_0_OPCODE_Y1 = 1, - S2A_SPECIAL_0_OPCODE_X0 = 56, - S2A_SPECIAL_0_OPCODE_X1 = 30, - S2A_SPECIAL_0_OPCODE_Y0 = 2, - S2A_SPECIAL_0_OPCODE_Y1 = 2, - S3A_SPECIAL_0_OPCODE_X0 = 57, - S3A_SPECIAL_0_OPCODE_X1 = 31, - S3A_SPECIAL_5_OPCODE_Y0 = 1, - S3A_SPECIAL_5_OPCODE_Y1 = 1, - SADAB_U_SPECIAL_0_OPCODE_X0 = 58, - SADAH_SPECIAL_0_OPCODE_X0 = 59, - SADAH_U_SPECIAL_0_OPCODE_X0 = 60, - SADB_U_SPECIAL_0_OPCODE_X0 = 61, - SADH_SPECIAL_0_OPCODE_X0 = 62, - SADH_U_SPECIAL_0_OPCODE_X0 = 63, - SBADD_IMM_0_OPCODE_X1 = 28, - SB_OPCODE_Y2 = 5, - SB_SPECIAL_0_OPCODE_X1 = 32, - SEQB_SPECIAL_0_OPCODE_X0 = 64, - SEQB_SPECIAL_0_OPCODE_X1 = 33, - SEQH_SPECIAL_0_OPCODE_X0 = 65, - SEQH_SPECIAL_0_OPCODE_X1 = 34, - SEQIB_IMM_0_OPCODE_X0 = 9, - SEQIB_IMM_0_OPCODE_X1 = 12, - SEQIH_IMM_0_OPCODE_X0 = 10, - SEQIH_IMM_0_OPCODE_X1 = 13, - SEQI_IMM_0_OPCODE_X0 = 11, - SEQI_IMM_0_OPCODE_X1 = 14, - SEQI_OPCODE_Y0 = 12, - SEQI_OPCODE_Y1 = 10, - SEQ_SPECIAL_0_OPCODE_X0 = 66, - SEQ_SPECIAL_0_OPCODE_X1 = 35, - SEQ_SPECIAL_5_OPCODE_Y0 = 2, - SEQ_SPECIAL_5_OPCODE_Y1 = 2, - SHADD_IMM_0_OPCODE_X1 = 29, - SHL8II_IMM_0_OPCODE_SN = 3, - SHLB_SPECIAL_0_OPCODE_X0 = 67, - SHLB_SPECIAL_0_OPCODE_X1 = 36, - SHLH_SPECIAL_0_OPCODE_X0 = 68, - SHLH_SPECIAL_0_OPCODE_X1 = 37, - SHLIB_SHUN_0_OPCODE_X0 = 2, - SHLIB_SHUN_0_OPCODE_X1 = 2, - SHLIH_SHUN_0_OPCODE_X0 = 3, - SHLIH_SHUN_0_OPCODE_X1 = 3, - SHLI_SHUN_0_OPCODE_X0 = 4, - SHLI_SHUN_0_OPCODE_X1 = 4, - SHLI_SHUN_0_OPCODE_Y0 = 2, - SHLI_SHUN_0_OPCODE_Y1 = 2, - SHL_SPECIAL_0_OPCODE_X0 = 69, - SHL_SPECIAL_0_OPCODE_X1 = 38, - SHL_SPECIAL_3_OPCODE_Y0 = 1, - SHL_SPECIAL_3_OPCODE_Y1 = 1, - SHR1_RR_IMM_0_OPCODE_SN = 9, - SHRB_SPECIAL_0_OPCODE_X0 = 70, - SHRB_SPECIAL_0_OPCODE_X1 = 39, - SHRH_SPECIAL_0_OPCODE_X0 = 71, - SHRH_SPECIAL_0_OPCODE_X1 = 40, - SHRIB_SHUN_0_OPCODE_X0 = 5, - 
SHRIB_SHUN_0_OPCODE_X1 = 5, - SHRIH_SHUN_0_OPCODE_X0 = 6, - SHRIH_SHUN_0_OPCODE_X1 = 6, - SHRI_SHUN_0_OPCODE_X0 = 7, - SHRI_SHUN_0_OPCODE_X1 = 7, - SHRI_SHUN_0_OPCODE_Y0 = 3, - SHRI_SHUN_0_OPCODE_Y1 = 3, - SHR_SPECIAL_0_OPCODE_X0 = 72, - SHR_SPECIAL_0_OPCODE_X1 = 41, - SHR_SPECIAL_3_OPCODE_Y0 = 2, - SHR_SPECIAL_3_OPCODE_Y1 = 2, - SHUN_0_OPCODE_X0 = 7, - SHUN_0_OPCODE_X1 = 8, - SHUN_0_OPCODE_Y0 = 13, - SHUN_0_OPCODE_Y1 = 11, - SH_OPCODE_Y2 = 6, - SH_SPECIAL_0_OPCODE_X1 = 42, - SLTB_SPECIAL_0_OPCODE_X0 = 73, - SLTB_SPECIAL_0_OPCODE_X1 = 43, - SLTB_U_SPECIAL_0_OPCODE_X0 = 74, - SLTB_U_SPECIAL_0_OPCODE_X1 = 44, - SLTEB_SPECIAL_0_OPCODE_X0 = 75, - SLTEB_SPECIAL_0_OPCODE_X1 = 45, - SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, - SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, - SLTEH_SPECIAL_0_OPCODE_X0 = 77, - SLTEH_SPECIAL_0_OPCODE_X1 = 47, - SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, - SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, - SLTE_SPECIAL_0_OPCODE_X0 = 79, - SLTE_SPECIAL_0_OPCODE_X1 = 49, - SLTE_SPECIAL_4_OPCODE_Y0 = 0, - SLTE_SPECIAL_4_OPCODE_Y1 = 0, - SLTE_U_SPECIAL_0_OPCODE_X0 = 80, - SLTE_U_SPECIAL_0_OPCODE_X1 = 50, - SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, - SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, - SLTH_SPECIAL_0_OPCODE_X0 = 81, - SLTH_SPECIAL_0_OPCODE_X1 = 51, - SLTH_U_SPECIAL_0_OPCODE_X0 = 82, - SLTH_U_SPECIAL_0_OPCODE_X1 = 52, - SLTIB_IMM_0_OPCODE_X0 = 12, - SLTIB_IMM_0_OPCODE_X1 = 15, - SLTIB_U_IMM_0_OPCODE_X0 = 13, - SLTIB_U_IMM_0_OPCODE_X1 = 16, - SLTIH_IMM_0_OPCODE_X0 = 14, - SLTIH_IMM_0_OPCODE_X1 = 17, - SLTIH_U_IMM_0_OPCODE_X0 = 15, - SLTIH_U_IMM_0_OPCODE_X1 = 18, - SLTI_IMM_0_OPCODE_X0 = 16, - SLTI_IMM_0_OPCODE_X1 = 19, - SLTI_OPCODE_Y0 = 14, - SLTI_OPCODE_Y1 = 12, - SLTI_U_IMM_0_OPCODE_X0 = 17, - SLTI_U_IMM_0_OPCODE_X1 = 20, - SLTI_U_OPCODE_Y0 = 15, - SLTI_U_OPCODE_Y1 = 13, - SLT_SPECIAL_0_OPCODE_X0 = 83, - SLT_SPECIAL_0_OPCODE_X1 = 53, - SLT_SPECIAL_4_OPCODE_Y0 = 2, - SLT_SPECIAL_4_OPCODE_Y1 = 2, - SLT_U_SPECIAL_0_OPCODE_X0 = 84, - SLT_U_SPECIAL_0_OPCODE_X1 = 54, - SLT_U_SPECIAL_4_OPCODE_Y0 = 3, - SLT_U_SPECIAL_4_OPCODE_Y1 = 3, - SNEB_SPECIAL_0_OPCODE_X0 = 85, - SNEB_SPECIAL_0_OPCODE_X1 = 55, - SNEH_SPECIAL_0_OPCODE_X0 = 86, - SNEH_SPECIAL_0_OPCODE_X1 = 56, - SNE_SPECIAL_0_OPCODE_X0 = 87, - SNE_SPECIAL_0_OPCODE_X1 = 57, - SNE_SPECIAL_5_OPCODE_Y0 = 3, - SNE_SPECIAL_5_OPCODE_Y1 = 3, - SPECIAL_0_OPCODE_X0 = 0, - SPECIAL_0_OPCODE_X1 = 1, - SPECIAL_0_OPCODE_Y0 = 1, - SPECIAL_0_OPCODE_Y1 = 1, - SPECIAL_1_OPCODE_Y0 = 2, - SPECIAL_1_OPCODE_Y1 = 2, - SPECIAL_2_OPCODE_Y0 = 3, - SPECIAL_2_OPCODE_Y1 = 3, - SPECIAL_3_OPCODE_Y0 = 4, - SPECIAL_3_OPCODE_Y1 = 4, - SPECIAL_4_OPCODE_Y0 = 5, - SPECIAL_4_OPCODE_Y1 = 5, - SPECIAL_5_OPCODE_Y0 = 6, - SPECIAL_5_OPCODE_Y1 = 6, - SPECIAL_6_OPCODE_Y0 = 7, - SPECIAL_7_OPCODE_Y0 = 8, - SRAB_SPECIAL_0_OPCODE_X0 = 88, - SRAB_SPECIAL_0_OPCODE_X1 = 58, - SRAH_SPECIAL_0_OPCODE_X0 = 89, - SRAH_SPECIAL_0_OPCODE_X1 = 59, - SRAIB_SHUN_0_OPCODE_X0 = 8, - SRAIB_SHUN_0_OPCODE_X1 = 8, - SRAIH_SHUN_0_OPCODE_X0 = 9, - SRAIH_SHUN_0_OPCODE_X1 = 9, - SRAI_SHUN_0_OPCODE_X0 = 10, - SRAI_SHUN_0_OPCODE_X1 = 10, - SRAI_SHUN_0_OPCODE_Y0 = 4, - SRAI_SHUN_0_OPCODE_Y1 = 4, - SRA_SPECIAL_0_OPCODE_X0 = 90, - SRA_SPECIAL_0_OPCODE_X1 = 60, - SRA_SPECIAL_3_OPCODE_Y0 = 3, - SRA_SPECIAL_3_OPCODE_Y1 = 3, - SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, - SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, - SUBB_SPECIAL_0_OPCODE_X0 = 91, - SUBB_SPECIAL_0_OPCODE_X1 = 61, - SUBHS_SPECIAL_0_OPCODE_X0 = 101, - SUBHS_SPECIAL_0_OPCODE_X1 = 71, - SUBH_SPECIAL_0_OPCODE_X0 = 92, - SUBH_SPECIAL_0_OPCODE_X1 = 62, - SUBS_SPECIAL_0_OPCODE_X0 = 97, - SUBS_SPECIAL_0_OPCODE_X1 = 67, - 
SUB_SPECIAL_0_OPCODE_X0 = 93, - SUB_SPECIAL_0_OPCODE_X1 = 63, - SUB_SPECIAL_0_OPCODE_Y0 = 3, - SUB_SPECIAL_0_OPCODE_Y1 = 3, - SWADD_IMM_0_OPCODE_X1 = 30, - SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, - SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, - SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, - SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, - SW_OPCODE_Y2 = 7, - SW_SPECIAL_0_OPCODE_X1 = 64, - TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, - TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, - TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, - TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, - TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, - TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, - TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, - TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, - TNS_UN_0_SHUN_0_OPCODE_X1 = 22, - UN_0_SHUN_0_OPCODE_X0 = 11, - UN_0_SHUN_0_OPCODE_X1 = 11, - UN_0_SHUN_0_OPCODE_Y0 = 5, - UN_0_SHUN_0_OPCODE_Y1 = 5, - WH64_UN_0_SHUN_0_OPCODE_X1 = 23, - XORI_IMM_0_OPCODE_X0 = 2, - XORI_IMM_0_OPCODE_X1 = 21, - XOR_SPECIAL_0_OPCODE_X0 = 94, - XOR_SPECIAL_0_OPCODE_X1 = 65, - XOR_SPECIAL_2_OPCODE_Y0 = 3, - XOR_SPECIAL_2_OPCODE_Y1 = 3 + ADDI_IMM8_OPCODE_X0 = 1, + ADDI_IMM8_OPCODE_X1 = 1, + ADDI_OPCODE_Y0 = 0, + ADDI_OPCODE_Y1 = 1, + ADDLI_OPCODE_X0 = 1, + ADDLI_OPCODE_X1 = 0, + ADDXI_IMM8_OPCODE_X0 = 2, + ADDXI_IMM8_OPCODE_X1 = 2, + ADDXI_OPCODE_Y0 = 1, + ADDXI_OPCODE_Y1 = 2, + ADDXLI_OPCODE_X0 = 2, + ADDXLI_OPCODE_X1 = 1, + ADDXSC_RRR_0_OPCODE_X0 = 1, + ADDXSC_RRR_0_OPCODE_X1 = 1, + ADDX_RRR_0_OPCODE_X0 = 2, + ADDX_RRR_0_OPCODE_X1 = 2, + ADDX_RRR_0_OPCODE_Y0 = 0, + ADDX_SPECIAL_0_OPCODE_Y1 = 0, + ADD_RRR_0_OPCODE_X0 = 3, + ADD_RRR_0_OPCODE_X1 = 3, + ADD_RRR_0_OPCODE_Y0 = 1, + ADD_SPECIAL_0_OPCODE_Y1 = 1, + ANDI_IMM8_OPCODE_X0 = 3, + ANDI_IMM8_OPCODE_X1 = 3, + ANDI_OPCODE_Y0 = 2, + ANDI_OPCODE_Y1 = 3, + AND_RRR_0_OPCODE_X0 = 4, + AND_RRR_0_OPCODE_X1 = 4, + AND_RRR_5_OPCODE_Y0 = 0, + AND_RRR_5_OPCODE_Y1 = 0, + BEQZT_BRANCH_OPCODE_X1 = 16, + BEQZ_BRANCH_OPCODE_X1 = 17, + BFEXTS_BF_OPCODE_X0 = 4, + BFEXTU_BF_OPCODE_X0 = 5, + BFINS_BF_OPCODE_X0 = 6, + BF_OPCODE_X0 = 3, + BGEZT_BRANCH_OPCODE_X1 = 18, + BGEZ_BRANCH_OPCODE_X1 = 19, + BGTZT_BRANCH_OPCODE_X1 = 20, + BGTZ_BRANCH_OPCODE_X1 = 21, + BLBCT_BRANCH_OPCODE_X1 = 22, + BLBC_BRANCH_OPCODE_X1 = 23, + BLBST_BRANCH_OPCODE_X1 = 24, + BLBS_BRANCH_OPCODE_X1 = 25, + BLEZT_BRANCH_OPCODE_X1 = 26, + BLEZ_BRANCH_OPCODE_X1 = 27, + BLTZT_BRANCH_OPCODE_X1 = 28, + BLTZ_BRANCH_OPCODE_X1 = 29, + BNEZT_BRANCH_OPCODE_X1 = 30, + BNEZ_BRANCH_OPCODE_X1 = 31, + BRANCH_OPCODE_X1 = 2, + CMOVEQZ_RRR_0_OPCODE_X0 = 5, + CMOVEQZ_RRR_4_OPCODE_Y0 = 0, + CMOVNEZ_RRR_0_OPCODE_X0 = 6, + CMOVNEZ_RRR_4_OPCODE_Y0 = 1, + CMPEQI_IMM8_OPCODE_X0 = 4, + CMPEQI_IMM8_OPCODE_X1 = 4, + CMPEQI_OPCODE_Y0 = 3, + CMPEQI_OPCODE_Y1 = 4, + CMPEQ_RRR_0_OPCODE_X0 = 7, + CMPEQ_RRR_0_OPCODE_X1 = 5, + CMPEQ_RRR_3_OPCODE_Y0 = 0, + CMPEQ_RRR_3_OPCODE_Y1 = 2, + CMPEXCH4_RRR_0_OPCODE_X1 = 6, + CMPEXCH_RRR_0_OPCODE_X1 = 7, + CMPLES_RRR_0_OPCODE_X0 = 8, + CMPLES_RRR_0_OPCODE_X1 = 8, + CMPLES_RRR_2_OPCODE_Y0 = 0, + CMPLES_RRR_2_OPCODE_Y1 = 0, + CMPLEU_RRR_0_OPCODE_X0 = 9, + CMPLEU_RRR_0_OPCODE_X1 = 9, + CMPLEU_RRR_2_OPCODE_Y0 = 1, + CMPLEU_RRR_2_OPCODE_Y1 = 1, + CMPLTSI_IMM8_OPCODE_X0 = 5, + CMPLTSI_IMM8_OPCODE_X1 = 5, + CMPLTSI_OPCODE_Y0 = 4, + CMPLTSI_OPCODE_Y1 = 5, + CMPLTS_RRR_0_OPCODE_X0 = 10, + CMPLTS_RRR_0_OPCODE_X1 = 10, + CMPLTS_RRR_2_OPCODE_Y0 = 2, + CMPLTS_RRR_2_OPCODE_Y1 = 2, + CMPLTUI_IMM8_OPCODE_X0 = 6, + CMPLTUI_IMM8_OPCODE_X1 = 6, + CMPLTU_RRR_0_OPCODE_X0 = 11, + CMPLTU_RRR_0_OPCODE_X1 = 11, + CMPLTU_RRR_2_OPCODE_Y0 = 3, + CMPLTU_RRR_2_OPCODE_Y1 = 3, + CMPNE_RRR_0_OPCODE_X0 = 12, + CMPNE_RRR_0_OPCODE_X1 = 12, + 
CMPNE_RRR_3_OPCODE_Y0 = 1, + CMPNE_RRR_3_OPCODE_Y1 = 3, + CMULAF_RRR_0_OPCODE_X0 = 13, + CMULA_RRR_0_OPCODE_X0 = 14, + CMULFR_RRR_0_OPCODE_X0 = 15, + CMULF_RRR_0_OPCODE_X0 = 16, + CMULHR_RRR_0_OPCODE_X0 = 17, + CMULH_RRR_0_OPCODE_X0 = 18, + CMUL_RRR_0_OPCODE_X0 = 19, + CNTLZ_UNARY_OPCODE_X0 = 1, + CNTLZ_UNARY_OPCODE_Y0 = 1, + CNTTZ_UNARY_OPCODE_X0 = 2, + CNTTZ_UNARY_OPCODE_Y0 = 2, + CRC32_32_RRR_0_OPCODE_X0 = 20, + CRC32_8_RRR_0_OPCODE_X0 = 21, + DBLALIGN2_RRR_0_OPCODE_X0 = 22, + DBLALIGN2_RRR_0_OPCODE_X1 = 13, + DBLALIGN4_RRR_0_OPCODE_X0 = 23, + DBLALIGN4_RRR_0_OPCODE_X1 = 14, + DBLALIGN6_RRR_0_OPCODE_X0 = 24, + DBLALIGN6_RRR_0_OPCODE_X1 = 15, + DBLALIGN_RRR_0_OPCODE_X0 = 25, + DRAIN_UNARY_OPCODE_X1 = 1, + DTLBPR_UNARY_OPCODE_X1 = 2, + EXCH4_RRR_0_OPCODE_X1 = 16, + EXCH_RRR_0_OPCODE_X1 = 17, + FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26, + FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27, + FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28, + FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29, + FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30, + FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31, + FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32, + FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33, + FETCHADD4_RRR_0_OPCODE_X1 = 18, + FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19, + FETCHADDGEZ_RRR_0_OPCODE_X1 = 20, + FETCHADD_RRR_0_OPCODE_X1 = 21, + FETCHAND4_RRR_0_OPCODE_X1 = 22, + FETCHAND_RRR_0_OPCODE_X1 = 23, + FETCHOR4_RRR_0_OPCODE_X1 = 24, + FETCHOR_RRR_0_OPCODE_X1 = 25, + FINV_UNARY_OPCODE_X1 = 3, + FLUSHWB_UNARY_OPCODE_X1 = 4, + FLUSH_UNARY_OPCODE_X1 = 5, + FNOP_UNARY_OPCODE_X0 = 3, + FNOP_UNARY_OPCODE_X1 = 6, + FNOP_UNARY_OPCODE_Y0 = 3, + FNOP_UNARY_OPCODE_Y1 = 8, + FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34, + FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35, + FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36, + FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37, + FSINGLE_PACK1_UNARY_OPCODE_X0 = 4, + FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4, + FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38, + FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39, + ICOH_UNARY_OPCODE_X1 = 7, + ILL_UNARY_OPCODE_X1 = 8, + ILL_UNARY_OPCODE_Y1 = 9, + IMM8_OPCODE_X0 = 4, + IMM8_OPCODE_X1 = 3, + INV_UNARY_OPCODE_X1 = 9, + IRET_UNARY_OPCODE_X1 = 10, + JALRP_UNARY_OPCODE_X1 = 11, + JALRP_UNARY_OPCODE_Y1 = 10, + JALR_UNARY_OPCODE_X1 = 12, + JALR_UNARY_OPCODE_Y1 = 11, + JAL_JUMP_OPCODE_X1 = 0, + JRP_UNARY_OPCODE_X1 = 13, + JRP_UNARY_OPCODE_Y1 = 12, + JR_UNARY_OPCODE_X1 = 14, + JR_UNARY_OPCODE_Y1 = 13, + JUMP_OPCODE_X1 = 4, + J_JUMP_OPCODE_X1 = 1, + LD1S_ADD_IMM8_OPCODE_X1 = 7, + LD1S_OPCODE_Y2 = 0, + LD1S_UNARY_OPCODE_X1 = 15, + LD1U_ADD_IMM8_OPCODE_X1 = 8, + LD1U_OPCODE_Y2 = 1, + LD1U_UNARY_OPCODE_X1 = 16, + LD2S_ADD_IMM8_OPCODE_X1 = 9, + LD2S_OPCODE_Y2 = 2, + LD2S_UNARY_OPCODE_X1 = 17, + LD2U_ADD_IMM8_OPCODE_X1 = 10, + LD2U_OPCODE_Y2 = 3, + LD2U_UNARY_OPCODE_X1 = 18, + LD4S_ADD_IMM8_OPCODE_X1 = 11, + LD4S_OPCODE_Y2 = 1, + LD4S_UNARY_OPCODE_X1 = 19, + LD4U_ADD_IMM8_OPCODE_X1 = 12, + LD4U_OPCODE_Y2 = 2, + LD4U_UNARY_OPCODE_X1 = 20, + LDNA_UNARY_OPCODE_X1 = 21, + LDNT1S_ADD_IMM8_OPCODE_X1 = 13, + LDNT1S_UNARY_OPCODE_X1 = 22, + LDNT1U_ADD_IMM8_OPCODE_X1 = 14, + LDNT1U_UNARY_OPCODE_X1 = 23, + LDNT2S_ADD_IMM8_OPCODE_X1 = 15, + LDNT2S_UNARY_OPCODE_X1 = 24, + LDNT2U_ADD_IMM8_OPCODE_X1 = 16, + LDNT2U_UNARY_OPCODE_X1 = 25, + LDNT4S_ADD_IMM8_OPCODE_X1 = 17, + LDNT4S_UNARY_OPCODE_X1 = 26, + LDNT4U_ADD_IMM8_OPCODE_X1 = 18, + LDNT4U_UNARY_OPCODE_X1 = 27, + LDNT_ADD_IMM8_OPCODE_X1 = 19, + LDNT_UNARY_OPCODE_X1 = 28, + LD_ADD_IMM8_OPCODE_X1 = 20, + LD_OPCODE_Y2 = 3, + LD_UNARY_OPCODE_X1 = 29, + LNK_UNARY_OPCODE_X1 = 30, + LNK_UNARY_OPCODE_Y1 = 14, + LWNA_ADD_IMM8_OPCODE_X1 = 21, + MFSPR_IMM8_OPCODE_X1 = 22, 
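/*
 * A hedged sketch of how these opcode constants combine with the
 * create_ packers earlier in the patch to assemble the X1 slot of a
 * bundle.  It assumes tilegx_bundle_bits and the create_ helpers from
 * the opcode header are in scope; the mode bits and the X0 slot are
 * left zero, which a real encoder must also fill in.  Register
 * numbers are arbitrary.
 */
static tilegx_bundle_bits sketch_encode_add_x1(void)
{
	/* add r5, r6, r7 in the X1 pipe. */
	return create_Opcode_X1(RRR_0_OPCODE_X1) |		   /* RRR-format major opcode */
	       create_RRROpcodeExtension_X1(ADD_RRR_0_OPCODE_X1) | /* "add" */
	       create_Dest_X1(5) |
	       create_SrcA_X1(6) |
	       create_SrcB_X1(7);
}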
+ MF_UNARY_OPCODE_X1 = 31, + MM_BF_OPCODE_X0 = 7, + MNZ_RRR_0_OPCODE_X0 = 40, + MNZ_RRR_0_OPCODE_X1 = 26, + MNZ_RRR_4_OPCODE_Y0 = 2, + MNZ_RRR_4_OPCODE_Y1 = 2, + MODE_OPCODE_YA2 = 1, + MODE_OPCODE_YB2 = 2, + MODE_OPCODE_YC2 = 3, + MTSPR_IMM8_OPCODE_X1 = 23, + MULAX_RRR_0_OPCODE_X0 = 41, + MULAX_RRR_3_OPCODE_Y0 = 2, + MULA_HS_HS_RRR_0_OPCODE_X0 = 42, + MULA_HS_HS_RRR_9_OPCODE_Y0 = 0, + MULA_HS_HU_RRR_0_OPCODE_X0 = 43, + MULA_HS_LS_RRR_0_OPCODE_X0 = 44, + MULA_HS_LU_RRR_0_OPCODE_X0 = 45, + MULA_HU_HU_RRR_0_OPCODE_X0 = 46, + MULA_HU_HU_RRR_9_OPCODE_Y0 = 1, + MULA_HU_LS_RRR_0_OPCODE_X0 = 47, + MULA_HU_LU_RRR_0_OPCODE_X0 = 48, + MULA_LS_LS_RRR_0_OPCODE_X0 = 49, + MULA_LS_LS_RRR_9_OPCODE_Y0 = 2, + MULA_LS_LU_RRR_0_OPCODE_X0 = 50, + MULA_LU_LU_RRR_0_OPCODE_X0 = 51, + MULA_LU_LU_RRR_9_OPCODE_Y0 = 3, + MULX_RRR_0_OPCODE_X0 = 52, + MULX_RRR_3_OPCODE_Y0 = 3, + MUL_HS_HS_RRR_0_OPCODE_X0 = 53, + MUL_HS_HS_RRR_8_OPCODE_Y0 = 0, + MUL_HS_HU_RRR_0_OPCODE_X0 = 54, + MUL_HS_LS_RRR_0_OPCODE_X0 = 55, + MUL_HS_LU_RRR_0_OPCODE_X0 = 56, + MUL_HU_HU_RRR_0_OPCODE_X0 = 57, + MUL_HU_HU_RRR_8_OPCODE_Y0 = 1, + MUL_HU_LS_RRR_0_OPCODE_X0 = 58, + MUL_HU_LU_RRR_0_OPCODE_X0 = 59, + MUL_LS_LS_RRR_0_OPCODE_X0 = 60, + MUL_LS_LS_RRR_8_OPCODE_Y0 = 2, + MUL_LS_LU_RRR_0_OPCODE_X0 = 61, + MUL_LU_LU_RRR_0_OPCODE_X0 = 62, + MUL_LU_LU_RRR_8_OPCODE_Y0 = 3, + MZ_RRR_0_OPCODE_X0 = 63, + MZ_RRR_0_OPCODE_X1 = 27, + MZ_RRR_4_OPCODE_Y0 = 3, + MZ_RRR_4_OPCODE_Y1 = 3, + NAP_UNARY_OPCODE_X1 = 32, + NOP_UNARY_OPCODE_X0 = 5, + NOP_UNARY_OPCODE_X1 = 33, + NOP_UNARY_OPCODE_Y0 = 5, + NOP_UNARY_OPCODE_Y1 = 15, + NOR_RRR_0_OPCODE_X0 = 64, + NOR_RRR_0_OPCODE_X1 = 28, + NOR_RRR_5_OPCODE_Y0 = 1, + NOR_RRR_5_OPCODE_Y1 = 1, + ORI_IMM8_OPCODE_X0 = 7, + ORI_IMM8_OPCODE_X1 = 24, + OR_RRR_0_OPCODE_X0 = 65, + OR_RRR_0_OPCODE_X1 = 29, + OR_RRR_5_OPCODE_Y0 = 2, + OR_RRR_5_OPCODE_Y1 = 2, + PCNT_UNARY_OPCODE_X0 = 6, + PCNT_UNARY_OPCODE_Y0 = 6, + REVBITS_UNARY_OPCODE_X0 = 7, + REVBITS_UNARY_OPCODE_Y0 = 7, + REVBYTES_UNARY_OPCODE_X0 = 8, + REVBYTES_UNARY_OPCODE_Y0 = 8, + ROTLI_SHIFT_OPCODE_X0 = 1, + ROTLI_SHIFT_OPCODE_X1 = 1, + ROTLI_SHIFT_OPCODE_Y0 = 0, + ROTLI_SHIFT_OPCODE_Y1 = 0, + ROTL_RRR_0_OPCODE_X0 = 66, + ROTL_RRR_0_OPCODE_X1 = 30, + ROTL_RRR_6_OPCODE_Y0 = 0, + ROTL_RRR_6_OPCODE_Y1 = 0, + RRR_0_OPCODE_X0 = 5, + RRR_0_OPCODE_X1 = 5, + RRR_0_OPCODE_Y0 = 5, + RRR_0_OPCODE_Y1 = 6, + RRR_1_OPCODE_Y0 = 6, + RRR_1_OPCODE_Y1 = 7, + RRR_2_OPCODE_Y0 = 7, + RRR_2_OPCODE_Y1 = 8, + RRR_3_OPCODE_Y0 = 8, + RRR_3_OPCODE_Y1 = 9, + RRR_4_OPCODE_Y0 = 9, + RRR_4_OPCODE_Y1 = 10, + RRR_5_OPCODE_Y0 = 10, + RRR_5_OPCODE_Y1 = 11, + RRR_6_OPCODE_Y0 = 11, + RRR_6_OPCODE_Y1 = 12, + RRR_7_OPCODE_Y0 = 12, + RRR_7_OPCODE_Y1 = 13, + RRR_8_OPCODE_Y0 = 13, + RRR_9_OPCODE_Y0 = 14, + SHIFT_OPCODE_X0 = 6, + SHIFT_OPCODE_X1 = 6, + SHIFT_OPCODE_Y0 = 15, + SHIFT_OPCODE_Y1 = 14, + SHL16INSLI_OPCODE_X0 = 7, + SHL16INSLI_OPCODE_X1 = 7, + SHL1ADDX_RRR_0_OPCODE_X0 = 67, + SHL1ADDX_RRR_0_OPCODE_X1 = 31, + SHL1ADDX_RRR_7_OPCODE_Y0 = 1, + SHL1ADDX_RRR_7_OPCODE_Y1 = 1, + SHL1ADD_RRR_0_OPCODE_X0 = 68, + SHL1ADD_RRR_0_OPCODE_X1 = 32, + SHL1ADD_RRR_1_OPCODE_Y0 = 0, + SHL1ADD_RRR_1_OPCODE_Y1 = 0, + SHL2ADDX_RRR_0_OPCODE_X0 = 69, + SHL2ADDX_RRR_0_OPCODE_X1 = 33, + SHL2ADDX_RRR_7_OPCODE_Y0 = 2, + SHL2ADDX_RRR_7_OPCODE_Y1 = 2, + SHL2ADD_RRR_0_OPCODE_X0 = 70, + SHL2ADD_RRR_0_OPCODE_X1 = 34, + SHL2ADD_RRR_1_OPCODE_Y0 = 1, + SHL2ADD_RRR_1_OPCODE_Y1 = 1, + SHL3ADDX_RRR_0_OPCODE_X0 = 71, + SHL3ADDX_RRR_0_OPCODE_X1 = 35, + SHL3ADDX_RRR_7_OPCODE_Y0 = 3, + SHL3ADDX_RRR_7_OPCODE_Y1 = 3, + SHL3ADD_RRR_0_OPCODE_X0 = 72, + 
SHL3ADD_RRR_0_OPCODE_X1 = 36, + SHL3ADD_RRR_1_OPCODE_Y0 = 2, + SHL3ADD_RRR_1_OPCODE_Y1 = 2, + SHLI_SHIFT_OPCODE_X0 = 2, + SHLI_SHIFT_OPCODE_X1 = 2, + SHLI_SHIFT_OPCODE_Y0 = 1, + SHLI_SHIFT_OPCODE_Y1 = 1, + SHLXI_SHIFT_OPCODE_X0 = 3, + SHLXI_SHIFT_OPCODE_X1 = 3, + SHLX_RRR_0_OPCODE_X0 = 73, + SHLX_RRR_0_OPCODE_X1 = 37, + SHL_RRR_0_OPCODE_X0 = 74, + SHL_RRR_0_OPCODE_X1 = 38, + SHL_RRR_6_OPCODE_Y0 = 1, + SHL_RRR_6_OPCODE_Y1 = 1, + SHRSI_SHIFT_OPCODE_X0 = 4, + SHRSI_SHIFT_OPCODE_X1 = 4, + SHRSI_SHIFT_OPCODE_Y0 = 2, + SHRSI_SHIFT_OPCODE_Y1 = 2, + SHRS_RRR_0_OPCODE_X0 = 75, + SHRS_RRR_0_OPCODE_X1 = 39, + SHRS_RRR_6_OPCODE_Y0 = 2, + SHRS_RRR_6_OPCODE_Y1 = 2, + SHRUI_SHIFT_OPCODE_X0 = 5, + SHRUI_SHIFT_OPCODE_X1 = 5, + SHRUI_SHIFT_OPCODE_Y0 = 3, + SHRUI_SHIFT_OPCODE_Y1 = 3, + SHRUXI_SHIFT_OPCODE_X0 = 6, + SHRUXI_SHIFT_OPCODE_X1 = 6, + SHRUX_RRR_0_OPCODE_X0 = 76, + SHRUX_RRR_0_OPCODE_X1 = 40, + SHRU_RRR_0_OPCODE_X0 = 77, + SHRU_RRR_0_OPCODE_X1 = 41, + SHRU_RRR_6_OPCODE_Y0 = 3, + SHRU_RRR_6_OPCODE_Y1 = 3, + SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78, + ST1_ADD_IMM8_OPCODE_X1 = 25, + ST1_OPCODE_Y2 = 0, + ST1_RRR_0_OPCODE_X1 = 42, + ST2_ADD_IMM8_OPCODE_X1 = 26, + ST2_OPCODE_Y2 = 1, + ST2_RRR_0_OPCODE_X1 = 43, + ST4_ADD_IMM8_OPCODE_X1 = 27, + ST4_OPCODE_Y2 = 2, + ST4_RRR_0_OPCODE_X1 = 44, + STNT1_ADD_IMM8_OPCODE_X1 = 28, + STNT1_RRR_0_OPCODE_X1 = 45, + STNT2_ADD_IMM8_OPCODE_X1 = 29, + STNT2_RRR_0_OPCODE_X1 = 46, + STNT4_ADD_IMM8_OPCODE_X1 = 30, + STNT4_RRR_0_OPCODE_X1 = 47, + STNT_ADD_IMM8_OPCODE_X1 = 31, + STNT_RRR_0_OPCODE_X1 = 48, + ST_ADD_IMM8_OPCODE_X1 = 32, + ST_OPCODE_Y2 = 3, + ST_RRR_0_OPCODE_X1 = 49, + SUBXSC_RRR_0_OPCODE_X0 = 79, + SUBXSC_RRR_0_OPCODE_X1 = 50, + SUBX_RRR_0_OPCODE_X0 = 80, + SUBX_RRR_0_OPCODE_X1 = 51, + SUBX_RRR_0_OPCODE_Y0 = 2, + SUBX_RRR_0_OPCODE_Y1 = 2, + SUB_RRR_0_OPCODE_X0 = 81, + SUB_RRR_0_OPCODE_X1 = 52, + SUB_RRR_0_OPCODE_Y0 = 3, + SUB_RRR_0_OPCODE_Y1 = 3, + SWINT0_UNARY_OPCODE_X1 = 34, + SWINT1_UNARY_OPCODE_X1 = 35, + SWINT2_UNARY_OPCODE_X1 = 36, + SWINT3_UNARY_OPCODE_X1 = 37, + TBLIDXB0_UNARY_OPCODE_X0 = 9, + TBLIDXB0_UNARY_OPCODE_Y0 = 9, + TBLIDXB1_UNARY_OPCODE_X0 = 10, + TBLIDXB1_UNARY_OPCODE_Y0 = 10, + TBLIDXB2_UNARY_OPCODE_X0 = 11, + TBLIDXB2_UNARY_OPCODE_Y0 = 11, + TBLIDXB3_UNARY_OPCODE_X0 = 12, + TBLIDXB3_UNARY_OPCODE_Y0 = 12, + UNARY_RRR_0_OPCODE_X0 = 82, + UNARY_RRR_0_OPCODE_X1 = 53, + UNARY_RRR_1_OPCODE_Y0 = 3, + UNARY_RRR_1_OPCODE_Y1 = 3, + V1ADDI_IMM8_OPCODE_X0 = 8, + V1ADDI_IMM8_OPCODE_X1 = 33, + V1ADDUC_RRR_0_OPCODE_X0 = 83, + V1ADDUC_RRR_0_OPCODE_X1 = 54, + V1ADD_RRR_0_OPCODE_X0 = 84, + V1ADD_RRR_0_OPCODE_X1 = 55, + V1ADIFFU_RRR_0_OPCODE_X0 = 85, + V1AVGU_RRR_0_OPCODE_X0 = 86, + V1CMPEQI_IMM8_OPCODE_X0 = 9, + V1CMPEQI_IMM8_OPCODE_X1 = 34, + V1CMPEQ_RRR_0_OPCODE_X0 = 87, + V1CMPEQ_RRR_0_OPCODE_X1 = 56, + V1CMPLES_RRR_0_OPCODE_X0 = 88, + V1CMPLES_RRR_0_OPCODE_X1 = 57, + V1CMPLEU_RRR_0_OPCODE_X0 = 89, + V1CMPLEU_RRR_0_OPCODE_X1 = 58, + V1CMPLTSI_IMM8_OPCODE_X0 = 10, + V1CMPLTSI_IMM8_OPCODE_X1 = 35, + V1CMPLTS_RRR_0_OPCODE_X0 = 90, + V1CMPLTS_RRR_0_OPCODE_X1 = 59, + V1CMPLTUI_IMM8_OPCODE_X0 = 11, + V1CMPLTUI_IMM8_OPCODE_X1 = 36, + V1CMPLTU_RRR_0_OPCODE_X0 = 91, + V1CMPLTU_RRR_0_OPCODE_X1 = 60, + V1CMPNE_RRR_0_OPCODE_X0 = 92, + V1CMPNE_RRR_0_OPCODE_X1 = 61, + V1DDOTPUA_RRR_0_OPCODE_X0 = 161, + V1DDOTPUSA_RRR_0_OPCODE_X0 = 93, + V1DDOTPUS_RRR_0_OPCODE_X0 = 94, + V1DDOTPU_RRR_0_OPCODE_X0 = 162, + V1DOTPA_RRR_0_OPCODE_X0 = 95, + V1DOTPUA_RRR_0_OPCODE_X0 = 163, + V1DOTPUSA_RRR_0_OPCODE_X0 = 96, + V1DOTPUS_RRR_0_OPCODE_X0 = 97, + V1DOTPU_RRR_0_OPCODE_X0 = 164, + 
V1DOTP_RRR_0_OPCODE_X0 = 98, + V1INT_H_RRR_0_OPCODE_X0 = 99, + V1INT_H_RRR_0_OPCODE_X1 = 62, + V1INT_L_RRR_0_OPCODE_X0 = 100, + V1INT_L_RRR_0_OPCODE_X1 = 63, + V1MAXUI_IMM8_OPCODE_X0 = 12, + V1MAXUI_IMM8_OPCODE_X1 = 37, + V1MAXU_RRR_0_OPCODE_X0 = 101, + V1MAXU_RRR_0_OPCODE_X1 = 64, + V1MINUI_IMM8_OPCODE_X0 = 13, + V1MINUI_IMM8_OPCODE_X1 = 38, + V1MINU_RRR_0_OPCODE_X0 = 102, + V1MINU_RRR_0_OPCODE_X1 = 65, + V1MNZ_RRR_0_OPCODE_X0 = 103, + V1MNZ_RRR_0_OPCODE_X1 = 66, + V1MULTU_RRR_0_OPCODE_X0 = 104, + V1MULUS_RRR_0_OPCODE_X0 = 105, + V1MULU_RRR_0_OPCODE_X0 = 106, + V1MZ_RRR_0_OPCODE_X0 = 107, + V1MZ_RRR_0_OPCODE_X1 = 67, + V1SADAU_RRR_0_OPCODE_X0 = 108, + V1SADU_RRR_0_OPCODE_X0 = 109, + V1SHLI_SHIFT_OPCODE_X0 = 7, + V1SHLI_SHIFT_OPCODE_X1 = 7, + V1SHL_RRR_0_OPCODE_X0 = 110, + V1SHL_RRR_0_OPCODE_X1 = 68, + V1SHRSI_SHIFT_OPCODE_X0 = 8, + V1SHRSI_SHIFT_OPCODE_X1 = 8, + V1SHRS_RRR_0_OPCODE_X0 = 111, + V1SHRS_RRR_0_OPCODE_X1 = 69, + V1SHRUI_SHIFT_OPCODE_X0 = 9, + V1SHRUI_SHIFT_OPCODE_X1 = 9, + V1SHRU_RRR_0_OPCODE_X0 = 112, + V1SHRU_RRR_0_OPCODE_X1 = 70, + V1SUBUC_RRR_0_OPCODE_X0 = 113, + V1SUBUC_RRR_0_OPCODE_X1 = 71, + V1SUB_RRR_0_OPCODE_X0 = 114, + V1SUB_RRR_0_OPCODE_X1 = 72, + V2ADDI_IMM8_OPCODE_X0 = 14, + V2ADDI_IMM8_OPCODE_X1 = 39, + V2ADDSC_RRR_0_OPCODE_X0 = 115, + V2ADDSC_RRR_0_OPCODE_X1 = 73, + V2ADD_RRR_0_OPCODE_X0 = 116, + V2ADD_RRR_0_OPCODE_X1 = 74, + V2ADIFFS_RRR_0_OPCODE_X0 = 117, + V2AVGS_RRR_0_OPCODE_X0 = 118, + V2CMPEQI_IMM8_OPCODE_X0 = 15, + V2CMPEQI_IMM8_OPCODE_X1 = 40, + V2CMPEQ_RRR_0_OPCODE_X0 = 119, + V2CMPEQ_RRR_0_OPCODE_X1 = 75, + V2CMPLES_RRR_0_OPCODE_X0 = 120, + V2CMPLES_RRR_0_OPCODE_X1 = 76, + V2CMPLEU_RRR_0_OPCODE_X0 = 121, + V2CMPLEU_RRR_0_OPCODE_X1 = 77, + V2CMPLTSI_IMM8_OPCODE_X0 = 16, + V2CMPLTSI_IMM8_OPCODE_X1 = 41, + V2CMPLTS_RRR_0_OPCODE_X0 = 122, + V2CMPLTS_RRR_0_OPCODE_X1 = 78, + V2CMPLTUI_IMM8_OPCODE_X0 = 17, + V2CMPLTUI_IMM8_OPCODE_X1 = 42, + V2CMPLTU_RRR_0_OPCODE_X0 = 123, + V2CMPLTU_RRR_0_OPCODE_X1 = 79, + V2CMPNE_RRR_0_OPCODE_X0 = 124, + V2CMPNE_RRR_0_OPCODE_X1 = 80, + V2DOTPA_RRR_0_OPCODE_X0 = 125, + V2DOTP_RRR_0_OPCODE_X0 = 126, + V2INT_H_RRR_0_OPCODE_X0 = 127, + V2INT_H_RRR_0_OPCODE_X1 = 81, + V2INT_L_RRR_0_OPCODE_X0 = 128, + V2INT_L_RRR_0_OPCODE_X1 = 82, + V2MAXSI_IMM8_OPCODE_X0 = 18, + V2MAXSI_IMM8_OPCODE_X1 = 43, + V2MAXS_RRR_0_OPCODE_X0 = 129, + V2MAXS_RRR_0_OPCODE_X1 = 83, + V2MINSI_IMM8_OPCODE_X0 = 19, + V2MINSI_IMM8_OPCODE_X1 = 44, + V2MINS_RRR_0_OPCODE_X0 = 130, + V2MINS_RRR_0_OPCODE_X1 = 84, + V2MNZ_RRR_0_OPCODE_X0 = 131, + V2MNZ_RRR_0_OPCODE_X1 = 85, + V2MULFSC_RRR_0_OPCODE_X0 = 132, + V2MULS_RRR_0_OPCODE_X0 = 133, + V2MULTS_RRR_0_OPCODE_X0 = 134, + V2MZ_RRR_0_OPCODE_X0 = 135, + V2MZ_RRR_0_OPCODE_X1 = 86, + V2PACKH_RRR_0_OPCODE_X0 = 136, + V2PACKH_RRR_0_OPCODE_X1 = 87, + V2PACKL_RRR_0_OPCODE_X0 = 137, + V2PACKL_RRR_0_OPCODE_X1 = 88, + V2PACKUC_RRR_0_OPCODE_X0 = 138, + V2PACKUC_RRR_0_OPCODE_X1 = 89, + V2SADAS_RRR_0_OPCODE_X0 = 139, + V2SADAU_RRR_0_OPCODE_X0 = 140, + V2SADS_RRR_0_OPCODE_X0 = 141, + V2SADU_RRR_0_OPCODE_X0 = 142, + V2SHLI_SHIFT_OPCODE_X0 = 10, + V2SHLI_SHIFT_OPCODE_X1 = 10, + V2SHLSC_RRR_0_OPCODE_X0 = 143, + V2SHLSC_RRR_0_OPCODE_X1 = 90, + V2SHL_RRR_0_OPCODE_X0 = 144, + V2SHL_RRR_0_OPCODE_X1 = 91, + V2SHRSI_SHIFT_OPCODE_X0 = 11, + V2SHRSI_SHIFT_OPCODE_X1 = 11, + V2SHRS_RRR_0_OPCODE_X0 = 145, + V2SHRS_RRR_0_OPCODE_X1 = 92, + V2SHRUI_SHIFT_OPCODE_X0 = 12, + V2SHRUI_SHIFT_OPCODE_X1 = 12, + V2SHRU_RRR_0_OPCODE_X0 = 146, + V2SHRU_RRR_0_OPCODE_X1 = 93, + V2SUBSC_RRR_0_OPCODE_X0 = 147, + V2SUBSC_RRR_0_OPCODE_X1 = 94, + 
V2SUB_RRR_0_OPCODE_X0 = 148, + V2SUB_RRR_0_OPCODE_X1 = 95, + V4ADDSC_RRR_0_OPCODE_X0 = 149, + V4ADDSC_RRR_0_OPCODE_X1 = 96, + V4ADD_RRR_0_OPCODE_X0 = 150, + V4ADD_RRR_0_OPCODE_X1 = 97, + V4INT_H_RRR_0_OPCODE_X0 = 151, + V4INT_H_RRR_0_OPCODE_X1 = 98, + V4INT_L_RRR_0_OPCODE_X0 = 152, + V4INT_L_RRR_0_OPCODE_X1 = 99, + V4PACKSC_RRR_0_OPCODE_X0 = 153, + V4PACKSC_RRR_0_OPCODE_X1 = 100, + V4SHLSC_RRR_0_OPCODE_X0 = 154, + V4SHLSC_RRR_0_OPCODE_X1 = 101, + V4SHL_RRR_0_OPCODE_X0 = 155, + V4SHL_RRR_0_OPCODE_X1 = 102, + V4SHRS_RRR_0_OPCODE_X0 = 156, + V4SHRS_RRR_0_OPCODE_X1 = 103, + V4SHRU_RRR_0_OPCODE_X0 = 157, + V4SHRU_RRR_0_OPCODE_X1 = 104, + V4SUBSC_RRR_0_OPCODE_X0 = 158, + V4SUBSC_RRR_0_OPCODE_X1 = 105, + V4SUB_RRR_0_OPCODE_X0 = 159, + V4SUB_RRR_0_OPCODE_X1 = 106, + WH64_UNARY_OPCODE_X1 = 38, + XORI_IMM8_OPCODE_X0 = 20, + XORI_IMM8_OPCODE_X1 = 45, + XOR_RRR_0_OPCODE_X0 = 160, + XOR_RRR_0_OPCODE_X1 = 107, + XOR_RRR_5_OPCODE_Y0 = 3, + XOR_RRR_5_OPCODE_Y1 = 3 }; #endif /* !_TILE_OPCODE_CONSTANTS_H */ diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h index 3eb53525bf9..db93518fac0 100644 --- a/arch/tile/include/asm/page.h +++ b/arch/tile/include/asm/page.h @@ -16,7 +16,8 @@ #define _ASM_TILE_PAGE_H #include <linux/const.h> -#include <hv/pagesize.h> +#include <hv/hypervisor.h> +#include <arch/chip.h> /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ #define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL @@ -28,8 +29,6 @@ #define PAGE_MASK (~(PAGE_SIZE - 1)) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#ifdef __KERNEL__ - /* * If the Kconfig doesn't specify, set a maximum zone order that * is enough so that we can create huge pages from small pages given @@ -39,9 +38,6 @@ #define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1) #endif -#include <hv/hypervisor.h> -#include <arch/chip.h> - #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -91,6 +87,10 @@ typedef struct page *pgtable_t; /* Must be a macro since it is used to create constants. */ #define __pgprot(val) hv_pte(val) +/* Rarely-used initializers, typically with a "zero" value. */ +#define __pte(x) hv_pte(x) +#define __pgd(x) hv_pte(x) + static inline u64 pgprot_val(pgprot_t pgprot) { return hv_pte_val(pgprot); @@ -110,6 +110,8 @@ static inline u64 pgd_val(pgd_t pgd) typedef HV_PTE pmd_t; +#define __pmd(x) hv_pte(x) + static inline u64 pmd_val(pmd_t pmd) { return hv_pte_val(pmd); @@ -318,7 +320,7 @@ static inline int pfn_valid(unsigned long pfn) /* Provide as macros since these require some other headers included. 
*/ #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) -#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) +#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr))) #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) struct mm_struct; @@ -331,6 +333,4 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); #include <asm-generic/memory_model.h> -#endif /* __KERNEL__ */ - #endif /* _ASM_TILE_PAGE_H */ diff --git a/arch/tile/include/asm/parport.h b/arch/tile/include/asm/parport.h new file mode 100644 index 00000000000..cf252af6459 --- /dev/null +++ b/arch/tile/include/asm/parport.h @@ -0,0 +1 @@ +#include <asm-generic/parport.h> diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index c3fc458a0d3..7f03cefed1b 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h @@ -46,7 +46,8 @@ struct pci_controller { */ #define PCI_DMA_BUS_IS_PHYS 1 -int __init tile_pci_init(void); +int __devinit tile_pci_init(void); +int __devinit pcibios_init(void); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h new file mode 100644 index 00000000000..fd80328523b --- /dev/null +++ b/arch/tile/include/asm/pgtable_64.h @@ -0,0 +1,175 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + */ + +#ifndef _ASM_TILE_PGTABLE_64_H +#define _ASM_TILE_PGTABLE_64_H + +/* The level-0 page table breaks the address space into 32-bit chunks. */ +#define PGDIR_SHIFT HV_LOG2_L1_SPAN +#define PGDIR_SIZE HV_L1_SPAN +#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PTRS_PER_PGD HV_L0_ENTRIES +#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) + +/* + * The level-1 index is defined by the huge page size. A PMD is composed + * of PTRS_PER_PMD pgd_t's and is the middle level of the page table. + */ +#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE +#define PMD_SIZE HV_PAGE_SIZE_LARGE +#define PMD_MASK (~(PMD_SIZE-1)) +#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT)) +#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t)) + +/* + * The level-2 index is defined by the difference between the huge + * page size and the normal page size. A PTE is composed of + * PTRS_PER_PTE pte_t's and is the bottom level of the page table. + * Note that the hypervisor docs use PTE for what we call pte_t, so + * this nomenclature is somewhat confusing. + */ +#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) +#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) + +/* + * Align the vmalloc area to an L2 page table, and leave a guard page + * at the beginning and end. The vmalloc code also puts in an internal + * guard page between each allocation. 
+ */ +#define _VMALLOC_END HUGE_VMAP_BASE +#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) +#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) + +#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) + +#ifndef __ASSEMBLY__ + +/* We have no pud since we are a three-level page table. */ +#include <asm-generic/pgtable-nopud.h> + +static inline int pud_none(pud_t pud) +{ + return pud_val(pud) == 0; +} + +static inline int pud_present(pud_t pud) +{ + return pud_val(pud) & _PAGE_PRESENT; +} + +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) + +static inline void pud_clear(pud_t *pudp) +{ + __pte_clear(&pudp->pgd); +} + +static inline int pud_bad(pud_t pud) +{ + return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE); +} + +/* Return the page-table frame number (ptfn) that a pud_t points at. */ +#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd) + +/* + * A given kernel pud_t maps to a kernel pmd_t table at a specific + * virtual address. Since kernel pmd_t tables can be aligned at + * sub-page granularity, this macro can return non-page-aligned + * pointers, despite its name. + */ +#define pud_page_vaddr(pud) \ + (__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN)) + +/* + * A pud_t points to a pmd_t array. Since we can have multiple per + * page, we don't have a one-to-one mapping of pud_t's to pages. + */ +#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud))) + +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} + +#define pmd_offset(pud, address) \ + ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address)) + +static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval) +{ + set_pte(pmdp, pmdval); +} + +/* Create a pmd from a PTFN and pgprot. */ +static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) +{ + return hv_pte_set_ptfn(prot, ptfn); +} + +/* Return the page-table frame number (ptfn) that a pmd_t points at. */ +static inline unsigned long pmd_ptfn(pmd_t pmd) +{ + return hv_pte_get_ptfn(pmd); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + __pte_clear(pmdp); +} + +/* Normalize an address to having the correct high bits set. */ +#define pgd_addr_normalize pgd_addr_normalize +static inline unsigned long pgd_addr_normalize(unsigned long addr) +{ + return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >> + (CHIP_WORD_SIZE() - CHIP_VA_WIDTH()); +} + +/* We don't define any pgds for these addresses. */ +static inline int pgd_addr_invalid(unsigned long addr) +{ + return addr >= MEM_HV_START || + (addr > MEM_LOW_END && addr < MEM_HIGH_START); +} + +/* + * Use atomic instructions to provide atomicity against the hypervisor. 
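/*
 * The shift pair in pgd_addr_normalize() above is the standard
 * sign-extension idiom: push the top bit of the virtual address up to
 * the word's sign bit, then arithmetic-shift it back down.  A
 * standalone sketch with the chip parameters hard-coded (a 64-bit
 * word and a 42-bit VA width are assumed here purely for
 * illustration):
 */
static unsigned long sketch_normalize(unsigned long addr)
{
	return (unsigned long)(((long)addr << (64 - 42)) >> (64 - 42));
}

/*
 * sketch_normalize(0x20000000000UL) == 0xfffffe0000000000UL: bit 41,
 * the top bit of the assumed 42-bit VA, propagates through bits
 * 42..63, so high addresses compare correctly as signed values.
 */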
+ */ +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >> + HV_PTE_INDEX_ACCESSED) & 0x1; +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE); +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return hv_pte(__insn_exch(&ptep->val, 0UL)); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_TILE_PGTABLE_64_H */ diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index e6889474038..34c1e01ffb5 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h @@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task) extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +extern int do_work_pending(struct pt_regs *regs, u32 flags); + /* * Return saved (kernel) PC of a blocked thread. @@ -255,10 +257,6 @@ static inline void cpu_relax(void) barrier(); } -struct siginfo; -extern void arch_coredump_signal(struct siginfo *, struct pt_regs *); -#define arch_coredump_signal arch_coredump_signal - /* Info on this processor (see fs/proc/cpuinfo.c) */ struct seq_operations; extern const struct seq_operations cpuinfo_op; @@ -269,9 +267,6 @@ extern char chip_model[64]; /* Data on which physical memory controller corresponds to which NUMA node. */ extern int node_controller[]; -/* Do we dump information to the console when a user application crashes? */ -extern int show_crashinfo; - #if CHIP_HAS_CBOX_HOME_MAP() /* Does the heap allocator return hash-for-home pages by default? */ extern int hash_default; diff --git a/arch/tile/include/asm/serial.h b/arch/tile/include/asm/serial.h new file mode 100644 index 00000000000..a0cb0caff15 --- /dev/null +++ b/arch/tile/include/asm/serial.h @@ -0,0 +1 @@ +#include <asm-generic/serial.h> diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h index 81d92a45cd4..1e1e616783e 100644 --- a/arch/tile/include/asm/signal.h +++ b/arch/tile/include/asm/signal.h @@ -28,6 +28,10 @@ struct pt_regs; int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); void do_signal(struct pt_regs *regs); +void signal_fault(const char *type, struct pt_regs *, + void __user *frame, int sig); +void trace_unhandled_signal(const char *type, struct pt_regs *regs, + unsigned long address, int signo); #endif #endif /* _ASM_TILE_SIGNAL_H */ diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h new file mode 100644 index 00000000000..72be5904e02 --- /dev/null +++ b/arch/tile/include/asm/spinlock_64.h @@ -0,0 +1,161 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ * + * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere + * (the type definitions are in asm/spinlock_types.h) + */ + +#ifndef _ASM_TILE_SPINLOCK_64_H +#define _ASM_TILE_SPINLOCK_64_H + +/* Shifts and masks for the various fields in "lock". */ +#define __ARCH_SPIN_CURRENT_SHIFT 17 +#define __ARCH_SPIN_NEXT_MASK 0x7fff +#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000 + +/* + * Return the "current" portion of a ticket lock value, + * i.e. the number that currently owns the lock. + */ +static inline int arch_spin_current(u32 val) +{ + return val >> __ARCH_SPIN_CURRENT_SHIFT; +} + +/* + * Return the "next" portion of a ticket lock value, + * i.e. the number that the next task to try to acquire the lock will get. + */ +static inline int arch_spin_next(u32 val) +{ + return val & __ARCH_SPIN_NEXT_MASK; +} + +/* The lock is locked if a task would have to wait to get it. */ +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + u32 val = lock->lock; + return arch_spin_current(val) != arch_spin_next(val); +} + +/* Bump the current ticket so the next task owns the lock. */ +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + wmb(); /* guarantee anything modified under the lock is visible */ + __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT); +} + +void arch_spin_unlock_wait(arch_spinlock_t *lock); + +void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val); + +/* Grab the "next" ticket number and bump it atomically. + * If the current ticket is not ours, go to the slow path. + * We also take the slow path if the "next" value overflows. + */ +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + u32 val = __insn_fetchadd4(&lock->lock, 1); + u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW); + if (unlikely(arch_spin_current(val) != ticket)) + arch_spin_lock_slow(lock, ticket); +} + +/* Try to get the lock, and return whether we succeeded. */ +int arch_spin_trylock(arch_spinlock_t *lock); + +/* We cannot take an interrupt after getting a ticket, so don't enable them. */ +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * We use fetchadd() for readers, and fetchor() with the sign bit + * for writers. + */ + +#define __WRITE_LOCK_BIT (1 << 31) + +static inline int arch_write_val_locked(int val) +{ + return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */ +} + +/** + * read_can_lock - would read_trylock() succeed? + * @lock: the rwlock in question. + */ +static inline int arch_read_can_lock(arch_rwlock_t *rw) +{ + return !arch_write_val_locked(rw->lock); +} + +/** + * write_can_lock - would write_trylock() succeed? + * @lock: the rwlock in question. 
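
The read-write lock that follows keeps the reader count in the low 31 bits and uses the sign bit as the write-lock flag, so "write-locked" is simply "the value is negative". Readers rely on the TILE-Gx fetchaddgez instruction, which performs the add only when the result stays non-negative. A rough user-space analogue using the GCC __atomic builtins; a compare-and-swap loop stands in for fetchaddgez, which has no portable single-instruction equivalent:

    #include <stdint.h>
    #include <stdbool.h>

    #define WRITE_LOCK_BIT (1u << 31)

    /* Reader: bump the count unless a writer holds the sign bit. */
    static bool read_trylock(uint32_t *lock)
    {
            uint32_t val = __atomic_load_n(lock, __ATOMIC_RELAXED);
            while (!(val & WRITE_LOCK_BIT)) {
                    /* On failure, val is reloaded with the current value. */
                    if (__atomic_compare_exchange_n(lock, &val, val + 1,
                                                    false, __ATOMIC_ACQUIRE,
                                                    __ATOMIC_RELAXED))
                            return true;
            }
            return false;
    }

    /* Writer: set the sign bit; succeed only if nobody held the lock.
     * If only readers were present, back the bit out again, as
     * arch_write_trylock() does below. */
    static bool write_trylock(uint32_t *lock)
    {
            uint32_t old = __atomic_fetch_or(lock, WRITE_LOCK_BIT,
                                             __ATOMIC_ACQUIRE);
            if (old == 0)
                    return true;
            if (!(old & WRITE_LOCK_BIT))
                    __atomic_fetch_and(lock, ~WRITE_LOCK_BIT,
                                       __ATOMIC_RELAXED);
            return false;
    }
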
+ */ +static inline int arch_write_can_lock(arch_rwlock_t *rw) +{ + return rw->lock == 0; +} + +extern void __read_lock_failed(arch_rwlock_t *rw); + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + u32 val = __insn_fetchaddgez4(&rw->lock, 1); + if (unlikely(arch_write_val_locked(val))) + __read_lock_failed(rw); +} + +extern void __write_lock_failed(arch_rwlock_t *rw, u32 val); + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT); + if (unlikely(val != 0)) + __write_lock_failed(rw, val); +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + __insn_mf(); + __insn_fetchadd4(&rw->lock, -1); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + __insn_mf(); + rw->lock = 0; +} + +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1)); +} + +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT); + if (likely(val == 0)) + return 1; + if (!arch_write_val_locked(val)) + __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT); + return 0; +} + +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) + +#endif /* _ASM_TILE_SPINLOCK_64_H */ diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h index b16e5db8f0e..c0db34d56be 100644 --- a/arch/tile/include/asm/stat.h +++ b/arch/tile/include/asm/stat.h @@ -1,4 +1,4 @@ -#ifdef CONFIG_COMPAT +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) #define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */ #endif #include <asm-generic/stat.h> diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h index 25c686a00f1..7c37b38f6c8 100644 --- a/arch/tile/include/asm/swab.h +++ b/arch/tile/include/asm/swab.h @@ -18,12 +18,6 @@ /* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ #define __arch_swab32(x) __builtin_bswap32(x) #define __arch_swab64(x) __builtin_bswap64(x) - -/* Use the variant that is natural for the wordsize. */ -#ifdef CONFIG_64BIT -#define __arch_swab16(x) (__builtin_bswap64(x) >> 48) -#else #define __arch_swab16(x) (__builtin_bswap32(x) >> 16) -#endif #endif /* _ASM_TILE_SWAB_H */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 3405b52853b..bc4f562bd45 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti, #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_MEMDIE 7 /* OOM killer at work */ +#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) @@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti, #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_MEMDIE (1<<TIF_MEMDIE) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) /* Work to do on any return to user space. */ #define _TIF_ALLWORK_MASK \ - (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) + (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\ + _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME) /* * Thread-synchronous status. 
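
Adding _TIF_NOTIFY_RESUME to _TIF_ALLWORK_MASK above means the return-to-user path can test every deferred-work condition with a single AND of the thread flags, branching to do_work_pending() (declared in processor.h earlier in this diff) only when something is actually pending. A hypothetical C sketch of that single-test dispatch; on tile the real caller is the assembly interrupt-return path, and only TIF_NOTIFY_RESUME's value (8) is taken from this diff:

    #include <stdint.h>

    #define TIF_NOTIFY_RESUME  8                  /* from this diff */
    #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
    #define _TIF_SIGPENDING    (1 << 0)           /* illustrative stand-in */
    #define _TIF_ALLWORK_MASK  (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)

    /* Nonzero if some work must run before returning to user space;
     * the fast path pays for one AND and one branch, nothing more. */
    static inline int work_pending(uint32_t thread_flags)
    {
            return (thread_flags & _TIF_ALLWORK_MASK) != 0;
    }
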
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index 343172d422a..6fdd0c86019 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,25 +44,64 @@ static inline const struct cpumask *cpumask_of_node(int node)
 /* For now, use numa node -1 for global allocation. */
 #define pcibus_to_node(bus) ((void)(bus), -1)
 
+/*
+ * The TILE architecture integrates many cores in one processor, so we
+ * need to set up a bigger balance_interval for both the CPU and NODE
+ * scheduling domains to reduce the cost of process scheduling.
+ */
+
+/* sched_domains SD_CPU_INIT for TILE architecture */
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 4, \
+ .max_interval = 128, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 0*SD_WAKE_AFFINE \
+ | 0*SD_PREFER_LOCAL \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 32, \
+}
+
 /* sched_domains SD_NODE_INIT for TILE architecture */
-#define SD_NODE_INIT (struct sched_domain) { \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_EXEC \
- | SD_BALANCE_FORK \
- | SD_WAKE_AFFINE \
- | SD_SERIALIZE, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_NODE_INIT (struct sched_domain) { \
+ .min_interval = 16, \
+ .max_interval = 512, \
+ .busy_factor = 32, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 3, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 0*SD_WAKE_AFFINE \
+ | 0*SD_PREFER_LOCAL \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 128, \
 }
 
 /* By definition, we create nodes based on online memory. */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index d06e35f5720..5f20f920f93 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,10 +15,14 @@
 #ifndef _ASM_TILE_TRAPS_H
 #define _ASM_TILE_TRAPS_H
 
+#include <arch/chip.h>
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *, int fault_num,
 unsigned long address, unsigned long write);
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 void do_async_page_fault(struct pt_regs *);
+#endif
 
 #ifndef __tilegx__
 /*
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index b35c2db7119..f70bf1c541f 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -15,7 +15,7 @@
 #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
 #define _ASM_TILE_UNISTD_H
 
-#ifndef __LP64__
+#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
 /* Use the flavor of this syscall that matches the 32-bit API better.
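
__ARCH_WANT_SYNC_FILE_RANGE2, defined just below, selects the sync_file_range2() flavor of the syscall, whose arguments are reordered to suit 32-bit calling conventions: moving the 32-bit flags argument forward lets each 64-bit value occupy a naturally aligned register pair instead of forcing a padding register after the fd. The two prototypes side by side; this is the standard kernel rationale for the flavor-2 call, summarized here rather than stated in this diff (loff_t shown as int64_t for self-containedness):

    #include <stdint.h>

    /* Classic ordering: on a 32-bit ABI the 64-bit offset must start in
     * an even register, wasting one register after fd and pushing the
     * trailing flags off the end of the register set on some ABIs. */
    int sync_file_range(int fd, int64_t offset, int64_t nbytes,
                        unsigned int flags);

    /* Reordered flavor: both 32-bit arguments come first, so the two
     * 64-bit values pack cleanly into register pairs. */
    int sync_file_range2(int fd, unsigned int flags,
                         int64_t offset, int64_t nbytes);
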
*/
#define __ARCH_WANT_SYNC_FILE_RANGE2
#endif
diff --git a/arch/tile/include/asm/vga.h b/arch/tile/include/asm/vga.h
new file mode 100644
index 00000000000..7b46e754d61
--- /dev/null
+++ b/arch/tile/include/asm/vga.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Access to VGA videoram.
+ */
+
+#ifndef _ASM_TILE_VGA_H
+#define _ASM_TILE_VGA_H
+
+#include <asm/io.h>
+
+#define VT_BUF_HAVE_RW
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+	__raw_writew(val, (volatile u16 __iomem *) addr);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+	return __raw_readw((volatile const u16 __iomem *) addr);
+}
+
+#define vga_readb(a) readb((u8 __iomem *)(a))
+#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))
+
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))
+
+#endif
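
The scr_writew()/scr_readw() overrides (enabled by VT_BUF_HAVE_RW) exist because on tile the VGA text buffer is reached through ioremap()ed MMIO rather than ordinary memory, so the console core must not touch it with plain loads and stores. A hypothetical kernel-context usage sketch; 0xb8000 is the conventional PC text-mode base, and whether a given tile board exposes it depends on the attached PCI VGA device:

    #include <asm/vga.h>

    static void vga_hello(void)
    {
            /* Map 32KB of text-mode video memory; each 16-bit cell is a
             * character in the low byte and an attribute in the high
             * byte (0x07 = grey on black). */
            volatile u16 *vbuf =
                    (volatile u16 *)VGA_MAP_MEM(0xb8000, 0x8000);

            scr_writew(0x0700 | 'A', vbuf);      /* top-left of screen */
    }
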