Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--  arch/tile/include/asm/atomic_32.h     | 17
-rw-r--r--  arch/tile/include/asm/compat.h        | 55
-rw-r--r--  arch/tile/include/asm/elf.h           |  6
-rw-r--r--  arch/tile/include/asm/futex.h         | 17
-rw-r--r--  arch/tile/include/asm/page.h          |  9
-rw-r--r--  arch/tile/include/asm/pgtable.h       |  9
-rw-r--r--  arch/tile/include/asm/pgtable_32.h    | 12
-rw-r--r--  arch/tile/include/asm/ptrace.h        |  5
-rw-r--r--  arch/tile/include/asm/sections.h      |  9
-rw-r--r--  arch/tile/include/asm/signal.h        |  1
-rw-r--r--  arch/tile/include/asm/spinlock_32.h   |  3
-rw-r--r--  arch/tile/include/asm/stack.h         |  6
-rw-r--r--  arch/tile/include/asm/syscalls.h      | 72
-rw-r--r--  arch/tile/include/asm/system.h        | 30
-rw-r--r--  arch/tile/include/asm/thread_info.h   |  8
-rw-r--r--  arch/tile/include/asm/traps.h         | 26
-rw-r--r--  arch/tile/include/asm/uaccess.h       | 22
-rw-r--r--  arch/tile/include/asm/unistd.h        |  9
18 files changed, 227 insertions(+), 89 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index e4f8b4f0489..40a5a3a876d 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -348,6 +348,23 @@ void __init_atomic_per_cpu(void);
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif
+
+/* Private helper routines in lib/atomic_asm_32.S */
+extern struct __get_user __atomic_cmpxchg(volatile int *p,
+ int *lock, int o, int n);
+extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+ int *lock, int o, int n);
+extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
+extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
+extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
+extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
+extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
+ int *lock, u64 o, u64 n);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_32_H */
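
The helpers declared above return the same struct __get_user err/val pair that the futex and uaccess routines elsewhere in this diff use. As a rough sketch of how a caller might consume one of them (not taken from this patch: atomic_lock_for() is a hypothetical stand-in for whatever routine maps a word to its guarding lock, and .val is assumed to carry the previous contents of the target word):

/*
 * Illustration only -- a cmpxchg wrapper built on __atomic_cmpxchg().
 * Assumes <asm/atomic.h> for atomic_t; atomic_lock_for() is a made-up
 * name; memory-barrier details are deliberately omitted.
 */
static inline int example_atomic_cmpxchg(atomic_t *v, int o, int n)
{
        return __atomic_cmpxchg(&v->counter, atomic_lock_for(&v->counter),
                                o, n).val;
}
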
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index b09292bcc19..5a34da6cdd7 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -70,48 +70,7 @@ struct compat_timeval {
s32 tv_usec;
};
-struct compat_stat {
- unsigned int st_dev;
- unsigned int st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- unsigned int __pad1;
- int st_size;
- int st_blksize;
- int __pad2;
- int st_blocks;
- int st_atime;
- unsigned int st_atime_nsec;
- int st_mtime;
- unsigned int st_mtime_nsec;
- int st_ctime;
- unsigned int st_ctime_nsec;
- unsigned int __unused[2];
-};
-
-struct compat_stat64 {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long st_rdev;
- long st_size;
- unsigned int st_blksize;
- unsigned long st_blocks __attribute__((packed));
- unsigned int st_atime;
- unsigned int st_atime_nsec;
- unsigned int st_mtime;
- unsigned int st_mtime_nsec;
- unsigned int st_ctime;
- unsigned int st_ctime_nsec;
- unsigned int __unused8;
-};
-
+#define compat_stat stat
#define compat_statfs statfs
struct compat_sysctl {
@@ -233,7 +192,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
/* Sign-extend when storing a kernel pointer to a user's ptregs. */
static inline unsigned long ptr_to_compat_reg(void __user *uptr)
{
- return (long)(int)(long)uptr;
+ return (long)(int)(long __force)uptr;
}
static inline void __user *compat_alloc_user_space(long len)
@@ -278,17 +237,8 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
-long compat_sys_stat64(char __user *filename,
- struct compat_stat64 __user *statbuf);
-long compat_sys_lstat64(char __user *filename,
- struct compat_stat64 __user *statbuf);
-long compat_sys_fstat64(unsigned int fd, struct compat_stat64 __user *statbuf);
-long compat_sys_fstatat64(int dfd, char __user *filename,
- struct compat_stat64 __user *statbuf, int flag);
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
-ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
- size_t count);
/* Versions of compat functions that differ from generic Linux. */
struct compat_msgbuf;
@@ -302,7 +252,6 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_raise_fpe sys_raise_fpe
#define compat_sys_flush_cache sys_flush_cache
#endif /* _ASM_TILE_COMPAT_H */
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 1bca0debdb0..623a6bb741c 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -59,8 +59,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
*/
#define elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
- ((x)->e_machine == CHIP_ELF_TYPE() || \
- (x)->e_machine == CHIP_COMPAT_ELF_TYPE()))
+ (x)->e_machine == CHIP_ELF_TYPE())
/* The module loader only handles a few relocation types. */
#ifndef __tilegx__
@@ -139,8 +138,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
#define compat_elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
- ((x)->e_machine == CHIP_ELF_TYPE() || \
- (x)->e_machine == CHIP_COMPAT_ELF_TYPE()))
+ (x)->e_machine == CHIP_ELF_TYPE())
#define compat_start_thread(regs, ip, usp) do { \
regs->pc = ptr_to_compat_reg((void *)(ip)); \
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 9eaeb3c0878..fe0d10dcae5 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -29,14 +29,14 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
-extern struct __get_user futex_set(int *v, int i);
-extern struct __get_user futex_add(int *v, int n);
-extern struct __get_user futex_or(int *v, int n);
-extern struct __get_user futex_andn(int *v, int n);
-extern struct __get_user futex_cmpxchg(int *v, int o, int n);
+extern struct __get_user futex_set(int __user *v, int i);
+extern struct __get_user futex_add(int __user *v, int n);
+extern struct __get_user futex_or(int __user *v, int n);
+extern struct __get_user futex_andn(int __user *v, int n);
+extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
#ifndef __tilegx__
-extern struct __get_user futex_xor(int *v, int n);
+extern struct __get_user futex_xor(int __user *v, int n);
#else
static inline struct __get_user futex_xor(int __user *uaddr, int n)
{
@@ -131,6 +131,11 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
return asm_ret.err ? asm_ret.err : asm_ret.val;
}
+#ifndef __tilegx__
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_FUTEX_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index c8301c43d6d..f894a9016da 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -16,8 +16,6 @@
#define _ASM_TILE_PAGE_H
#include <linux/const.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT 16
@@ -29,6 +27,11 @@
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#ifdef __KERNEL__
+
+#include <hv/hypervisor.h>
+#include <arch/chip.h>
+
/*
* The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
* definitions in <hv/hypervisor.h>. We validate this at build time
@@ -331,4 +334,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
+#endif /* __KERNEL__ */
+
#endif /* _ASM_TILE_PAGE_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index beb1504e9c1..b3367379d53 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -229,9 +229,9 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
#define pte_ERROR(e) \
- printk("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
+ pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* set_pte_order() sets the given PTE and also sanity-checks the
@@ -470,6 +470,11 @@ static inline int pmd_huge_page(pmd_t pmd)
#include <asm-generic/pgtable.h>
+/* Support /proc/NN/pgtable API. */
+struct seq_file;
+int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
+ unsigned long vaddr, pte_t *ptep, void **datap);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_H */
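
For context, arch_proc_pgtable_show() is the per-architecture hook behind the /proc/NN/pgtable interface mentioned in the comment. One possible shape for it, shown purely as a sketch (the output format is invented here and the real implementation in this series may differ; only seq_printf() and pte_val() are standard kernel interfaces):

/*
 * Sketch only: print one translation.  Unused parameters are kept so
 * the signature matches the declaration above.
 */
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
                           unsigned long vaddr, pte_t *ptep, void **datap)
{
        seq_printf(m, "va 0x%08lx: pte 0x%016llx\n",
                   vaddr, (unsigned long long)pte_val(*ptep));
        return 0;
}
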
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index b935fb2ad4f..53ec3488474 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -89,15 +89,27 @@ static inline int pgd_addr_invalid(unsigned long addr)
/*
* Provide versions of these routines that can be used safely when
* the hypervisor may be asynchronously modifying dirty/accessed bits.
+ * ptep_get_and_clear() matches the generic one but we provide it to
+ * be parallel with the 64-bit code.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
extern int ptep_test_and_clear_young(struct vm_area_struct *,
unsigned long addr, pte_t *);
extern void ptep_set_wrprotect(struct mm_struct *,
unsigned long addr, pte_t *);
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4d1d9953016..acdae814e01 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -112,6 +112,9 @@ struct pt_regs {
/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);
+/* Trace the current syscall. */
+extern void do_syscall_trace(void);
+
extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
@@ -123,7 +126,7 @@ extern void show_regs(struct pt_regs *);
*/
struct single_step_state {
/* the page to which we will write hacked-up bundles */
- void *buffer;
+ void __user *buffer;
union {
int flags;
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 6c111491f0e..d062d463fca 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -25,7 +25,14 @@ extern char _sinitdata[], _einitdata[];
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];
-extern char __feedback_section_start[], __feedback_section_end[];
+
+/* Not exactly sections, but PC comparison points in the code. */
+extern char __rt_sigreturn[], __rt_sigreturn_end[];
+#ifndef __tilegx__
+extern char sys_cmpxchg[], __sys_cmpxchg_end[];
+extern char __sys_cmpxchg_grab_lock[];
+extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
+#endif
/* Handle the discontiguity between _sdata and _stext. */
static inline int arch_is_kernel_data(unsigned long addr)
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index d20d326d201..eb0253f3220 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -26,6 +26,7 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
+void do_signal(struct pt_regs *regs);
#endif
#endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index f3a8473c68d..88efdde8dd2 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -134,9 +134,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
int locked;
u32 val = __insn_tns((int *)&rwlock->lock);
- if (unlikely(val & 1)) {
+ if (unlikely(val & 1))
return arch_read_trylock_slow(rwlock);
- }
locked = (val << _RD_COUNT_WIDTH) == 0;
rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
return locked;
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 864913bcfbc..f908473c322 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -48,6 +48,10 @@ extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
/* Initialize iterator based on current stack. */
extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);
+/* Helper method for above. */
+extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
+ ulong pc, ulong lr, ulong sp, ulong r52);
+
/* No more frames? */
extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
@@ -64,5 +68,7 @@ extern void tile_show_stack(struct KBacktraceIterator *, int headers);
/* Dump stack of current process, with registers to seed the backtrace. */
extern void dump_stack_regs(struct pt_regs *);
+/* Helper method for assembly dump_stack(). */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
#endif /* _ASM_TILE_STACK_H */
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 9f2b8e2f69d..af165a74537 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -22,7 +22,19 @@
#include <linux/linkage.h>
#include <linux/signal.h>
#include <linux/types.h>
-#include <asm-generic/syscalls.h>
+#include <linux/compat.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+#ifdef CONFIG_COMPAT
+extern void *compat_sys_call_table[];
+#endif
+
+/*
+ * Note that by convention, any syscall which requires the current
+ * register set takes an additional "struct pt_regs *" pointer; the
+ * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ */
/* kernel/sys.c */
ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
@@ -31,10 +43,66 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
+long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+#ifdef __tilegx__
+long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t pgoff);
+#endif
+
+/* kernel/process.c */
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid,
+ struct pt_regs *regs);
+long sys_fork(void);
+long _sys_fork(struct pt_regs *regs);
+long sys_vfork(void);
+long _sys_vfork(struct pt_regs *regs);
+long sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp);
+long _sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs);
+
+/* kernel/signal.c */
+long sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
+ struct pt_regs *);
+long sys_rt_sigreturn(void);
+long _sys_rt_sigreturn(struct pt_regs *regs);
+
+/* platform-independent functions */
+long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
+long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+ struct sigaction __user *oact, size_t sigsetsize);
#ifndef __tilegx__
/* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+int sys_cmpxchg_badaddr(unsigned long address);
+int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+#endif
+
+#ifdef CONFIG_COMPAT
+long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp);
+long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+ struct compat_sigaltstack __user *uoss_ptr,
+ struct pt_regs *regs);
+long compat_sys_rt_sigreturn(void);
+long _compat_sys_rt_sigreturn(struct pt_regs *regs);
+
+/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
+long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
+long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
+long sys_truncate64(const char __user *path, loff_t length);
+long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
#endif /* _ASM_TILE_SYSCALLS_H */
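
The comment added at the top of this header spells out the pt_regs convention: sys_xxx() only appends the saved register state and tail-calls _sys_xxx(), which does the real work. A minimal sketch of that shape in C (illustrative only -- the actual stubs may be generated in assembly; task_pt_regs() is the generic kernel accessor for the register frame saved at kernel entry):

/* Sketch of the convention described above, not the actual stub. */
long sys_vfork(void)
{
        return _sys_vfork(task_pt_regs(current));
}
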
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index d6ca7f816c8..0935094f370 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -160,6 +160,14 @@ struct task_struct;
extern struct task_struct *_switch_to(struct task_struct *prev,
struct task_struct *next);
+/* Helper function for _switch_to(). */
+extern struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next,
+ unsigned long new_system_save_1_0);
+
+/* Address that switched-away from tasks are at. */
+extern unsigned long get_switch_to_pc(void);
+
/*
* On SMP systems, when the scheduler does migration-cost autodetection,
* it needs a way to flush as much of the CPU's caches as possible:
@@ -187,10 +195,26 @@ extern int unaligned_printk;
/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;
+/* Init-time routine to do tile-specific per-cpu setup. */
+void setup_cpu(int boot);
+
/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);
+#ifdef CONFIG_HARDWALL
+/* User-level network management functions */
+void reset_network_state(void);
+void grant_network_mpls(void);
+void restrict_network_mpls(void);
+int hardwall_deactivate(struct task_struct *task);
+
+/* Hook hardwall code into changes in affinity. */
+#define arch_set_cpus_allowed(p, new_mask) do { \
+ if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
+ hardwall_deactivate(p); \
+} while (0)
+#endif
/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
@@ -215,6 +239,12 @@ extern int _sim_syscall(int syscall_num, ...);
homecache_migrate_kthread(); \
} while (0)
+/* Support function for forking a new task. */
+void ret_from_fork(void);
+
+/* Called from ret_from_fork() when a new process starts up. */
+struct task_struct *sim_notify_fork(struct task_struct *prev);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_SYSTEM_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 9024bf3530a..beec8729564 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -55,7 +55,7 @@ struct thread_info {
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
- .step_state = 0, \
+ .step_state = NULL, \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -86,6 +86,12 @@ register unsigned long stack_pointer __asm__("sp");
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *info);
+/* Sit on a nap instruction until interrupted. */
+extern void smp_nap(void);
+
+/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
+extern void _cpu_idle(void);
+
/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
unsigned long new_sp,
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index eab33d4a917..432a9c15c8a 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -18,9 +18,28 @@
/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
unsigned long address, unsigned long write);
+void do_async_page_fault(struct pt_regs *);
+
+#ifndef __tilegx__
+/*
+ * We return this structure in registers to avoid having to write
+ * additional save/restore code in the intvec.S caller.
+ */
+struct intvec_state {
+ void *handler;
+ unsigned long vecnum;
+ unsigned long fault_num;
+ unsigned long info;
+ unsigned long retval;
+};
+struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
+ unsigned long address,
+ unsigned long info);
+#endif
/* kernel/traps.c */
void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
+void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
/* kernel/time.c */
void do_timer_interrupt(struct pt_regs *, int fault_num);
@@ -31,6 +50,13 @@ void hv_message_intr(struct pt_regs *, int intnum);
/* kernel/irq.c */
void tile_dev_intr(struct pt_regs *, int intnum);
+#ifdef CONFIG_HARDWALL
+/* kernel/hardwall.c */
+void do_hardwall_trap(struct pt_regs *, int fault_num);
+#endif
+
+/* kernel/ptrace.c */
+void do_breakpoint(struct pt_regs *, int fault_num);
#endif /* _ASM_TILE_SYSCALLS_H */
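
The intvec_state comment relies on the C ABI handing a small aggregate return value back in registers, so the intvec.S caller can pick up the fields without extra save/restore code. A toy illustration of that calling pattern (names invented; this is not the body of do_page_fault_ics()):

/* Build and return the five-word state by value; per the comment above,
 * the tile ABI hands this aggregate back in registers. */
static struct intvec_state make_state(void *handler, unsigned long vecnum,
                                      unsigned long fault_num,
                                      unsigned long info, unsigned long retval)
{
        struct intvec_state s = {
                .handler = handler, .vecnum = vecnum, .fault_num = fault_num,
                .info = info, .retval = retval,
        };
        return s;
}
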
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index f3058afd5a8..ed17a80ec0e 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -89,8 +89,10 @@ int __range_ok(unsigned long addr, unsigned long size);
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- (likely(__range_ok((unsigned long)addr, size) == 0))
+#define access_ok(type, addr, size) ({ \
+ __chk_user_ptr(addr); \
+ likely(__range_ok((unsigned long)(addr), (size)) == 0); \
+})
/*
* The exception table consists of pairs of addresses: the first is the
@@ -134,14 +136,14 @@ struct __get_user {
* such extended assembler routines, though we will have to use a
* different return code in that case (1, 2, or 4, rather than -EFAULT).
*/
-extern struct __get_user __get_user_1(const void *);
-extern struct __get_user __get_user_2(const void *);
-extern struct __get_user __get_user_4(const void *);
-extern struct __get_user __get_user_8(const void *);
-extern int __put_user_1(long, void *);
-extern int __put_user_2(long, void *);
-extern int __put_user_4(long, void *);
-extern int __put_user_8(long long, void *);
+extern struct __get_user __get_user_1(const void __user *);
+extern struct __get_user __get_user_2(const void __user *);
+extern struct __get_user __get_user_4(const void __user *);
+extern struct __get_user __get_user_8(const void __user *);
+extern int __put_user_1(long, void __user *);
+extern int __put_user_2(long, void __user *);
+extern int __put_user_4(long, void __user *);
+extern int __put_user_8(long long, void __user *);
/* Unimplemented routines to cause linker failures */
extern struct __get_user __get_user_bad(void);
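
The changes above only add __user annotations and a __chk_user_ptr() hook, but they also show the err/val convention these helpers share. A hedged sketch of a fixed-size fetch built on them (the wrapper name is invented; the err and val fields of struct __get_user are used the same way the futex code earlier in this diff uses them):

/* Illustration only: fetch a 32-bit value from user space.  VERIFY_READ
 * is the classic access_ok() type argument, which this implementation
 * ignores. */
static inline int fetch_user_int(const int __user *p, int *out)
{
        struct __get_user r;

        if (!access_ok(VERIFY_READ, p, sizeof(*p)))
                return -EFAULT;
        r = __get_user_4(p);
        if (r.err)
                return r.err;
        *out = (int)r.val;
        return 0;
}
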
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index 03b3d5d665d..f2e3ff48533 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -15,7 +15,6 @@
#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
#define _ASM_TILE_UNISTD_H
-
#ifndef __LP64__
/* Use the flavor of this syscall that matches the 32-bit API better. */
#define __ARCH_WANT_SYNC_FILE_RANGE2
@@ -24,6 +23,10 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
+/* Additional Tilera-specific syscalls. */
+#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_flush_cache, sys_flush_cache)
+
#ifndef __tilegx__
/* "Fast" syscalls provide atomic support for 32-bit chips. */
#define __NR_FAST_cmpxchg -1
@@ -33,10 +36,6 @@
__SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
#endif
-/* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
-
#ifdef __KERNEL__
/* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
#ifdef CONFIG_COMPAT