Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild | 9
-rw-r--r--  arch/arc/include/asm/arcregs.h | 127
-rw-r--r--  arch/arc/include/asm/barrier.h | 42
-rw-r--r--  arch/arc/include/asm/bitops.h | 5
-rw-r--r--  arch/arc/include/asm/bug.h | 5
-rw-r--r--  arch/arc/include/asm/cache.h | 62
-rw-r--r--  arch/arc/include/asm/cacheflush.h | 77
-rw-r--r--  arch/arc/include/asm/defines.h | 56
-rw-r--r--  arch/arc/include/asm/delay.h | 5
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arc/include/asm/elf.h | 3
-rw-r--r--  arch/arc/include/asm/entry.h | 546
-rw-r--r--  arch/arc/include/asm/io.h | 4
-rw-r--r--  arch/arc/include/asm/irq.h | 9
-rw-r--r--  arch/arc/include/asm/irqflags.h | 59
-rw-r--r--  arch/arc/include/asm/kgdb.h | 8
-rw-r--r--  arch/arc/include/asm/kprobes.h | 6
-rw-r--r--  arch/arc/include/asm/linkage.h | 14
-rw-r--r--  arch/arc/include/asm/mach_desc.h | 17
-rw-r--r--  arch/arc/include/asm/mmu.h | 51
-rw-r--r--  arch/arc/include/asm/mmu_context.h | 200
-rw-r--r--  arch/arc/include/asm/page.h | 18
-rw-r--r--  arch/arc/include/asm/perf_event.h | 204
-rw-r--r--  arch/arc/include/asm/pgalloc.h | 11
-rw-r--r--  arch/arc/include/asm/pgtable.h | 57
-rw-r--r--  arch/arc/include/asm/processor.h | 46
-rw-r--r--  arch/arc/include/asm/prom.h | 14
-rw-r--r--  arch/arc/include/asm/ptrace.h | 83
-rw-r--r--  arch/arc/include/asm/sections.h | 2
-rw-r--r--  arch/arc/include/asm/serial.h | 10
-rw-r--r--  arch/arc/include/asm/setup.h | 2
-rw-r--r--  arch/arc/include/asm/shmparam.h | 18
-rw-r--r--  arch/arc/include/asm/smp.h | 10
-rw-r--r--  arch/arc/include/asm/spinlock.h | 9
-rw-r--r--  arch/arc/include/asm/spinlock_types.h | 6
-rw-r--r--  arch/arc/include/asm/syscall.h | 5
-rw-r--r--  arch/arc/include/asm/syscalls.h | 2
-rw-r--r--  arch/arc/include/asm/thread_info.h | 2
-rw-r--r--  arch/arc/include/asm/tlb-mmu1.h | 4
-rw-r--r--  arch/arc/include/asm/tlb.h | 53
-rw-r--r--  arch/arc/include/asm/tlbflush.h | 11
-rw-r--r--  arch/arc/include/asm/uaccess.h | 4
-rw-r--r--  arch/arc/include/asm/unaligned.h | 7
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild | 7
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h | 20
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h | 11
46 files changed, 949 insertions, 974 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5..e76fd79f32b 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,16 +1,18 @@
generic-y += auxvec.h
-generic-y += bugs.h
+generic-y += barrier.h
generic-y += bitsperlong.h
+generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
-generic-y += fcntl.h
generic-y += fb.h
+generic-y += fcntl.h
generic-y += ftrace.h
generic-y += hardirq.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h
@@ -20,6 +22,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h
@@ -28,11 +31,11 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += shmbuf.h
-generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 1b907c46566..355cb470c2a 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -20,7 +20,6 @@
#define ARC_REG_PERIBASE_BCR 0x69
#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
-#define ARC_REG_MMU_BCR 0x6f
#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
#define ARC_REG_TIMERS_BCR 0x75
#define ARC_REG_ICCM_BCR 0x78
@@ -34,22 +33,12 @@
#define ARC_REG_D_UNCACH_BCR 0x6A
/* status32 Bits Positions */
-#define STATUS_H_BIT 0 /* CPU Halted */
-#define STATUS_E1_BIT 1 /* Int 1 enable */
-#define STATUS_E2_BIT 2 /* Int 2 enable */
-#define STATUS_A1_BIT 3 /* Int 1 active */
-#define STATUS_A2_BIT 4 /* Int 2 active */
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
-#define STATUS_H_MASK (1<<STATUS_H_BIT)
-#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
-#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
-#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
-#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
@@ -71,6 +60,7 @@
#define ECR_V_ITLB_MISS 0x21
#define ECR_V_DTLB_MISS 0x22
#define ECR_V_PROTV 0x23
+#define ECR_V_TRAP 0x25
/* Protection Violation Exception Cause Code Values */
#define ECR_C_PROTV_INST_FETCH 0x00
@@ -79,94 +69,23 @@
#define ECR_C_PROTV_XCHG 0x03
#define ECR_C_PROTV_MISALIG_DATA 0x04
+#define ECR_C_BIT_PROTV_MISALIG_DATA 10
+
+/* Machine Check Cause Code Values */
+#define ECR_C_MCHK_DUP_TLB 0x01
+
/* DTLB Miss Exception Cause Code Values */
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1 0x0031abcd
+#define event_IRQ2 0x0032abcd
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
-#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
-#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
-#define AUX_IRQ_LV12 0x43 /* interrupt level register */
-
-#define AUX_IENABLE 0x40c
-#define AUX_ITRIGGER 0x40d
-#define AUX_IPULSE 0x415
-
-/* Timer related Aux registers */
-#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
-#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
-#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
-#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
-#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
-#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
-
-#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */
-#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
-
-/* MMU Management regs */
-#define ARC_REG_TLBPD0 0x405
-#define ARC_REG_TLBPD1 0x406
-#define ARC_REG_TLBINDEX 0x407
-#define ARC_REG_TLBCOMMAND 0x408
-#define ARC_REG_PID 0x409
-#define ARC_REG_SCRATCH_DATA0 0x418
-
-/* Bits in MMU PID register */
-#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
-
-/* Error code if probe fails */
-#define TLB_LKUP_ERR 0x80000000
-
-/* TLB Commands */
-#define TLBWrite 0x1
-#define TLBRead 0x2
-#define TLBGetIndex 0x3
-#define TLBProbe 0x4
-
-#if (CONFIG_ARC_MMU_VER >= 2)
-#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
-#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
-#else
-#undef TLBWriteNI /* These cmds don't exist on older MMU */
-#undef TLBIVUTLB
-#endif
-/* Instruction cache related Auxiliary registers */
-#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
-#define ARC_REG_IC_IVIC 0x10
-#define ARC_REG_IC_CTRL 0x11
-#define ARC_REG_IC_IVIL 0x19
-#if (CONFIG_ARC_MMU_VER > 2)
-#define ARC_REG_IC_PTAG 0x1E
-#endif
-
-/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE 0x1
-
-/* Data cache related Auxiliary registers */
-#define ARC_REG_DC_BCR 0x72
-#define ARC_REG_DC_IVDC 0x47
-#define ARC_REG_DC_CTRL 0x48
-#define ARC_REG_DC_IVDL 0x4A
-#define ARC_REG_DC_FLSH 0x4B
-#define ARC_REG_DC_FLDL 0x4C
-#if (CONFIG_ARC_MMU_VER > 2)
-#define ARC_REG_DC_PTAG 0x5C
-#endif
-
-/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH 0x40
-#define DC_CTRL_FLUSH_STATUS 0x100
-
-/* MMU Management regs */
-#define ARC_REG_PID 0x409
-#define ARC_REG_SCRATCH_DATA0 0x418
-
-/* Bits in MMU PID register */
-#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
/*
* Floating Pt Registers
@@ -293,24 +212,6 @@ struct bcr_identity {
#endif
};
-struct bcr_mmu_1_2 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
-#else
- unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
-#endif
-};
-
-struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
- u_itlb:4, u_dtlb:4;
-#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
- ways:4, ver:8;
-#endif
-};
-
#define EXTN_SWAP_VALID 0x1
#define EXTN_NORM_VALID 0x2
#define EXTN_MINMAX_VALID 0x2
@@ -343,14 +244,6 @@ struct bcr_extn_xymem {
#endif
};
-struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
- unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
-};
-
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:8, pad2:8, sz:8, pad:8;
@@ -403,7 +296,7 @@ struct cpuinfo_arc_mmu {
};
struct cpuinfo_arc_cache {
- unsigned int has_aliasing, sz, line_len, assoc, ver;
+ unsigned int sz, line_len, assoc, ver;
};
struct cpuinfo_arc_ccm {
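
The endian-conditional bitfield structs above (bcr_identity and friends) exist because C bitfield allocation follows byte order: GCC assigns the first-listed member to the least significant bits on a little-endian target and to the most significant bits on a big-endian one. A little-endian host sketch with a made-up register layout, not a real BCR:

#include <stdio.h>

/* little-endian layout: members listed LSB-first, so 'ver' here models a
 * version field kept in the low byte (hypothetical field split) */
struct bcr_demo {
    unsigned int ver:8, cfg:8, pad:16;
};

int main(void)
{
    union { unsigned int word; struct bcr_demo f; } u = { .word = 0x00001203 };

    /* on a little-endian host: ver=3 cfg=18 */
    printf("ver=%u cfg=%u\n", u.f.ver, u.f.cfg);
    return 0;
}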
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
deleted file mode 100644
index f6cb7c4ffb3..00000000000
--- a/arch/arc/include/asm/barrier.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_BARRIER_H
-#define __ASM_BARRIER_H
-
-#ifndef __ASSEMBLY__
-
-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() mb()
-#define wmb() mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-#define read_barrier_depends() mb()
-
-/* TODO-vineetg verify the correctness of macros here */
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#define smp_read_barrier_depends() do { } while (0)
-
-#endif
-
-#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 647a83a8e75..ebc0cf3164d 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
* Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
@@ -496,10 +497,6 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
*/
#define ffz(x) __ffs(~(x))
-/* TODO does this affect uni-processor code */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 2ad8f9b1c54..5b18e94c667 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -18,9 +18,8 @@ struct task_struct;
void show_regs(struct pt_regs *regs);
void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
- unsigned long address, unsigned long cause_reg);
-void die(const char *str, struct pt_regs *regs, unsigned long address,
- unsigned long cause_reg);
+ unsigned long address);
+void die(const char *str, struct pt_regs *regs, unsigned long address);
#define BUG() do { \
dump_stack(); \
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861f..b3c750979aa 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -17,22 +17,14 @@
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
-#define ARC_ICACHE_WAYS 2
-#define ARC_DCACHE_WAYS 4
-
-/* Helpers */
-#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
-#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
-
-#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
-#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
-
-#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
-#error "Need to fix some code as I/D cache lines not same"
-#else
-#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
-#endif
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di), hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
#ifndef __ASSEMBLY__
@@ -55,21 +47,39 @@
: "r"(data), "r"(ptr)); \
})
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-/*
- * ARC700 doesn't cache any access in top 256M.
- * Ideal for wiring memory mapped peripherals as we don't need to do
- * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
- */
-#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
-
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void __init read_decode_cache_bcr(void);
+extern void read_decode_cache_bcr(void);
+
+#endif /* !__ASSEMBLY__ */
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if defined(CONFIG_ARC_MMU_V3)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if defined(CONFIG_ARC_MMU_V3)
+#define ARC_REG_DC_PTAG 0x5C
#endif
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
#endif /* _ASM_CACHE_H */
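
A small user-space sketch of how CACHE_LINE_MASK is meant to be used: round a region down to a line boundary, then step one line at a time (the printf stands in for per-line ops like IVDL/FLDL; line size and addresses are assumed, not taken from the patch):

#include <stdio.h>

#define L1_CACHE_SHIFT          6   /* assumed config; shift is Kconfig-selectable */
#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
#define CACHE_LINE_MASK         (~(L1_CACHE_BYTES - 1))
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000UL

static void walk_lines(unsigned long start, unsigned long sz)
{
    unsigned long end  = start + sz;
    unsigned long addr = start & CACHE_LINE_MASK;   /* round down to a line */

    if (addr >= ARC_UNCACHED_ADDR_SPACE)            /* never cached: nothing to do */
        return;

    for (; addr < end; addr += L1_CACHE_BYTES)
        printf("line op @ 0x%08lx\n", addr);        /* stand-in for IVDL/FLDL */
}

int main(void)
{
    walk_lines(0x80001234UL, 100);
    return 0;
}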
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 97ee96f2650..6abc4972bc9 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,13 +19,24 @@
#define _ASM_CACHEFLUSH_H
#include <linux/mm.h>
+#include <asm/shmparam.h>
+
+/*
+ * Semantically we need this because icache doesn't snoop dcache/dma.
+ * However ARC cache flush requires paddr as well as vaddr, the latter not
+ * available in the flush_icache_page() API. So we no-op it but do the
+ * equivalent work in update_mmu_cache()
+ */
+#define flush_icache_page(vma, page)
void flush_cache_all(void);
void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
- int len);
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v) \
+ ___flush_dcache_page((unsigned long)p, (unsigned long)v)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
@@ -42,23 +53,65 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm) /* called on fork */
+#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+#else /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
+ * This works around some PIO based drivers which don't call flush_dcache_page
+ * to record that they dirtied the dcache
+ */
+#define PG_dc_clean PG_arch_1
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+ return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+}
+
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2) \
+({ \
+ cache_is_vipt_aliasing() ? \
+ (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
+})
+
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
if (vma->vm_flags & VM_EXEC) \
- flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+ __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
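
CACHE_COLOR()/addr_not_cache_congruent() encode the aliasing test: with a VIPT dcache spanning two page-sized "colors", bit PAGE_SHIFT of an address decides which set group a page maps to, and two mappings of the same physical page need explicit maintenance only when their colors differ. A standalone sketch of the same test (8K pages assumed, the ARC default; addresses are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 13                 /* assumed: 8K pages */
#define CACHE_COLOR(addr) ((((unsigned long)(addr)) >> PAGE_SHIFT) & 1)

/* Two mappings of one physical page only need explicit flushing
 * when they index into different cache sets (different color). */
static int not_congruent(unsigned long kvaddr, unsigned long uvaddr)
{
    return CACHE_COLOR(kvaddr) != CACHE_COLOR(uvaddr);
}

int main(void)
{
    printf("%d\n", not_congruent(0x70002000, 0x20004000)); /* colors 1 vs 0 -> 1 */
    printf("%d\n", not_congruent(0x70002000, 0x20006000)); /* both color 1  -> 0 */
    return 0;
}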
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
deleted file mode 100644
index 6097bb439cc..00000000000
--- a/arch/arc/include/asm/defines.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARC_ASM_DEFINES_H__
-#define __ARC_ASM_DEFINES_H__
-
-#if defined(CONFIG_ARC_MMU_V1)
-#define CONFIG_ARC_MMU_VER 1
-#elif defined(CONFIG_ARC_MMU_V2)
-#define CONFIG_ARC_MMU_VER 2
-#elif defined(CONFIG_ARC_MMU_V3)
-#define CONFIG_ARC_MMU_VER 3
-#endif
-
-#ifdef CONFIG_ARC_HAS_LLSC
-#define __CONFIG_ARC_HAS_LLSC_VAL 1
-#else
-#define __CONFIG_ARC_HAS_LLSC_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_SWAPE
-#define __CONFIG_ARC_HAS_SWAPE_VAL 1
-#else
-#define __CONFIG_ARC_HAS_SWAPE_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_RTSC
-#define __CONFIG_ARC_HAS_RTSC_VAL 1
-#else
-#define __CONFIG_ARC_HAS_RTSC_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_MMU_SASID
-#define __CONFIG_ARC_MMU_SASID_VAL 1
-#else
-#define __CONFIG_ARC_MMU_SASID_VAL 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_ICACHE
-#define __CONFIG_ARC_HAS_ICACHE 1
-#else
-#define __CONFIG_ARC_HAS_ICACHE 0
-#endif
-
-#ifdef CONFIG_ARC_HAS_DCACHE
-#define __CONFIG_ARC_HAS_DCACHE 1
-#else
-#define __CONFIG_ARC_HAS_DCACHE 0
-#endif
-
-#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f70..43de3025698 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
- /* (long long) cast ensures 64 bit MPY - real or emulated
+ /* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
- loops = ((long long)(usecs * 4295 * HZ) *
- (long long)(loops_per_jiffy)) >> 32;
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
__delay(loops);
}
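
The constant 4295 is 2^32 / 10^6 rounded up, so the multiply-and-shift computes usecs * HZ * loops_per_jiffy / 10^6 with a single 64-bit multiply and no runtime division. A quick host-side check of the arithmetic with made-up config values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned long HZ = 100, loops_per_jiffy = 500000; /* assumed config */
    unsigned long usecs = 10;

    /* (u64) cast forces a 64-bit multiply; >>32 with 4295 = 2^32/1e6 divides by 1e6 */
    unsigned long loops = ((uint64_t)usecs * 4295 * HZ * loops_per_jiffy) >> 32;

    /* reference result: usecs * lpj * HZ / 1e6 */
    unsigned long ref = (uint64_t)usecs * loops_per_jiffy * HZ / 1000000;

    printf("fixed-point %lu vs exact %lu\n", loops, ref);  /* 500 vs 500 */
    return 0;
}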
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec082..45b8e0cea17 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
- sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;
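
The one-character change above is a real bug fix: the old line dereferenced sg, the head of the list, so every iteration overwrote the first entry's dma_address and the remaining entries never received theirs. A host-side analogue of the failure mode (plain structs and a plain loop, not the kernel scatterlist API):

#include <stdio.h>

struct ent { unsigned long dma_address; };

int main(void)
{
    struct ent sg[3] = {{0}}, *s;
    int i, nents = 3;

    /* buggy form: sg->dma_address aliases sg[0], clobbered on every pass */
    for (i = 0, s = sg; i < nents; i++, s++)
        sg->dma_address = 0x1000 * (i + 1);
    printf("buggy: %lx %lx %lx\n",
           sg[0].dma_address, sg[1].dma_address, sg[2].dma_address);

    /* fixed form: each iteration writes its own entry, as in the patch */
    for (i = 0, s = sg; i < nents; i++, s++)
        s->dma_address = 0x1000 * (i + 1);
    printf("fixed: %lx %lx %lx\n",
           sg[0].dma_address, sg[1].dma_address, sg[2].dma_address);
    return 0;
}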
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebec..a2628285768 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
*/
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9..884081099f8 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
/* Note on the LD/ST addr modes with addr reg wback
*
@@ -50,194 +51,177 @@
* Eff Addr for load = [reg2]
*/
+.macro PUSH reg
+ st.a \reg, [sp, -4]
+.endm
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ PUSH r9
+.endm
+
+.macro POP reg
+ ld.ab \reg, [sp, 4]
+.endm
+
+.macro POPAX aux
+ POP r9
+ sr r9, [\aux]
+.endm
+
/*--------------------------------------------------------------
- * Save caller saved registers (scratch registers) ( r0 - r12 )
- * Registers are pushed / popped in the order defined in struct ptregs
- * in asm/ptrace.h
+ * Helpers to save/restore Scratch Regs:
+ * used by Interrupt/Exception Prologue/Epilogue
*-------------------------------------------------------------*/
-.macro SAVE_CALLER_SAVED
- st.a r0, [sp, -4]
- st.a r1, [sp, -4]
- st.a r2, [sp, -4]
- st.a r3, [sp, -4]
- st.a r4, [sp, -4]
- st.a r5, [sp, -4]
- st.a r6, [sp, -4]
- st.a r7, [sp, -4]
- st.a r8, [sp, -4]
- st.a r9, [sp, -4]
- st.a r10, [sp, -4]
- st.a r11, [sp, -4]
- st.a r12, [sp, -4]
+.macro SAVE_R0_TO_R12
+ PUSH r0
+ PUSH r1
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ PUSH r5
+ PUSH r6
+ PUSH r7
+ PUSH r8
+ PUSH r9
+ PUSH r10
+ PUSH r11
+ PUSH r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ POP r12
+ POP r11
+ POP r10
+ POP r9
+ POP r8
+ POP r7
+ POP r6
+ POP r5
+ POP r4
+ POP r3
+ POP r2
+ POP r1
+ POP r0
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
.endm
/*--------------------------------------------------------------
- * Restore caller saved registers (scratch registers)
+ * Helpers to save/restore callee-saved regs:
+ * used by several macros below
*-------------------------------------------------------------*/
-.macro RESTORE_CALLER_SAVED
- ld.ab r12, [sp, 4]
- ld.ab r11, [sp, 4]
- ld.ab r10, [sp, 4]
- ld.ab r9, [sp, 4]
- ld.ab r8, [sp, 4]
- ld.ab r7, [sp, 4]
- ld.ab r6, [sp, 4]
- ld.ab r5, [sp, 4]
- ld.ab r4, [sp, 4]
- ld.ab r3, [sp, 4]
- ld.ab r2, [sp, 4]
- ld.ab r1, [sp, 4]
- ld.ab r0, [sp, 4]
+.macro SAVE_R13_TO_R24
+ PUSH r13
+ PUSH r14
+ PUSH r15
+ PUSH r16
+ PUSH r17
+ PUSH r18
+ PUSH r19
+ PUSH r20
+ PUSH r21
+ PUSH r22
+ PUSH r23
+ PUSH r24
+.endm
+
+.macro RESTORE_R24_TO_R13
+ POP r24
+ POP r23
+ POP r22
+ POP r21
+ POP r20
+ POP r19
+ POP r18
+ POP r17
+ POP r16
+ POP r15
+ POP r14
+ POP r13
.endm
+#define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
/*--------------------------------------------------------------
- * Save callee saved registers (non scratch registers) ( r13 - r25 )
- * on kernel stack.
- * User mode callee regs need to be saved in case of
- * -fork and friends for replicating from parent to child
- * -before going into do_signal( ) for ptrace/core-dump
- * Special case handling is required for r25 in case it is used by kernel
- * for caching task ptr. Low level exception/ISR save user mode r25
- * into task->thread.user_r25. So it needs to be retrieved from there and
- * saved into kernel stack with rest of callee reg-file
+ * Collect User Mode callee regs as struct callee_regs - needed by
+ * fork/do_signal/unaligned-access-emulation.
+ * (By default only scratch regs are saved on entry to kernel)
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * It would have been saved in task->thread.user_r25 already, but to keep
+ * the interface same it is copied into regular r25 placeholder in
+ * struct callee_regs.
*-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
- st.a r13, [sp, -4]
- st.a r14, [sp, -4]
- st.a r15, [sp, -4]
- st.a r16, [sp, -4]
- st.a r17, [sp, -4]
- st.a r18, [sp, -4]
- st.a r19, [sp, -4]
- st.a r20, [sp, -4]
- st.a r21, [sp, -4]
- st.a r22, [sp, -4]
- st.a r23, [sp, -4]
- st.a r24, [sp, -4]
+
+ SAVE_R13_TO_R24
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it on stack
- ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ ld.as r12, [sp, OFF_USER_R25_FROM_R24]
st.a r12, [sp, -4]
#else
- st.a r25, [sp, -4]
+ PUSH r25
#endif
- /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
- sub sp, sp, 4
.endm
/*--------------------------------------------------------------
- * Save callee saved registers (non scratch registers) ( r13 - r25 )
- * kernel mode callee regs needed to be saved in case of context switch
- * If r25 is used for caching task pointer then that need not be saved
- * as it can be re-created from current task global
+ * Save kernel Mode callee regs at the time of Context Switch.
+ *
+ * Special handling for r25 if used for caching Task Pointer.
+ * Kernel simply skips saving it since it will be loaded with
+ * incoming task pointer anyways
*-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
- st.a r13, [sp, -4]
- st.a r14, [sp, -4]
- st.a r15, [sp, -4]
- st.a r16, [sp, -4]
- st.a r17, [sp, -4]
- st.a r18, [sp, -4]
- st.a r19, [sp, -4]
- st.a r20, [sp, -4]
- st.a r21, [sp, -4]
- st.a r22, [sp, -4]
- st.a r23, [sp, -4]
- st.a r24, [sp, -4]
+
+ SAVE_R13_TO_R24
+
#ifdef CONFIG_ARC_CURR_IN_REG
- sub sp, sp, 8
-#else
- st.a r25, [sp, -4]
sub sp, sp, 4
+#else
+ PUSH r25
#endif
.endm
/*--------------------------------------------------------------
- * RESTORE_CALLEE_SAVED_KERNEL:
- * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
- * This is reverse of SAVE_CALLEE_SAVED,
- *
- * NOTE:
- * Ideally this shd only be called in switch_to for loading
- * switched-IN task's CALLEE Reg File.
- * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
- * which simply pops the stack w/o touching regs.
+ * Opposite of SAVE_CALLEE_SAVED_KERNEL
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
-
#ifdef CONFIG_ARC_CURR_IN_REG
- add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
+ add sp, sp, 4 /* skip usual r25 placeholder */
#else
- add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
- ld.ab r25, [sp, 4]
+ POP r25
#endif
-
- ld.ab r24, [sp, 4]
- ld.ab r23, [sp, 4]
- ld.ab r22, [sp, 4]
- ld.ab r21, [sp, 4]
- ld.ab r20, [sp, 4]
- ld.ab r19, [sp, 4]
- ld.ab r18, [sp, 4]
- ld.ab r17, [sp, 4]
- ld.ab r16, [sp, 4]
- ld.ab r15, [sp, 4]
- ld.ab r14, [sp, 4]
- ld.ab r13, [sp, 4]
-
+ RESTORE_R24_TO_R13
.endm
/*--------------------------------------------------------------
- * RESTORE_CALLEE_SAVED_USER:
- * This is called after do_signal where tracer might have changed callee regs
- * thus we need to restore the reg file.
- * Special case handling is required for r25 in case it is used by kernel
- * for caching task ptr. Ptrace would have modified on-kernel-stack value of
- * r25, which needs to be shoved back into task->thread.user_r25 where from
- * Low level exception/ISR return code will retrieve to populate with rest of
- * callee reg-file.
+ * Opposite of SAVE_CALLEE_SAVED_USER
+ *
+ * ptrace tracer or unaligned-access fixup might have changed a user mode
+ * callee reg which is saved back to usual r25 storage location
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER
- add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
-
#ifdef CONFIG_ARC_CURR_IN_REG
ld.ab r12, [sp, 4]
- st r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ st.as r12, [sp, OFF_USER_R25_FROM_R24]
#else
- ld.ab r25, [sp, 4]
+ POP r25
#endif
-
- ld.ab r24, [sp, 4]
- ld.ab r23, [sp, 4]
- ld.ab r22, [sp, 4]
- ld.ab r21, [sp, 4]
- ld.ab r20, [sp, 4]
- ld.ab r19, [sp, 4]
- ld.ab r18, [sp, 4]
- ld.ab r17, [sp, 4]
- ld.ab r16, [sp, 4]
- ld.ab r15, [sp, 4]
- ld.ab r14, [sp, 4]
- ld.ab r13, [sp, 4]
+ RESTORE_R24_TO_R13
.endm
/*--------------------------------------------------------------
* Super FAST Restore callee saved regs by simply re-adjusting SP
*-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
- add sp, sp, 14 * 4
-.endm
-
-/*--------------------------------------------------------------
- * Restore User mode r25 saved in task_struct->thread.user_r25
- *-------------------------------------------------------------*/
-.macro RESTORE_USER_R25
- ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
+ add sp, sp, SZ_CALLEE_REGS
.endm
/*-------------------------------------------------------------
@@ -252,7 +236,7 @@
ld \out, [\tsk, TASK_THREAD_INFO]
/* Go to end of page where stack begins (grows upwards) */
- add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
+ add2 \out, \out, (THREAD_SIZE)/4
.endm
@@ -305,33 +289,28 @@
* safe-keeping not really needed, but it keeps the epilogue code
* (SP restore) simpler/uniform.
*/
- b.d 77f
-
- st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
+ b.d 66f
+ mov r9, sp
88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
GET_CURR_TASK_ON_CPU r9
-#ifdef CONFIG_ARC_CURR_IN_REG
-
- /* If current task pointer cached in r25, time to
- * -safekeep USER r25 in task->thread_struct->user_r25
- * -load r25 with current task ptr
- */
- st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
- mov r25, r9
-#endif
-
/* With current tsk in r9, get it's kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
-#ifdef PT_REGS_CANARY
- st 0xabcdabcd, [r9, 0]
+66:
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * Treat r25 as scratch reg, save it on stack first
+ * Load it with current task pointer
+ */
+ st r25, [r9, -4]
+ GET_CURR_TASK_ON_CPU r25
#endif
/* Save Pre Intr/Exception User SP on kernel stack */
- st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
+ st.a sp, [r9, -16] ; Make room for orig_r0, ECR, user_r25
/* CAUTION:
* SP should be set at the very end when we are done with everything
@@ -342,7 +321,7 @@
/* set SP to point to kernel mode stack */
mov sp, r9
-77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+ /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
.endm
@@ -369,7 +348,7 @@
* @reg [OUT] &thread_info of "current"
*/
.macro GET_CURR_THR_INFO_FROM_SP reg
- and \reg, sp, ~(THREAD_SIZE - 1)
+ bic \reg, sp, (THREAD_SIZE - 1)
.endm
/*
@@ -386,7 +365,7 @@
* it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
*
* Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of ptregs.
+ * to be saved again on kernel mode stack, as part of pt_regs.
*-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG reg
#ifdef CONFIG_SMP
@@ -405,6 +384,28 @@
.endm
/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+ /* Need at least 1 reg to code the early exception prologue */
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ /* U/K mode at time of exception (stack not switched if already K) */
+ lr r9, [erstatus]
+
+ /* ARC700 doesn't provide auto-stack switching */
+ SWITCH_TO_KERNEL_STK
+
+ /* save the regfile */
+ SAVE_ALL_SYS
+.endm
+
+/*--------------------------------------------------------------
* Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
* Requires SP to be already switched to kernel mode Stack
* sp points to the next free element on the stack at exit of this macro.
@@ -413,62 +414,25 @@
* Note that syscalls are implemented via TRAP which is also a exception
* from CPU's point of view
*-------------------------------------------------------------*/
-.macro SAVE_ALL_EXCEPTION marker
+.macro SAVE_ALL_SYS
- st \marker, [sp, 8]
+ lr r9, [ecr]
+ st r9, [sp, 8] /* ECR */
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */
EXCPN_PROLOG_RESTORE_REG r9
- SAVE_CALLER_SAVED
- st.a r26, [sp, -4] /* gp */
- st.a fp, [sp, -4]
- st.a blink, [sp, -4]
- lr r9, [eret]
- st.a r9, [sp, -4]
- lr r9, [erstatus]
- st.a r9, [sp, -4]
- st.a lp_count, [sp, -4]
- lr r9, [lp_end]
- st.a r9, [sp, -4]
- lr r9, [lp_start]
- st.a r9, [sp, -4]
- lr r9, [erbta]
- st.a r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
- mov r9, 0xdeadbeef
- st r9, [sp, -4]
-#endif
-
- /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
- sub sp, sp, 4
-.endm
-
-/*--------------------------------------------------------------
- * Save scratch regs for exceptions
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_SYS
- SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
-.endm
-
-/*--------------------------------------------------------------
- * Save scratch regs for sys calls
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_TRAP
- /*
- * Setup pt_regs->orig_r8.
- * Encode syscall number (r8) in upper short word of event type (r9)
- * N.B. #1: This is already endian safe (see ptrace.h)
- * #2: Only r9 can be used as scratch as it is already clobbered
- * and it's contents are no longer needed by the latter part
- * of exception prologue
- */
- lsl r9, r8, 16
- or r9, r9, orig_r8_IS_SCALL
-
- SAVE_ALL_EXCEPTION r9
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSHAX eret
+ PUSHAX erstatus
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX erbta
.endm
/*--------------------------------------------------------------
@@ -483,28 +447,22 @@
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS
+ POPAX erbta
+ POPAX lp_start
+ POPAX lp_end
- add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
-
- ld.ab r9, [sp, 4]
- sr r9, [erbta]
- ld.ab r9, [sp, 4]
- sr r9, [lp_start]
- ld.ab r9, [sp, 4]
- sr r9, [lp_end]
- ld.ab r9, [sp, 4]
- mov lp_count, r9
- ld.ab r9, [sp, 4]
- sr r9, [erstatus]
- ld.ab r9, [sp, 4]
- sr r9, [eret]
- ld.ab blink, [sp, 4]
- ld.ab fp, [sp, 4]
- ld.ab r26, [sp, 4] /* gp */
- RESTORE_CALLER_SAVED
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX erstatus
+ POPAX eret
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
ld sp, [sp] /* restore original sp */
- /* orig_r0 and orig_r8 skipped automatically */
+ /* orig_r0, ECR, user_r25 skipped automatically */
.endm
@@ -513,9 +471,7 @@
*-------------------------------------------------------------*/
.macro SAVE_ALL_INT1
- /* restore original r9 , saved in int1_saved_reg
- * It will be saved on stack in macro: SAVE_CALLER_SAVED
- */
+ /* restore original r9 to be saved as part of reg-file */
#ifdef CONFIG_SMP
lr r9, [ARC_REG_SCRATCH_DATA0]
#else
@@ -523,29 +479,19 @@
#endif
/* now we are ready to save the remaining context :) */
- st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
+ st event_IRQ1, [sp, 8] /* Dummy ECR */
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
- SAVE_CALLER_SAVED
- st.a r26, [sp, -4] /* gp */
- st.a fp, [sp, -4]
- st.a blink, [sp, -4]
- st.a ilink1, [sp, -4]
- lr r9, [status32_l1]
- st.a r9, [sp, -4]
- st.a lp_count, [sp, -4]
- lr r9, [lp_end]
- st.a r9, [sp, -4]
- lr r9, [lp_start]
- st.a r9, [sp, -4]
- lr r9, [bta_l1]
- st.a r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
- mov r9, 0xdeadbee1
- st r9, [sp, -4]
-#endif
- /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
- sub sp, sp, 4
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink1
+ PUSHAX status32_l1
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l1
.endm
.macro SAVE_ALL_INT2
@@ -558,30 +504,19 @@
ld r9, [@int2_saved_reg]
/* now we are ready to save the remaining context :) */
- st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
+ st event_IRQ2, [sp, 8] /* Dummy ECR */
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
- SAVE_CALLER_SAVED
- st.a r26, [sp, -4] /* gp */
- st.a fp, [sp, -4]
- st.a blink, [sp, -4]
- st.a ilink2, [sp, -4]
- lr r9, [status32_l2]
- st.a r9, [sp, -4]
- st.a lp_count, [sp, -4]
- lr r9, [lp_end]
- st.a r9, [sp, -4]
- lr r9, [lp_start]
- st.a r9, [sp, -4]
- lr r9, [bta_l2]
- st.a r9, [sp, -4]
-
-#ifdef PT_REGS_CANARY
- mov r9, 0xdeadbee2
- st r9, [sp, -4]
-#endif
- /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
- sub sp, sp, 4
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink2
+ PUSHAX status32_l2
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l2
.endm
/*--------------------------------------------------------------
@@ -595,52 +530,41 @@
*-------------------------------------------------------------*/
.macro RESTORE_ALL_INT1
- add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
-
- ld.ab r9, [sp, 4] /* Actual reg file */
- sr r9, [bta_l1]
- ld.ab r9, [sp, 4]
- sr r9, [lp_start]
- ld.ab r9, [sp, 4]
- sr r9, [lp_end]
- ld.ab r9, [sp, 4]
- mov lp_count, r9
- ld.ab r9, [sp, 4]
- sr r9, [status32_l1]
- ld.ab r9, [sp, 4]
- mov ilink1, r9
- ld.ab blink, [sp, 4]
- ld.ab fp, [sp, 4]
- ld.ab r26, [sp, 4] /* gp */
- RESTORE_CALLER_SAVED
+ POPAX bta_l1
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX status32_l1
+ POP ilink1
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
ld sp, [sp] /* restore original sp */
- /* orig_r0 and orig_r8 skipped automatically */
+ /* orig_r0, ECR, user_r25 skipped automatically */
.endm
.macro RESTORE_ALL_INT2
- add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
-
- ld.ab r9, [sp, 4]
- sr r9, [bta_l2]
- ld.ab r9, [sp, 4]
- sr r9, [lp_start]
- ld.ab r9, [sp, 4]
- sr r9, [lp_end]
- ld.ab r9, [sp, 4]
- mov lp_count, r9
- ld.ab r9, [sp, 4]
- sr r9, [status32_l2]
- ld.ab r9, [sp, 4]
- mov ilink2, r9
- ld.ab blink, [sp, 4]
- ld.ab fp, [sp, 4]
- ld.ab r26, [sp, 4] /* gp */
- RESTORE_CALLER_SAVED
+ POPAX bta_l2
+ POPAX lp_start
+ POPAX lp_end
- ld sp, [sp] /* restore original sp */
- /* orig_r0 and orig_r8 skipped automatically */
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+ POPAX status32_l2
+ POP ilink2
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR, user_r25 skipped automatically */
.endm
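
The PUSH/POP macros introduced above lean on ARC's address-writeback modes, described in the file's own note on LD/ST addr modes: st.a pre-decrements SP before storing, ld.ab loads and then post-increments. A tiny C model of that discipline, showing why each RESTORE_* macro must pop in exactly the reverse order of its SAVE_* counterpart:

#include <stdio.h>

/* C model of the addressing modes behind PUSH/POP:
 *   st.a reg, [sp, -4] -> pre-decrement sp, then store       (PUSH)
 *   ld.ab reg, [sp, 4] -> load from sp, then post-increment  (POP)
 */
static void push(unsigned long **sp, unsigned long v) { *--(*sp) = v; }
static unsigned long pop(unsigned long **sp) { return *(*sp)++; }

int main(void)
{
    unsigned long stack[4], *sp = &stack[4];    /* stack grows down */

    push(&sp, 1); push(&sp, 2);                 /* SAVE_*: regs pushed in order */
    printf("%lu %lu\n", pop(&sp), pop(&sp));    /* RESTORE_*: reverse order: 2 1 */
    return 0;
}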
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 473424d7528..334ce7017a1 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -100,6 +100,10 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
#include <asm-generic/io.h>
#endif /* _ASM_ARC_IO_H */
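
These aliases record that on ARC a plain MMIO read already has relaxed semantics, so the _relaxed variants cost nothing extra. A compilable stand-in (my_readl* are hypothetical names, not the kernel API) showing the same aliasing pattern and a typical relaxed poll loop:

#include <stdint.h>

static inline uint32_t my_readl(const volatile void *addr)
{
    return *(const volatile uint32_t *)addr;
}
#define my_readl_relaxed(a) my_readl(a)    /* same aliasing as the patch */

/* hypothetical poll loop: relaxed is fine, only this register is inspected */
static uint32_t poll_status(const volatile uint32_t *status_reg)
{
    uint32_t v;
    while (((v = my_readl_relaxed(status_reg)) & 1) == 0)
        ;
    return v;
}

int main(void)
{
    volatile uint32_t fake_reg = 0x3;      /* pretend the device is already ready */
    return poll_status(&fake_reg) == 0x3 ? 0 : 1;
}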
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4c588f9820c..fb4efb64897 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -9,7 +9,8 @@
#ifndef __ASM_ARC_IRQ_H
#define __ASM_ARC_IRQ_H
-#define NR_IRQS 32
+#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */
+#define NR_IRQS 128 /* allow some CPU external IRQ handling */
/* Platform Independent IRQs */
#define TIMER0_IRQ 3
@@ -17,9 +18,7 @@
#include <asm-generic/irq.h>
-extern void __init arc_init_IRQ(void);
-extern int __init get_hw_config_num_irq(void);
-
-void __cpuinit arc_local_timer_setup(unsigned int cpu);
+extern void arc_init_IRQ(void);
+void arc_local_timer_setup(void);
#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62..cb7efc29f16 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -19,6 +19,26 @@
#include <asm/arcregs.h>
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
#ifndef __ASSEMBLY__
/******************************************************************
@@ -39,7 +59,7 @@ static inline long arch_local_irq_save(void)
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "cc");
+ : "memory", "cc");
return flags;
}
@@ -53,7 +73,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
__asm__ __volatile__(
" flag %0 \n"
:
- : "r"(flags));
+ : "r"(flags)
+ : "memory");
}
/*
@@ -73,7 +94,8 @@ static inline void arch_local_irq_disable(void)
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
}
/*
@@ -85,7 +107,9 @@ static inline long arch_local_save_flags(void)
__asm__ __volatile__(
" lr %0, [status32] \n"
- : "=&r"(temp));
+ : "=&r"(temp)
+ :
+ : "memory");
return temp;
}
@@ -127,23 +151,38 @@ static inline void arch_unmask_irq(unsigned int irq)
#else
-.macro IRQ_DISABLE scratch
- lr \scratch, [status32]
- bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
- flag \scratch
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
.endm
-.macro IRQ_DISABLE_SAVE scratch, save
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE scratch
lr \scratch, [status32]
- mov \save, \scratch /* Make a copy */
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
+ TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
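
The newly added "memory" clobbers turn these primitives into proper compiler barriers: without them GCC could keep shared data cached in registers across the STATUS32 write, or move accesses out of the protected region. A host-side model (fake_* are stand-ins; the empty asm models only the clobber, not the FLAG instruction):

#include <stdio.h>

static int shared;

/* stand-ins for the ARC primitives: asm("" ::: "memory") models the
 * "memory" clobber the patch adds, acting as a compiler barrier */
static inline unsigned long fake_irq_save(void)
{
    unsigned long flags = 0x6;             /* pretend E1|E2 were set */
    __asm__ __volatile__("" : : : "memory");
    return flags;
}

static inline void fake_irq_restore(unsigned long flags)
{
    (void)flags;
    __asm__ __volatile__("" : : : "memory");
}

int main(void)
{
    unsigned long flags = fake_irq_save();
    shared++;                              /* cannot be moved past the barriers */
    fake_irq_restore(flags);
    printf("%d\n", shared);
    return 0;
}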
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca..b65fca7ffeb 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_KGDB
-#include <asm/user.h>
+#include <asm/ptrace.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
@@ -31,7 +31,7 @@ static inline void arch_kgdb_breakpoint(void)
__asm__ __volatile__ ("trap_s 0x4\n");
}
-extern void kgdb_trap(struct pt_regs *regs, int param);
+extern void kgdb_trap(struct pt_regs *regs);
enum arc700_linux_regnums {
_R0 = 0,
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
};
#else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs)
#endif
#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 4d9c211fce7..944dbedb38b 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -50,11 +50,9 @@ struct kprobe_ctlblk {
int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
void kretprobe_trampoline(void);
-void trap_is_kprobe(unsigned long cause, unsigned long address,
- struct pt_regs *regs);
+void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
#else
-static void trap_is_kprobe(unsigned long cause, unsigned long address,
- struct pt_regs *regs)
+static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
}
#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e44e0..5faad17118b 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -11,19 +11,7 @@
#ifdef __ASSEMBLY__
-/* Can't use the ENTRY macro in linux/linkage.h
- * gas considers ';' as comment vs. newline
- */
-.macro ARC_ENTRY name
- .global \name
- .align 4
- \name:
-.endm
-
-.macro ARC_EXIT name
-#define ASM_PREV_SYM_ADDR(name) .-##name
- .size \ name, ASM_PREV_SYM_ADDR(\name)
-.endm
+#define ASM_NL ` /* use '`' to mark new line in macro */
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 9998dc846eb..e8993a2be6c 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -51,22 +51,12 @@ struct machine_desc {
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
-#define for_each_machine_desc(p) \
- for (p = __arch_info_begin; p < __arch_info_end; p++)
-
-static inline struct machine_desc *default_machine_desc(void)
-{
- /* the default machine is the last one linked in */
- if (__arch_info_end - 1 < __arch_info_begin)
- return NULL;
- return __arch_info_end - 1;
-}
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
/*
* Set of macros to define architecture features.
@@ -81,7 +71,6 @@ __attribute__((__section__(".arch.info.init"))) = { \
#define MACHINE_END \
};
-extern struct machine_desc *setup_machine_fdt(void *dt);
-extern void __init copy_devtree(void);
+extern const struct machine_desc *setup_machine_fdt(void *dt);
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 56b02320f1a..8c84ae98c33 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -9,15 +9,58 @@
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+/* MMU Management regs */
+#define ARC_REG_MMU_BCR 0x06f
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001)
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#endif
+
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long asid; /* Pvt Addr-Space ID for mm */
-#ifdef CONFIG_ARC_TLB_DBG
- struct task_struct *tsk;
-#endif
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
#endif
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void read_decode_mmu_bcr(void);
+
+#endif /* !__ASSEMBLY__ */
+
#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 0d71fb11b57..1fd467ef658 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -30,99 +30,72 @@
* "Fast Context Switch" i.e. no TLB flush on ctxt-switch
*
* Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
- * For book-keeping, Linux uses a couple of data-structures:
- * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- * time of say switch_mm( )
- * -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- * given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
*/
-#define FIRST_ASID 0
-#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
-#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
-#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID 0UL
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
-extern int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
/*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
- struct mm_struct *prev_owner;
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
local_irq_save(flags);
/*
- * Relinquish the currently owned ASID (if any).
- * Doing unconditionally saves a cmp-n-branch; for already unused
- * ASID slot, the value was/remains NULL
+ * Move to new ASID if it was not from current alloc-cycle/generation.
+ * This is done by ensuring that the generation bits in both mm->ASID
+ * and cpu's ASID counter are exactly same.
+ *
+ * Note: Callers needing new ASID unconditionally, independent of
+ * generation, e.g. local_flush_tlb_mm() for forking parent,
+ * first need to destroy the context, setting it to invalid
+ * value.
*/
- asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+ goto set_hw;
- /* move to new ASID */
- if (++asid_cache > MAX_ASID) { /* ASID roll-over */
- asid_cache = FIRST_ASID;
- flush_tlb_all();
- }
+ /* move to new ASID and handle rollover */
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
- /*
- * Is next ASID already owned by some-one else (we are stealing it).
- * If so, let the orig owner be aware of this, so when it runs, it
- * asks for a brand new ASID. This would only happen for a long-lived
- * task with ASID from prev allocation cycle (before ASID roll-over).
- *
- * This might look wrong - if we are re-using some other task's ASID,
- * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
- * care of such a case: it ensures that task with ASID from prev alloc
- * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
- * The stealing scenario described here will only happen if that task
- * didn't get a chance to refresh it's ASID - implying stale entries
- * won't exist.
- */
- prev_owner = asid_mm_map[asid_cache];
- if (prev_owner)
- prev_owner->context.asid = NO_ASID;
+ local_flush_tlb_all();
+
+ /*
+ * Above check is for rollover of 8 bit ASID in 32 bit container.
+ * If the container itself wrapped around, set it to a non zero
+ * "generation" to distinguish from no context
+ */
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
+ }
/* Assign new ASID to tsk */
- asid_mm_map[asid_cache] = mm;
- mm->context.asid = asid_cache;
-
-#ifdef CONFIG_ARC_TLB_DBG
- pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
- " pid:%u, assigned asid:%lu\n",
- (unsigned int)mm, (unsigned int)prev_owner,
- (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
- (mm->context.tsk)->pid, mm->context.asid);
-#endif
+ asid_mm(mm, cpu) = asid_cpu(cpu);
- write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+ write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
local_irq_restore(flags);
}
@@ -134,59 +107,61 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
- mm->context.tsk = tsk;
-#endif
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
return 0;
}
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
/* Prepare the MMU for task: setup PID reg with allocated ASID
If task doesn't have an ASID (never alloc or stolen, get a new ASID)
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ const int cpu = smp_processor_id();
+
+ /*
+ * Note that the mm_cpumask is "aggregating" only, we don't clear it
+ * for the switched-out task, unlike some other arches.
+ * It is used to enlist cpus for sending TLB flush IPIs and not sending
+ * it to CPUs where a task once ran-on, could cause stale TLB entry
+ * re-use, specially for a multi-threaded task.
+ * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+ * For a non-aggregating mm_cpumask, IPI not sent C1, and if T1
+ * were to re-migrate to C1, it could access the unmapped region
+ * via any existing stale TLB entries.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
#ifndef CONFIG_SMP
/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif
- /*
- * Get a new ASID if task doesn't have a valid one. Possible when
- * -task never had an ASID (fresh after fork)
- * -it's ASID was stolen - past an ASID roll-over.
- * -There's a third obscure scenario (if this task is running for the
- * first time afer an ASID rollover), where despite having a valid
- * ASID, we force a get for new ASID - see comments at top.
- *
- * Both the non-alloc scenario and first-use-after-rollover can be
- * detected using the single condition below: NO_ASID = 256
- * while asid_cache is always a valid ASID value (0-255).
- */
- if (next->context.asid > asid_cache) {
- get_new_mmu_context(next);
- } else {
- /*
- * XXX: This will never happen given the chks above
- * BUG_ON(next->context.asid > MAX_ASID);
- */
- write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
- }
-
+ get_new_mmu_context(next);
}
-static inline void destroy_context(struct mm_struct *mm)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- asid_mm_map[mm->context.asid] = NULL;
- mm->context.asid = NO_ASID;
-
- local_irq_restore(flags);
-}
+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always assigns a new ASID, because mm has
+ * an unallocated "initial" value, while in the latter it moves to a new
+ * ASID only if the current one was unallocated
+ */
+#define activate_mm(prev, next) switch_mm(prev, next, NULL)
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
@@ -197,17 +172,6 @@ static inline void destroy_context(struct mm_struct *mm)
*/
#define deactivate_mm(tsk, mm) do { } while (0)
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
- write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
- /* Unconditionally get a new ASID */
- get_new_mmu_context(next);
-
-}
-
#define enter_lazy_tlb(mm, tsk)
#endif /* __ASM_ARC_MMU_CONTEXT_H */
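
The ASID scheme above packs the hardware PID and an allocation "cycle" into one word, so a stale ASID from before a wrap-around is caught by a single compare. A minimal user-space model of the validity check (a sketch under assumed mask values, not the kernel code):

#include <stdbool.h>
#include <stdint.h>

#define MM_CTXT_ASID_MASK	0x000000ffU		/* assumed: 8-bit hw PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* allocation generation */

/* an mm's ASID is reusable only if allocated in the CPU's current cycle */
static bool asid_still_valid(uint32_t mm_asid, uint32_t cpu_asid)
{
	return !((mm_asid ^ cpu_asid) & MM_CTXT_CYCLE_MASK);
}
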
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf54610455..9c8aa41e45c 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -16,12 +16,17 @@
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
-/* TBD: for now don't worry about VIPT D$ aliasing */
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
-#define clear_user_page(addr, vaddr, pg) clear_page(addr)
-#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
#undef STRICT_MM_TYPECHECKS
@@ -91,13 +96,8 @@ typedef unsigned long pgtable_t;
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-/* Default Permissions for page, used in mmap.c */
-#ifdef CONFIG_ARC_STACK_NONEXEC
+/* Default Permissions for stack/heaps pages (Non Executable) */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
-#else
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#endif
#define WANT_PAGE_VIRTUAL 1
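
copy_user_highpage()/clear_user_page() take the user vaddr because, with an aliasing VIPT D$, the kernel and user mappings of the same page can land in different cache sets. A sketch of the congruence test, assuming two cache bins (names and page size are illustrative; cf. the SHMLBA definition later in this patch):

#define PAGE_SHIFT	13	/* assumed: 8 KB pages */

/* pages land in the same D$ bin iff the bit just above the page offset matches */
#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> PAGE_SHIFT) & 1)

static inline int addr_not_cache_congruent(unsigned long addr1, unsigned long addr2)
{
	return CACHE_COLOR(addr1) != CACHE_COLOR(addr2);
}
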
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 115ad96480e..cbf755e32a0 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Linux performance counter support for ARC
+ *
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -10,4 +12,204 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+/* real maximum varies per CPU; this is the maximum supported by the driver */
+#define ARC_PMU_MAX_HWEVENTS 64
+
+#define ARC_REG_CC_BUILD 0xF6
+#define ARC_REG_CC_INDEX 0x240
+#define ARC_REG_CC_NAME0 0x241
+#define ARC_REG_CC_NAME1 0x242
+
+#define ARC_REG_PCT_BUILD 0xF5
+#define ARC_REG_PCT_COUNTL 0x250
+#define ARC_REG_PCT_COUNTH 0x251
+#define ARC_REG_PCT_SNAPL 0x252
+#define ARC_REG_PCT_SNAPH 0x253
+#define ARC_REG_PCT_CONFIG 0x254
+#define ARC_REG_PCT_CONTROL 0x255
+#define ARC_REG_PCT_INDEX 0x256
+
+#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
+#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int m:8, c:8, r:6, s:2, v:8;
+#else
+ unsigned int v:8, s:2, r:6, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int c:16, r:8, v:8;
+#else
+ unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6)
+
+/*
+ * The "generalized" performance events seem to really be a copy
+ * of the available events on x86 processors; the mapping to ARC
+ * events is not always possible 1-to-1. Fortunately, there doesn't
+ * seem to be an exact definition for these events, so we can cheat
+ * a bit where necessary.
+ *
+ * In particular, the following PERF events may behave a bit differently
+ * compared to other architectures:
+ *
+ * PERF_COUNT_HW_CPU_CYCLES
+ * Cycles not in halted state
+ *
+ * PERF_COUNT_HW_REF_CPU_CYCLES
+ * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
+ * for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
+ *
+ * PERF_COUNT_HW_BUS_CYCLES
+ * Unclear what this means; Intel uses 0x013c, which according to
+ * their datasheet means "unhalted reference cycles". It sounds similar
+ * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ * STALLED_CYCLES_FRONTEND.
+ *
+ * We could start multiple performance counters and combine everything
+ * afterwards, but that makes it complicated.
+ *
+ * Note that I$ cache misses aren't counted by either of the two!
+ */
+
+static const char * const arc_pmu_ev_hw_map[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_BUS_CYCLES] = "crun",
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+ [PERF_COUNT_ARC_DCLM] = "dclm",
+ [PERF_COUNT_ARC_DCSM] = "dcsm",
+ [PERF_COUNT_ARC_ICM] = "icm",
+ [PERF_COUNT_ARC_BPOK] = "bpok",
+ [PERF_COUNT_ARC_EDTLB] = "edtlb",
+ [PERF_COUNT_ARC_EITLB] = "eitlb",
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xffff
+
+static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
#endif /* __ASM_PERF_EVENT_H */
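
For context, a sketch of how a driver might read one counter pair defined above, using the snapshot mechanism so the low/high halves are coherent (assumes the usual read_aux_reg()/write_aux_reg() accessors; arc_pmu_read_counter is a hypothetical helper name):

static uint64_t arc_pmu_read_counter(int idx)
{
	uint32_t tmp;
	uint64_t result;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* select counter */
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);	/* latched together with SNAPH */

	return result;
}
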
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 36a9f20c21a..81208bfd9dc 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -105,11 +105,16 @@ static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
pgtable_t pte_pg;
+ struct page *page;
pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
- if (pte_pg) {
- memzero((void *)pte_pg, PTRS_PER_PTE * 4);
- pgtable_page_ctor(virt_to_page(pte_pg));
+ if (!pte_pg)
+ return 0;
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ page = virt_to_page(pte_pg);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return 0;
}
return pte_pg;
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c09..6b0b7f7ef78 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -60,40 +60,29 @@
#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
-#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
-#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
-#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
-#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
+#define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<7) /* page cache/ swap (S) */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
-#else
+#else /* MMU v3 onwards */
-/* PD1 */
#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
-#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
-#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
+#define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<6) /* page cache/ swap (S) */
#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
#endif
-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+/* vmalloc permissions */
+#define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_GLOBAL | _PAGE_PRESENT)
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
@@ -106,7 +95,7 @@
* -by default cached, unless config otherwise
* -present in memory
*/
-#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
@@ -121,15 +110,19 @@
#define PAGE_SHARED PAGE_U_W_R
-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
* Thus Global, all-kernel-access, no-user-access, cached
*/
-#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
/* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
- _PAGE_GLOBAL)
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -390,11 +383,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
-#define io_remap_pfn_range(vma, from, pfn, size, prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
-
#include <asm-generic/pgtable.h>
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
/*
* No page table caches to initialise
*/
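
The PTE_BITS_* masks added above partition a Linux PTE between the two hardware TLB "Page Descriptor" words. A sketch of the split (assumed shape for illustration, not the actual tlb.c code):

static void mk_tlb_entry(unsigned long vaddr, unsigned long pte_val,
			 unsigned int asid, unsigned long *pd0, unsigned long *pd1)
{
	/* PD0: virtual page number + ASID + Global/Present bits */
	*pd0 = (vaddr & PAGE_MASK) | asid | (pte_val & PTE_BITS_IN_PD0);
	/* PD1: physical page number + cacheability + RWX permissions */
	*pd1 = pte_val & (PTE_BITS_NON_RWX_IN_PD1 | PTE_BITS_RWX);
}
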
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 5f26b2c1cba..d99f9b37cd1 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -18,7 +18,7 @@
#ifndef __ASSEMBLY__
-#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
+#include <asm/ptrace.h>
/* Arch specific stuff which needs to be saved per task.
* However these items are not so important so as to earn a place in
@@ -28,10 +28,6 @@ struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
- unsigned long cause_code; /* Exception Cause Code (ECR) */
-#ifdef CONFIG_ARC_CURR_IN_REG
- unsigned long user_r25;
-#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
struct arc_fpu fpu;
#endif
@@ -44,15 +40,13 @@ struct thread_struct {
/* Forward declaration, a strange C thing */
struct task_struct;
-/*
- * Return saved PC of a blocked thread.
- */
+/* Return saved PC of a blocked thread */
unsigned long thread_saved_pc(struct task_struct *t);
#define task_pt_regs(p) \
- ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
-/* Free all resources held by a thread. */
+/* Free all resources held by a thread */
#define release_thread(thread) do { } while (0)
/* Prepare to copy thread state - unlazy all lazy status */
@@ -75,32 +69,18 @@ unsigned long thread_saved_pc(struct task_struct *t);
/*
 * Whereabouts of task's sp, fp, blink when it was last seen in kernel mode.
- * These can't be derived from pt_regs as that would give correp user-mode val
+ * Look in process.c for details of kernel stack layout
*/
#define KSTK_ESP(tsk) (tsk->thread.ksp)
-#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
-#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
-/*
- * Do necessary setup to start up a newly executed thread.
- *
- * E1,E2 so that Interrupts are enabled in user mode
- * L set, so Loop inhibited to begin with
- * lp_start and lp_end seeded with bogus non-zero values so to easily catch
- * the ARC700 sr to lp_start hardware bug
- */
-#define start_thread(_regs, _pc, _usp) \
-do { \
- set_fs(USER_DS); /* reads from user space */ \
- (_regs)->ret = (_pc); \
- /* Interrupts enabled in User Mode */ \
- (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
- | STATUS_E1_MASK | STATUS_E2_MASK; \
- (_regs)->sp = (_usp); \
- /* bogus seed values for debugging */ \
- (_regs)->lp_start = 0x10; \
- (_regs)->lp_end = 0x80; \
-} while (0)
+#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
+ sizeof(struct callee_regs) + off)))
+
+#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
+#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
+
+extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp);
extern unsigned int get_wchan(struct task_struct *p);
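
The KSTK_REG() arithmetic implies the following kernel-stack layout for a switched-out task (a sketch inferred from the offsets; see process.c for the authoritative picture):

/*
 *  tsk->thread.ksp -> +-----------------------+
 *                     | callee_regs (r13-r25) |  sizeof(struct callee_regs)
 *                     +-----------------------+
 *                     | fp                    |  KSTK_REG(tsk, 0)
 *                     | blink                 |  KSTK_REG(tsk, 4)
 *                     +-----------------------+
 */
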
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
deleted file mode 100644
index 692d0d0789a..00000000000
--- a/arch/arc/include/asm/prom.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_PROM_H_
-#define _ASM_ARC_PROM_H_
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a8..1bfeec2c055 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -17,68 +17,50 @@
/* THE pt_regs: Defines how regs are saved during entry into kernel */
struct pt_regs {
- /*
- * 1 word gutter after reg-file has been saved
- * Technically not needed, Since SP always points to a "full" location
- * (vs. "empty"). But pt_regs is shared with tools....
- */
- long res;
/* Real registers */
long bta; /* bta_l1, bta_l2, erbta */
- long lp_start;
- long lp_end;
- long lp_count;
+
+ long lp_start, lp_end, lp_count;
+
long status32; /* status32_l1, status32_l2, erstatus */
long ret; /* ilink1, ilink2 or eret */
long blink;
long fp;
long r26; /* gp */
- long r12;
- long r11;
- long r10;
- long r9;
- long r8;
- long r7;
- long r6;
- long r5;
- long r4;
- long r3;
- long r2;
- long r1;
- long r0;
+
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
long sp; /* user/kernel sp depending on where we came from */
long orig_r0;
- /*to distinguish bet excp, syscall, irq */
+ /*
+ * To distinguish between exception, syscall and irq
+ * For traps and exceptions, Exception Cause Register.
+ * ECR: <00> <VV> <CC> <PP>
+ * Last word used by Linux for extra state mgmt (syscall-restart)
+ * For interrupts, use artificial ECR values to note current prio-level
+ */
union {
+ struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
- /* so that assembly code is same for LE/BE */
- unsigned long orig_r8:16, event:16;
+ unsigned long state:8, ecr_vec:8,
+ ecr_cause:8, ecr_param:8;
#else
- unsigned long event:16, orig_r8:16;
+ unsigned long ecr_param:8, ecr_cause:8,
+ ecr_vec:8, state:8;
#endif
- long orig_r8_word;
+ };
+ unsigned long event;
};
+
+ long user_r25;
};
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
- long res; /* Again this is not needed */
- long r25;
- long r24;
- long r23;
- long r22;
- long r21;
- long r20;
- long r19;
- long r18;
- long r17;
- long r16;
- long r15;
- long r14;
- long r13;
+ long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define instruction_pointer(regs) ((regs)->ret)
@@ -99,18 +81,20 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
-#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
-#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
+#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
-#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
-#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
+#define STATE_SCALL_RESTARTED 0x01
+
+#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
/* open-coded current_thread_info() */ \
register unsigned long sp asm ("sp"); \
unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
- (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
+ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
})
static inline long regs_return_value(struct pt_regs *regs)
@@ -120,11 +104,4 @@ static inline long regs_return_value(struct pt_regs *regs)
#endif /* !__ASSEMBLY__ */
-#define orig_r8_IS_SCALL 0x0001
-#define orig_r8_IS_SCALL_RESTARTED 0x0002
-#define orig_r8_IS_BRKPT 0x0004
-#define orig_r8_IS_EXCPN 0x0004
-#define orig_r8_IS_IRQ1 0x0010
-#define orig_r8_IS_IRQ2 0x0020
-
#endif /* __ASM_PTRACE_H */
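
A stand-alone model of the ECR decode that the new in_syscall()/in_brkpt_trap() rely on (user-space sketch; 0x25 as the trap vector number is an assumption, per arcregs.h):

#include <stdint.h>
#include <stdio.h>

union ecr {
	struct {	/* little-endian field order, as in pt_regs above */
		uint32_t param:8, cause:8, vec:8, state:8;
	};
	uint32_t word;
};

int main(void)
{
	union ecr e = { .word = 0x00250000 };	/* vec=0x25 (trap), param=0 */

	/* syscall: trap vector with zero param; brkpt: trap with non-zero param */
	printf("syscall? %d brkpt? %d\n",
	       e.vec == 0x25 && !e.param, e.vec == 0x25 && !!e.param);
	return 0;
}
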
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
index 6fc1159dfef..09db952e14b 100644
--- a/arch/arc/include/asm/sections.h
+++ b/arch/arc/include/asm/sections.h
@@ -11,8 +11,6 @@
#include <asm-generic/sections.h>
-extern char _int_vec_base_lds[];
extern char __arc_dccm_base[];
-extern char __dtb_start[];
#endif
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
index 4dff5a1e412..602b0970a76 100644
--- a/arch/arc/include/asm/serial.h
+++ b/arch/arc/include/asm/serial.h
@@ -22,4 +22,14 @@
#define BASE_BAUD (arc_get_core_freq() / 16)
+/*
+ * This is definitely going to break early 8250 consoles on multi-platform
+ * images but hey, it won't add any code complexity for a debug feature of
+ * one broken driver.
+ */
+#ifdef CONFIG_ARC_PLAT_TB10X
+#undef BASE_BAUD
+#define BASE_BAUD (arc_get_core_freq() / 16 / 3)
+#endif
+
#endif /* _ASM_ARC_SERIAL_H */
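
BASE_BAUD here follows the 8250 convention of uartclk/16, so the divisor programmed into the UART is simply BASE_BAUD/baudrate; e.g. with an assumed 500 MHz core clock:

/* illustrative arithmetic only; 500 MHz is an assumed core frequency */
unsigned int base_baud = 500000000 / 16;	/* 31,250,000 */
unsigned int divisor   = base_baud / 115200;	/* ~271, programmed into DLL/DLH */
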
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 229e5068149..e10f8cef56a 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,7 +31,7 @@ struct cpuinfo_data {
extern int root_mountflags, end_mem;
extern int running_on_hw;
-void __init setup_processor(void);
+void setup_processor(void);
void __init setup_arch_memory(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 00000000000..fffeecc0427
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define SHMLBA (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
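
With SHMLBA of two pages, shared mappings must be placed at addresses that agree modulo SHMLBA so every user alias gets the same cache color. A sketch of the alignment helper an arch_get_unmapped_area() would typically apply (COLOUR_ALIGN is a conventional name, assumed here):

/* round a candidate mmap address up to the next cache-color boundary */
#define COLOUR_ALIGN(addr)	(((addr) + SHMLBA - 1) & ~(SHMLBA - 1))
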
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index c4fb211dcd2..5d06eee43ea 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -30,7 +30,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
-extern void __init first_lines_of_secondary(void);
+extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
/*
@@ -46,14 +46,14 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
*
* @info: SoC SMP specific info for /proc/cpuinfo etc
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
- * @ipi_send: To send IPI to a @cpumask
- * @ips_clear: To clear IPI received by @cpu at @irq
+ * @ipi_send: To send IPI to a @cpu
+ * @ipi_clear: To clear IPI received at @irq
*/
struct plat_smp_ops {
const char *info;
void (*cpu_kick)(int cpu, unsigned long pc);
- void (*ipi_send)(void *callmap);
- void (*ipi_clear)(int cpu, int irq);
+ void (*ipi_send)(int cpu);
+ void (*ipi_clear)(int irq);
};
/* TBD: stop exporting it for direct population by platform */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b..b6a8c2dfbe6 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
smp_mb();
}
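
The unlock now uses the atomic EX (exchange) instruction instead of a plain store, matching the EX used on the acquire side. A user-space model of the protocol with C11 atomics (lock values are assumptions):

#include <stdatomic.h>

#define SLOCK_UNLOCKED	0U	/* assumed __ARCH_SPIN_LOCK_UNLOCKED__ */
#define SLOCK_LOCKED	1U

static int model_trylock(atomic_uint *slock)
{
	/* EX: swap in LOCKED; the old value says whether we got the lock */
	return atomic_exchange(slock, SLOCK_LOCKED) == SLOCK_UNLOCKED;
}

static void model_unlock(atomic_uint *slock)
{
	/* unlock is an exchange too, as in the patched arch_spin_unlock() */
	(void)atomic_exchange(slock, SLOCK_UNLOCKED);
}
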
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 8276bfd6170..662627ced4f 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -20,9 +20,9 @@ typedef struct {
#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
/*
- * Unlocked: 0x01_00_00_00
- * Read lock(s): 0x00_FF_00_00 to say 0x01
- * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000
+ * Unlocked : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
*/
typedef struct {
volatile unsigned int counter;
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 33ab3048e9b..29de0980430 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -18,7 +18,7 @@ static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
if (user_mode(regs) && in_syscall(regs))
- return regs->orig_r8;
+ return regs->r8;
else
return -1;
}
@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
- /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
- regs->r8 = regs->orig_r8;
+ regs->r0 = regs->orig_r0;
}
static inline long
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4..dd785befe7f 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d50a4cdd7f..45be2167201 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
#endif /* !__ASSEMBLY__ */
-#define PREEMPT_ACTIVE 0x10000000
-
/*
* thread information flags
* - these are process state flags that various assembly files may need to
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
index a5ff961b1ef..8a1ec96012a 100644
--- a/arch/arc/include/asm/tlb-mmu1.h
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -9,9 +9,9 @@
#ifndef __ASM_TLB_MMU_V1_H__
#define __ASM_TLB_MMU_V1_H__
-#if defined(__ASSEMBLY__) && defined(CONFIG_ARC_MMU_VER == 1)
+#include <asm/mmu.h>
-#include <asm/tlb.h>
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
.macro TLB_WRITE_HEURISTICS
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index 3eb2ce0bdfa..a9db5f62aaf 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,50 +9,39 @@
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
-#ifdef __KERNEL__
-
-#include <asm/pgtable.h>
-
-/* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
- _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
- _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
-
-#ifndef __ASSEMBLY__
-
-#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+#define tlb_flush(tlb) \
+do { \
+ if (tlb->fullmm) \
+ flush_tlb_mm((tlb)->mm); \
+} while (0)
/*
* This pair is called at time of munmap/exit to flush cache and TLB entries
* for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- * as we don't support aliasing configs in our VIPT D$.
- * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well-
- * albiet for difft reasons - its better handled by moving to new ASID
+ * 1) cache-flush part - implemented via tlb_start_vma( ) for VIPT aliasing D$
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) which flushes the TLB range
*
* Note, read http://lkml.org/lkml/2004/1/15/6
*/
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
#define tlb_start_vma(tlb, vma)
-#define tlb_end_vma(tlb, vma)
-
-#define __tlb_remove_tlb_entry(tlb, ptep, address)
-
-#include <linux/pagemap.h>
-#include <asm-generic/tlb.h>
-
-#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
#else
-#define tlb_paranoid_check(a, b)
+#define tlb_start_vma(tlb, vma) \
+do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
#endif
-void arc_mmu_init(void);
-extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-void __init read_decode_mmu_bcr(void);
+#define tlb_end_vma(tlb, vma) \
+do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
-#endif /* __ASSEMBLY__ */
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
-#endif /* __KERNEL__ */
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
#endif /* _ASM_ARC_TLB_H */
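
For orientation, the generic mm code invokes these hooks roughly as below during an unmap (a sketch of the call pattern, not actual mm/ code):

/*
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);	// cache flush for aliasing VIPT D$
 *	... unmap_page_range() ...	// PTEs torn down
 *	tlb_end_vma(&tlb, vma);		// TLB range flush
 *	tlb_finish_mmu(&tlb, start, end);
 */
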
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c..71c7b2e4b87 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375..30c9baffa96 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
* Because it essentially checks if buffer end is within limit and @len is
 * non-negative, which implies that buffer start will be within limit too.
*
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being that, for the majority of cases, @len is a
* compile time constant, causing first sub-expression to be compile time
* subsumed.
*
@@ -53,7 +53,7 @@
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
- (((addr)+(sz)) <= get_fs()))
+ ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
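
The rewritten __user_ok() also avoids a 32-bit wrap: with the old form, addr+sz could overflow and bogusly pass the limit check. A stand-alone demonstration (the limit value is an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t limit = 0x60000000;		/* assumed user-space limit */
	uint32_t addr = 0xfffff000, sz = 0x2000;

	/* old check: addr+sz wraps to 0x1000, wrongly passes */
	printf("old check: %d\n", addr + sz <= limit);
	/* new check: no wrap possible, correctly rejected */
	printf("new check: %d\n", sz <= limit && addr <= limit - sz);
	return 0;
}
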
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 5dbe63f17b6..3e5f071bc00 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -16,13 +16,14 @@
#ifdef CONFIG_ARC_MISALIGN_ACCESS
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
- unsigned long cause, struct callee_regs *cregs);
+ struct callee_regs *cregs);
#else
static inline int
misaligned_fixup(unsigned long address, struct pt_regs *regs,
- unsigned long cause, struct callee_regs *cregs)
+ struct callee_regs *cregs)
{
- return 0;
+ /* Not fixed */
+ return 1;
}
#endif
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index 18fefaea73f..f50d02df78d 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -2,11 +2,4 @@
include include/uapi/asm-generic/Kbuild.asm
header-y += elf.h
header-y += page.h
-header-y += setup.h
-header-y += byteorder.h
header-y += cachectl.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f70207..76a7739aab1 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -11,6 +11,7 @@
#ifndef _UAPI__ASM_ARC_PTRACE_H
#define _UAPI__ASM_ARC_PTRACE_H
+#define PTRACE_GET_THREAD_AREA 25
#ifndef __ASSEMBLY__
/*
@@ -20,28 +21,31 @@
*
* This is to decouple pt_regs from user-space ABI, to be able to change it
* w/o affecting the ABI.
- * Although the layout (initial padding) is similar to pt_regs to have some
- * optimizations when copying pt_regs to/from user_regs_struct.
+ *
+ * The intermediate pad, pad2 are relics of the initial layout based on pt_regs
+ * for optimizations when copying pt_regs to/from user_regs_struct.
+ * We no longer need them, but they can't be changed as they are part of the ABI now.
*
 * Also, sigcontext only cares about the scratch regs as that is what we really
- * save/restore for signal handling.
+ * save/restore for signal handling. However, gdb also uses the same struct,
+ * hence callee regs need to be in there too.
*/
struct user_regs_struct {
- struct scratch {
- long pad;
+ long pad;
+ struct {
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
- struct callee {
- long pad;
+ long pad2;
+ struct {
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;
} callee;
long efa; /* break pt addr, for break points in delay slots */
- long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
+ long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 6f30484f34b..39e58d1cdf9 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,6 +8,13 @@
/******** no-legacy-syscalls-ABI *******/
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
@@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
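
To see why the guard admits a second inclusion: the arch syscall table generator defines __SYSCALL and re-includes this header to emit the table entries, roughly as below (a sketch of the conventional pattern in the arch's sys.c; the range initializer is a GNU C extension used throughout the kernel):

#define __SYSCALL(nr, call)	[nr] = (call),

void *sys_call_table[NR_syscalls] = {
	[0 ... NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>		/* guard re-opens because __SYSCALL is defined */
};
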