Diffstat (limited to 'arch/tile/include')
26 files changed, 281 insertions, 211 deletions
diff --git a/arch/tile/include/arch/interrupts_32.h b/arch/tile/include/arch/interrupts_32.h
index 9d0bfa7e59b..96b5710505b 100644
--- a/arch/tile/include/arch/interrupts_32.h
+++ b/arch/tile/include/arch/interrupts_32.h
@@ -16,10 +16,11 @@
 #define __ARCH_INTERRUPTS_H__

 /** Mask for an interrupt. */
-#ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
-#define INT_MASK(intno) (1 << (intno))
-#else
+#define INT_MASK_LO(intno) (1 << (intno))
+#define INT_MASK_HI(intno) (1 << ((intno) - 32))
+
+#ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif

@@ -89,6 +90,7 @@

 #define NUM_INTERRUPTS 49

+#ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
     INT_MASK(INT_MEM_ERROR) | \
     INT_MASK(INT_DMATLB_MISS) | \
@@ -301,4 +303,5 @@
     INT_MASK(INT_DOUBLE_FAULT) | \
     INT_MASK(INT_AUX_PERF_COUNT) | \
     0)
+#endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
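The new INT_MASK_LO()/INT_MASK_HI() pair exists because 32-bit assembly cannot form a 64-bit mask in one word; a minimal standalone sketch (not from the patch) of how a mask for an interrupt number >= 32 lands in the high word:

#include <assert.h>
int main(void)
{
    int intno = 40;                         /* example interrupt >= 32 */
    unsigned long long mask = 1ULL << intno;        /* INT_MASK(intno) */
    unsigned int lo = (unsigned int)(mask & 0xffffffffU);
    unsigned int hi = (unsigned int)(mask >> 32);
    assert(lo == 0);                        /* bit lives in the high word */
    assert(hi == 1U << (intno - 32));       /* == INT_MASK_HI(intno) */
    return 0;
}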
diff --git a/arch/tile/include/arch/sim.h b/arch/tile/include/arch/sim.h
index 74b7c1624d3..e54b7b0527f 100644
--- a/arch/tile/include/arch/sim.h
+++ b/arch/tile/include/arch/sim.h
@@ -152,16 +152,33 @@ sim_dump(unsigned int mask)

 /**
  * Print a string to the simulator stdout.
  *
- * @param str The string to be written; a newline is automatically added.
+ * @param str The string to be written.
+ */
+static __inline void
+sim_print(const char* str)
+{
+  for ( ; *str != '\0'; str++)
+  {
+    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+                 (*str << _SIM_CONTROL_OPERATOR_BITS));
+  }
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+               (SIM_PUTC_FLUSH_BINARY << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+
+/**
+ * Print a string to the simulator stdout.
+ *
+ * @param str The string to be written (a newline is automatically added).
  */
 static __inline void
 sim_print_string(const char* str)
 {
-  int i;
-  for (i = 0; str[i] != 0; i++)
+  for ( ; *str != '\0'; str++)
   {
     __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
-                 (str[i] << _SIM_CONTROL_OPERATOR_BITS));
+                 (*str << _SIM_CONTROL_OPERATOR_BITS));
   }
   __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
                (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
@@ -203,7 +220,7 @@ sim_command(const char* str)
  * we are passing to the simulator are actually valid in the registers
  * (i.e. returned from memory) prior to the SIM_CONTROL spr.
  */
-static __inline int _sim_syscall0(int val)
+static __inline long _sim_syscall0(int val)
 {
   long result;
   __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
@@ -211,7 +228,7 @@
   return result;
 }

-static __inline int _sim_syscall1(int val, long arg1)
+static __inline long _sim_syscall1(int val, long arg1)
 {
   long result;
   __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
@@ -219,7 +236,7 @@
   return result;
 }

-static __inline int _sim_syscall2(int val, long arg1, long arg2)
+static __inline long _sim_syscall2(int val, long arg1, long arg2)
 {
   long result;
   __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
@@ -233,7 +250,7 @@
    the register values for arguments 3 and up may still be in flight
    to the core from a stack frame reload.
  */
-static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
+static __inline long _sim_syscall3(int val, long arg1, long arg2, long arg3)
 {
   long result;
   __asm__ __volatile__ ("{ and zero, r3, r3 };"
@@ -244,7 +261,7 @@
   return result;
 }

-static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
+static __inline long _sim_syscall4(int val, long arg1, long arg2, long arg3,
                                   long arg4)
 {
   long result;
@@ -256,7 +273,7 @@
   return result;
 }

-static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
+static __inline long _sim_syscall5(int val, long arg1, long arg2, long arg3,
                                   long arg4, long arg5)
 {
   long result;
@@ -268,7 +285,6 @@
   return result;
 }

-
 /**
  * Make a special syscall to the simulator itself, if running under
  * simulation. This is used as the implementation of other functions
@@ -281,7 +297,8 @@
  */
 #define _sim_syscall(syscall_num, nr, args...) \
   _sim_syscall##nr( \
-    ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
+    ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, \
+    ##args)

 /* Values for the "access_mask" parameters below. */
@@ -365,6 +382,13 @@ sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
 }

+/* Return the current CPU speed in cycles per second. */
+static __inline long
+sim_query_cpu_speed(void)
+{
+  return _sim_syscall(SIM_SYSCALL_QUERY_CPU_SPEED, 0);
+}
+
 #endif /* !__DOXYGEN__ */
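The macro change replaces a bare ", args)" with ", \ ##args)" so that zero-argument calls such as sim_query_cpu_speed()'s _sim_syscall(SIM_SYSCALL_QUERY_CPU_SPEED, 0) don't leave a dangling comma. A standalone sketch of the GNU comma-deletion idiom, using stand-in macros rather than the real _sim_syscall family:

#include <stdio.h>
#define CALL0(x)      printf("op=%d\n", x)
#define CALL1(x, a)   printf("op=%d a=%ld\n", x, a)
#define SYSCALL(num, nr, args...)  CALL##nr((num), ##args)  /* GNU extension */
int main(void)
{
    SYSCALL(6, 0);        /* zero extra args: "##" deletes the comma */
    SYSCALL(6, 1, 42L);   /* one extra arg passes through unchanged */
    return 0;
}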
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h
index 7a17082c377..4b44a2b6a09 100644
--- a/arch/tile/include/arch/sim_def.h
+++ b/arch/tile/include/arch/sim_def.h
@@ -243,6 +243,9 @@
  */
 #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5

+/** Syscall number for sim_query_cpu_speed(). */
+#define SIM_SYSCALL_QUERY_CPU_SPEED 6
+

 /*
  * Bit masks which can be shifted by 8, combined with
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 3b8f55b82de..849ab2fa1f5 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm

 header-y += ucontext.h
+header-y += hardwall.h
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index b8c49f98a44..75a16028a95 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -32,7 +32,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return v->counter;
+	return ACCESS_ONCE(v->counter);
 }

 /**
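ACCESS_ONCE() here keeps the compiler from caching or re-fetching v->counter across the read. Roughly equivalent behavior can be sketched with a volatile cast (an illustration, not the kernel's exact macro definition):

/* Forces exactly one load per use; the compiler may not reuse a
 * previously loaded value or split/refetch the access. */
#define READ_ONCE_INT(x) (*(volatile int *)&(x))

static int poll_flag(int *flag)
{
    /* Without the volatile cast the compiler could hoist the load
     * out of the loop and spin forever on a stale value. */
    while (READ_ONCE_INT(*flag) == 0)
        ;
    return READ_ONCE_INT(*flag);
}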
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 6d4f0ff2c68..132e6bbd07e 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -122,7 +122,6 @@ static inline unsigned long __arch_hweight64(__u64 w)
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ext2-non-atomic.h>
-#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/le.h>

 #endif /* _ASM_TILE_BITOPS_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 7a93c001ac1..2638be51a16 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -122,7 +122,7 @@ static inline int test_and_change_bit(unsigned nr,
 	return (_atomic_xor(addr, mask) & mask) != 0;
 }

-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */
+/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	do {} while (0)
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 08a2815b5e4..392e5333dd8 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -40,7 +40,7 @@
 #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES

 /* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 /*
  * Attribute for data that is kept read/write coherent until the end of
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 14a3f8556ac..12fb0fb330e 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -138,55 +138,12 @@ static inline void finv_buffer(void *buffer, size_t size)
 }

 /*
- * Flush & invalidate a VA range that is homed remotely on a single core,
- * waiting until the memory controller holds the flushed values.
+ * Flush and invalidate a VA range that is homed remotely, waiting
+ * until the memory controller holds the flushed values.  If "hfh" is
+ * true, we will do a more expensive flush involving additional loads
+ * to make sure we have touched all the possible home cpus of a buffer
+ * that is homed with "hash for home".
  */
-static inline void finv_buffer_remote(void *buffer, size_t size)
-{
-	char *p;
-	int i;
-
-	/*
-	 * Flush and invalidate the buffer out of the local L1/L2
-	 * and request the home cache to flush and invalidate as well.
-	 */
-	__finv_buffer(buffer, size);
-
-	/*
-	 * Wait for the home cache to acknowledge that it has processed
-	 * all the flush-and-invalidate requests.  This does not mean
-	 * that the flushed data has reached the memory controller yet,
-	 * but it does mean the home cache is processing the flushes.
-	 */
-	__insn_mf();
-
-	/*
-	 * Issue a load to the last cache line, which can't complete
-	 * until all the previously-issued flushes to the same memory
-	 * controller have also completed.  If we weren't striping
-	 * memory, that one load would be sufficient, but since we may
-	 * be, we also need to back up to the last load issued to
-	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
-	 * across memory controllers).  Keep backing up and doing this
-	 * until we are before the beginning of the buffer, or have
-	 * hit all the controllers.
-	 */
-	for (i = 0, p = (char *)buffer + size - 1;
-	     i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
-	     ++i) {
-		const unsigned long STRIPE_WIDTH = 8192;
-
-		/* Force a load instruction to issue. */
-		*(volatile char *)p;
-
-		/* Jump to end of previous stripe. */
-		p -= STRIPE_WIDTH;
-		p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
-	}
-
-	/* Wait for the loads (and thus flushes) to have completed. */
-	__insn_mf();
-}
+void finv_buffer_remote(void *buffer, size_t size, int hfh);

 #endif /* _ASM_TILE_CACHEFLUSH_H */
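For reference, the removed inline body walked backward one 8KB stripe at a time so that every memory controller saw at least one load; the out-of-line replacement presumably retains the same arithmetic. A standalone sketch of that stripe walk (example addresses and a four-controller count assumed):

#include <stdio.h>
int main(void)
{
    const unsigned long STRIPE_WIDTH = 8192;
    unsigned long p = 0x10003fffUL;     /* last byte of some buffer */
    int i;
    for (i = 0; i < 4; i++) {           /* e.g. 1 << CHIP_LOG_NUM_MSHIMS() */
        printf("touch 0x%lx\n", p);     /* stands in for the volatile load */
        p -= STRIPE_WIDTH;
        p |= STRIPE_WIDTH - 1;          /* last byte of the previous stripe */
    }
    return 0;
}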
diff --git a/arch/tile/include/asm/edac.h b/arch/tile/include/asm/edac.h
new file mode 100644
index 00000000000..87fc83eeaff
--- /dev/null
+++ b/arch/tile/include/asm/edac.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_EDAC_H
+#define _ASM_TILE_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void atomic_scrub(void *va, u32 size)
+{
+	/*
+	 * There is nothing to be done here because a correctable
+	 * error is corrected by the mshim.
+	 */
+	return;
+}
+
+#endif /* _ASM_TILE_EDAC_H */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index fe0d10dcae5..d03ec124a59 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -29,16 +29,16 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>

-extern struct __get_user futex_set(int __user *v, int i);
-extern struct __get_user futex_add(int __user *v, int n);
-extern struct __get_user futex_or(int __user *v, int n);
-extern struct __get_user futex_andn(int __user *v, int n);
-extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
+extern struct __get_user futex_set(u32 __user *v, int i);
+extern struct __get_user futex_add(u32 __user *v, int n);
+extern struct __get_user futex_or(u32 __user *v, int n);
+extern struct __get_user futex_andn(u32 __user *v, int n);
+extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);

 #ifndef __tilegx__
-extern struct __get_user futex_xor(int __user *v, int n);
+extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(int __user *uaddr, int n)
+static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
 {
 	struct __get_user asm_ret = __get_user_4(uaddr);
 	if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
 }
 #endif

-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;

-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

 	pagefault_disable();
@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 	return ret;
 }

-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-						int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
 	struct __get_user asm_ret;

-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

 	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
-	return asm_ret.err ? asm_ret.err : asm_ret.val;
+	*uval = asm_ret.val;
+	return asm_ret.err;
 }

 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0521c277bbd..d396d180516 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -54,7 +54,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 				   pte_t *ptep, pte_t pte)
 {
-	set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER);
+	set_pte(ptep, pte);
 }

 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
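futex_atomic_op_inuser() unpacks four fields from the encoded word; the op and cmp extractions are visible above, and the oparg/cmparg sign-extending shifts are assumed here to follow the generic-kernel pattern. A standalone decode sketch with an example encoding (FUTEX_OP_ADD = 1 assumed):

#include <stdio.h>
int main(void)
{
    unsigned int encoded_op = (1u << 28) | (3u << 24) | (1u << 12);
    int op = (encoded_op >> 28) & 7;            /* 1: FUTEX_OP_ADD */
    int cmp = (encoded_op >> 24) & 15;          /* 3 */
    int oparg = (int)(encoded_op << 8) >> 20;   /* 1, sign-extended */
    int cmparg = (int)(encoded_op << 20) >> 20; /* 0, sign-extended */
    printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    return 0;
}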
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 641e4ff3d80..5db0ce54284 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,6 +18,8 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>

+#if !defined(__tilegx__) && defined(__ASSEMBLY__)
+
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
@@ -25,6 +27,16 @@
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+#else
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
+#endif
+
+#else
+
+#if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS \
 	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
 #else
@@ -32,6 +44,8 @@
 	(~(INT_MASK(INT_PERF_COUNT)))
 #endif

+#endif
+
 #ifndef __ASSEMBLY__

 /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
@@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define IRQ_DISABLE(tmp0, tmp1)					\
 	{							\
	 movei  tmp0, -1;					\
-	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
+	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
 	{							\
	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
-	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
+	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
	 mtspr  SPR_INTERRUPT_MASK_SET_K_1, tmp1
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 7979a45430d..3eb53525bf9 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -16,10 +16,11 @@
 #define _ASM_TILE_PAGE_H

 #include <linux/const.h>
+#include <hv/pagesize.h>

 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT	16
-#define HPAGE_SHIFT	24
+#define PAGE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
+#define HPAGE_SHIFT	HV_LOG2_PAGE_SIZE_LARGE

 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
@@ -29,25 +30,18 @@

 #ifdef __KERNEL__

-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-
 /*
- * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
- * definitions in <hv/hypervisor.h>.  We validate this at build time
- * here, and again at runtime during early boot.  We provide a
- * separate definition since userspace doesn't have <hv/hypervisor.h>.
- *
- * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since
- * they are the same on i386 but not TILE.
+ * If the Kconfig doesn't specify, set a maximum zone order that
+ * is enough so that we can create huge pages from small pages given
+ * the respective sizes of the two page types.  See <linux/mmzone.h>.
  */
-#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT
-# error Small page size mismatch in Linux
-#endif
-#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT
-# error Huge page size mismatch in Linux
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
 #endif

+#include <hv/hypervisor.h>
+#include <arch/chip.h>
+
 #ifndef __ASSEMBLY__

 #include <linux/types.h>
@@ -81,12 +75,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * Hypervisor page tables are made of the same basic structure.
  */

-typedef __u64 pteval_t;
-typedef __u64 pmdval_t;
-typedef __u64 pudval_t;
-typedef __u64 pgdval_t;
-typedef __u64 pgprotval_t;
-
 typedef HV_PTE pte_t;
 typedef HV_PTE pgd_t;
 typedef HV_PTE pgprot_t;
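With the constants this patch replaces (PAGE_SHIFT 16, HPAGE_SHIFT 24), the fallback zone order works out so that the buddy allocator's largest block is exactly one huge page. A compile-time check of that arithmetic (example values, not the HV defines themselves):

/* FORCE_MAX_ZONEORDER = 24 - 16 + 1 = 9, and the largest buddy block
 * is 2^(9-1) small pages = 2^8 * 64KB = 16MB = one huge page. */
#define PAGE_SHIFT_EX  16
#define HPAGE_SHIFT_EX 24
#define MAX_ZONEORDER_EX (HPAGE_SHIFT_EX - PAGE_SHIFT_EX + 1)
typedef char assert_order[(1 << (MAX_ZONEORDER_EX - 1)) ==
			  (1 << (HPAGE_SHIFT_EX - PAGE_SHIFT_EX)) ? 1 : -1];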
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index cf52791a550..e919c0bdc22 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -41,9 +41,9 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-	set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER);
+	set_pte(pmdp, pmd);
 #else
-	set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER);
+	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
 #endif
 }

@@ -100,6 +100,9 @@ pte_t *get_prealloc_pte(unsigned long pfn);
 /* During init, we can shatter kernel huge pages if needed. */
 void shatter_pmd(pmd_t *pmd);

+/* After init, a more complex technique is required. */
+void shatter_huge_page(unsigned long addr);
+
 #ifdef __tilegx__
 /* We share a single page allocator for both L1 and L2 page tables. */
 #if HV_L1_SIZE != HV_L2_SIZE
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index a6604e9485d..1a20b7ef8ea 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -233,15 +233,23 @@ static inline void __pte_clear(pte_t *ptep)
 #define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

+/* Return PA and protection info for a given kernel VA. */
+int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
+
+/*
+ * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
+ * the right order on 32-bit platforms and also allows us to write
+ * hooks to check valid PTEs, etc., if we want.
+ */
+void __set_pte(pte_t *ptep, pte_t pte);
+
 /*
- * set_pte_order() sets the given PTE and also sanity-checks the
+ * set_pte() sets the given PTE and also sanity-checks the
  * requested PTE against the page homecaching.  Unspecified parts
  * of the PTE are filled in when it is written to memory, i.e. all
  * caching attributes if "!forcecache", or the home cpu if "anyhome".
  */
-extern void set_pte_order(pte_t *ptep, pte_t pte, int order);
-
-#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0)
+extern void set_pte(pte_t *ptep, pte_t pte);
 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

@@ -293,21 +301,6 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
 #define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })

 /*
- * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
- *
- *  dst - pointer to pgd range anwhere on a pgd page
- *  src - ""
- *  count - the number of pgds to copy.
- *
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-{
-	memcpy(dst, src, count * sizeof(pgd_t));
-}
-
-/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
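A sketch of the write-ordering hazard the __set_pte() comment above describes: on a 32-bit platform a 64-bit PTE is stored as two word writes, so the word containing the valid bit must land last or a concurrent walker can observe a half-written entry. The field layout below is assumed for illustration; this is not the kernel's actual implementation:

typedef struct { unsigned int lo, hi; } pte64;  /* assumed: valid bit in lo */

static void set_pte64(volatile pte64 *ptep, pte64 pte)
{
    ptep->hi = pte.hi;                      /* PFN word first... */
    __asm__ __volatile__("" ::: "memory");  /* keep the stores ordered */
    ptep->lo = pte.lo;                      /* ...valid-bit word last */
}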
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 53ec3488474..9f98529761f 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -24,6 +24,7 @@
 #define PGDIR_SIZE	HV_PAGE_SIZE_LARGE
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))

 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,6 +34,7 @@
  * this nomenclature is somewhat confusing.
  */
 #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
+#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))

 #ifndef __ASSEMBLY__

@@ -94,7 +96,6 @@ static inline int pgd_addr_invalid(unsigned long addr)
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

 extern int ptep_test_and_clear_young(struct vm_area_struct *,
				     unsigned long addr, pte_t *);
@@ -110,6 +111,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
	return pte;
 }

+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
+}
+
 /* Create a pmd from a PTFN. */
 static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
 {
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index a9e7c876033..e6889474038 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -269,7 +269,6 @@ extern char chip_model[64];
 /* Data on which physical memory controller corresponds to which NUMA node. */
 extern int node_controller[];

-
 /* Do we dump information to the console when a user application crashes? */
 extern int show_crashinfo;
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index ac6d343129d..6be2246e015 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -141,6 +141,9 @@ struct single_step_state {
 /* Single-step the instruction at regs->pc */
 extern void single_step_once(struct pt_regs *regs);

+/* Clean up after execve(). */
+extern void single_step_execve(void);
+
 struct task_struct;

 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
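Plugging plausible TILEPro values into the new SIZEOF_* macros (assuming PGDIR_SHIFT is 24, i.e. HV_LOG2_PAGE_SIZE_LARGE, and an 8-byte HV_PTE; these numbers are assumptions for illustration):

#include <assert.h>
int main(void)
{
    int ptrs_per_pgd = 1 << (32 - 24);  /* 256 top-level entries */
    int ptrs_per_pte = 1 << (24 - 16);  /* 256 level-2 entries */
    assert(ptrs_per_pgd * 8 == 2048);   /* SIZEOF_PGD: one 2KB table */
    assert(ptrs_per_pte * 8 == 2048);   /* SIZEOF_PTE: one 2KB table */
    return 0;
}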
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index 88efdde8dd2..a8f2c6e31a8 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -78,13 +78,6 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #define _RD_COUNT_SHIFT 24
 #define _RD_COUNT_WIDTH 8

-/* Internal functions; do not use. */
-void arch_read_lock_slow(arch_rwlock_t *, u32);
-int arch_read_trylock_slow(arch_rwlock_t *);
-void arch_read_unlock_slow(arch_rwlock_t *);
-void arch_write_lock_slow(arch_rwlock_t *, u32);
-void arch_write_unlock_slow(arch_rwlock_t *, u32);
-
 /**
  * arch_read_can_lock() - would read_trylock() succeed?
  */
@@ -104,94 +97,32 @@ static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
 /**
  * arch_read_lock() - acquire a read lock.
  */
-static inline void arch_read_lock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val << _RD_COUNT_WIDTH)) {
-		arch_read_lock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
-}
+void arch_read_lock(arch_rwlock_t *rwlock);

 /**
- * arch_read_lock() - acquire a write lock.
+ * arch_write_lock() - acquire a write lock.
  */
-static inline void arch_write_lock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val != 0)) {
-		arch_write_lock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = 1 << _WR_NEXT_SHIFT;
-}
+void arch_write_lock(arch_rwlock_t *rwlock);

 /**
  * arch_read_trylock() - try to acquire a read lock.
  */
-static inline int arch_read_trylock(arch_rwlock_t *rwlock)
-{
-	int locked;
-	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val & 1))
-		return arch_read_trylock_slow(rwlock);
-	locked = (val << _RD_COUNT_WIDTH) == 0;
-	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
-	return locked;
-}
+int arch_read_trylock(arch_rwlock_t *rwlock);

 /**
  * arch_write_trylock() - try to acquire a write lock.
  */
-static inline int arch_write_trylock(arch_rwlock_t *rwlock)
-{
-	u32 val = __insn_tns((int *)&rwlock->lock);
-
-	/*
-	 * If a tns is in progress, or there's a waiting or active locker,
-	 * or active readers, we can't take the lock, so give up.
-	 */
-	if (unlikely(val != 0)) {
-		if (!(val & 1))
-			rwlock->lock = val;
-		return 0;
-	}
-
-	/* Set the "next" field to mark it locked. */
-	rwlock->lock = 1 << _WR_NEXT_SHIFT;
-	return 1;
-}
+int arch_write_trylock(arch_rwlock_t *rwlock);

 /**
  * arch_read_unlock() - release a read lock.
  */
-static inline void arch_read_unlock(arch_rwlock_t *rwlock)
-{
-	u32 val;
-	mb();  /* guarantee anything modified under the lock is visible */
-	val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val & 1)) {
-		arch_read_unlock_slow(rwlock);
-		return;
-	}
-	rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
-}
+void arch_read_unlock(arch_rwlock_t *rwlock);

 /**
  * arch_write_unlock() - release a write lock.
  */
-static inline void arch_write_unlock(arch_rwlock_t *rwlock)
-{
-	u32 val;
-	mb();  /* guarantee anything modified under the lock is visible */
-	val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
-		arch_write_unlock_slow(rwlock, val);
-		return;
-	}
-	rwlock->lock = 0;
-}
+void arch_write_unlock(arch_rwlock_t *rwlock);

 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index f908473c322..4d97a2db932 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -18,13 +18,14 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <asm/backtrace.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>

 /* Everything we need to keep track of a backtrace iteration */
 struct KBacktraceIterator {
	BacktraceIterator it;
	struct task_struct *task;	/* task we are backtracing */
-	HV_PTE *pgtable;	/* page table for user space access */
+	pte_t *pgtable;		/* page table for user space access */
	int end;		/* iteration complete. */
	int new_context;	/* new context is starting */
	int profile;		/* profiling, so stop on async intrpt */
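The constants left behind (_RD_COUNT_SHIFT 24, _RD_COUNT_WIDTH 8) still describe the lock word the now out-of-line functions manipulate: the top byte counts readers, and "val << _RD_COUNT_WIDTH != 0" in the removed fast path meant writer state (or an in-flight tns) below the count. A standalone sketch of that layout, for illustration only:

#include <stdio.h>
int main(void)
{
    unsigned int val = 0;
    val += 1U << 24;                        /* first reader takes the lock */
    val += 1U << 24;                        /* second reader */
    printf("readers = %u\n", val >> 24);                /* 2 */
    printf("writer bits set = %d\n", (val << 8) != 0);  /* 0: readers only */
    return 0;
}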
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index 5388850deeb..23d1842f483 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -90,7 +90,24 @@
 #endif

 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
-int __mb_incoherent(void);  /* Helper routine for mb_incoherent(). */
+#include <hv/syscall_public.h>
+/*
+ * Issue an uncacheable load to each memory controller, then
+ * wait until those loads have completed.
+ */
+static inline void __mb_incoherent(void)
+{
+	long clobber_r10;
+	asm volatile("swint2"
+		     : "=R10" (clobber_r10)
+		     : "R10" (HV_SYS_fence_incoherent)
+		     : "r0", "r1", "r2", "r3", "r4",
+		       "r5", "r6", "r7", "r8", "r9",
+		       "r11", "r12", "r13", "r14",
+		       "r15", "r16", "r17", "r18", "r19",
+		       "r20", "r21", "r22", "r23", "r24",
+		       "r25", "r26", "r27", "r28", "r29");
+}
 #endif

 /* Fence to guarantee visibility of stores to incoherent memory. */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 3872f2b345d..3405b52853b 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -68,6 +68,7 @@ struct thread_info {
 #else
 #define THREAD_SIZE_ORDER (0)
 #endif
+#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)
@@ -83,7 +84,7 @@ register unsigned long stack_pointer __asm__("sp");
	((struct thread_info *)(stack_pointer & -THREAD_SIZE))

 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-extern struct thread_info *alloc_thread_info(struct task_struct *task);
+extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node);
 extern void free_thread_info(struct thread_info *info);

 /* Sit on a nap instruction until interrupted. */
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h
index 3baf5fc4c0a..29921f0b86d 100644
--- a/arch/tile/include/asm/timex.h
+++ b/arch/tile/include/asm/timex.h
@@ -38,6 +38,9 @@ static inline cycles_t get_cycles(void)

 cycles_t get_clock_rate(void);

+/* Convert nanoseconds to core clock cycles. */
+cycles_t ns2cycles(unsigned long nsecs);
+
 /* Called at cpu initialization to set some low-level constants. */
 void setup_clock(void);
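A sketch of the conversion ns2cycles() presumably performs, rounding up so a delay is never shorter than requested; the real kernel version uses the measured clock rate from get_clock_rate() and guards against overflow, whereas this illustration assumes a 1 GHz example clock:

#include <stdio.h>
int main(void)
{
    unsigned long long cycles_per_sec = 1000000000ULL;  /* assumed rate */
    unsigned long nsecs = 2500;
    unsigned long long cycles =
        (nsecs * cycles_per_sec + 999999999ULL) / 1000000000ULL;
    printf("%lu ns -> %llu cycles\n", nsecs, cycles);   /* 2500 */
    return 0;
}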
diff --git a/arch/tile/include/hv/drv_mshim_intf.h b/arch/tile/include/hv/drv_mshim_intf.h
new file mode 100644
index 00000000000..c6ef3bdc55c
--- /dev/null
+++ b/arch/tile/include/hv/drv_mshim_intf.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drv_mshim_intf.h
+ * Interface definitions for the Linux EDAC memory controller driver.
+ */
+
+#ifndef _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
+#define _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
+
+/** Number of memory controllers in the public API. */
+#define TILE_MAX_MSHIMS 4
+
+/** Memory info under each memory controller. */
+struct mshim_mem_info
+{
+	uint64_t mem_size;	/**< Total memory size in bytes. */
+	uint8_t mem_type;	/**< Memory type, DDR2 or DDR3. */
+	uint8_t mem_ecc;	/**< Memory supports ECC. */
+};
+
+/**
+ * DIMM error structure.
+ * For now, only correctable errors are counted and the mshim doesn't record
+ * the error PA.  The hypervisor panics on uncorrectable errors.
+ */
+struct mshim_mem_error
+{
+	uint32_t sbe_count;	/**< Number of single-bit errors. */
+};
+
+/** Read this offset to get the memory info per mshim. */
+#define MSHIM_MEM_INFO_OFF 0x100
+
+/** Read this offset to check DIMM error. */
+#define MSHIM_MEM_ERROR_OFF 0x200
+
+#endif /* _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index f672544cd4f..1b8bf03d62a 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -338,9 +338,10 @@ typedef int HV_Errno;
 #define HV_ENOTREADY	-812	/**< Device not ready */
 #define HV_EIO		-813	/**< I/O error */
 #define HV_ENOMEM	-814	/**< Out of memory */
+#define HV_EAGAIN	-815	/**< Try again */

 #define HV_ERR_MAX	-801	/**< Largest HV error code */
-#define HV_ERR_MIN	-814	/**< Smallest HV error code */
+#define HV_ERR_MIN	-815	/**< Smallest HV error code */

 #ifndef __ASSEMBLER__

@@ -867,6 +868,43 @@ typedef struct
  */
 HV_PhysAddrRange hv_inquire_physical(int idx);

+/** Possible DIMM types. */
+typedef enum
+{
+  NO_DIMM = 0,	/**< No DIMM */
+  DDR2 = 1,	/**< DDR2 */
+  DDR3 = 2	/**< DDR3 */
+} HV_DIMM_Type;
+
+#ifdef __tilegx__
+
+/** Log2 of minimum DIMM bytes supported by the memory controller. */
+#define HV_MSH_MIN_DIMM_SIZE_SHIFT 29
+
+/** Max number of DIMMs contained by one memory controller. */
+#define HV_MSH_MAX_DIMMS 8
+
+#else
+
+/** Log2 of minimum DIMM bytes supported by the memory controller. */
+#define HV_MSH_MIN_DIMM_SIZE_SHIFT 26
+
+/** Max number of DIMMs contained by one memory controller. */
+#define HV_MSH_MAX_DIMMS 2
+
+#endif
+
+/** Number of bits to right-shift to get the DIMM type. */
+#define HV_DIMM_TYPE_SHIFT 0
+
+/** Bits to mask to get the DIMM type. */
+#define HV_DIMM_TYPE_MASK 0xf
+
+/** Number of bits to right-shift to get the DIMM size. */
+#define HV_DIMM_SIZE_SHIFT 4
+
+/** Bits to mask to get the DIMM size. */
+#define HV_DIMM_SIZE_MASK 0xf
+
 /** Memory controller information. */
 typedef struct
@@ -964,6 +1002,11 @@ HV_ASIDRange hv_inquire_asid(int idx);

 /** Waits for at least the specified number of nanoseconds then returns.
  *
+ * NOTE: this deprecated function currently assumes a 750 MHz clock,
+ * and is thus not generally suitable for use.  New code should call
+ * hv_sysconf(HV_SYSCONF_CPU_SPEED), compute a cycle count to wait for,
+ * and delay by looping while checking the cycle counter SPR.
+ *
  * @param nanosecs The number of nanoseconds to sleep.
  */
 void hv_nanosleep(int nanosecs);
@@ -1038,6 +1081,7 @@ int hv_console_write(HV_VirtAddr bytes, int len);
  * downcall:
  *
  *  INT_MESSAGE_RCV_DWNCL    (hypervisor message available)
+ *  INT_DEV_INTR_DWNCL       (device interrupt)
  *  INT_DMATLB_MISS_DWNCL    (DMA TLB miss)
  *  INT_SNITLB_MISS_DWNCL    (SNI TLB miss)
  *  INT_DMATLB_ACCESS_DWNCL  (DMA TLB access violation)
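A sketch of unpacking a DIMM descriptor byte with the shift/mask pairs defined above. The packed encoding (type in the low nibble, a log2 size increment above it, added to HV_MSH_MIN_DIMM_SIZE_SHIFT) is inferred from these macros rather than stated by the header, so treat the layout as an assumption:

#include <stdio.h>
int main(void)
{
    unsigned int dimm = (3 << 4) | 2;         /* example: DDR3, size field 3 */
    unsigned int type = (dimm >> 0) & 0xf;    /* HV_DIMM_TYPE_SHIFT/MASK */
    unsigned int size_log2 =
        ((dimm >> 4) & 0xf) + 26;             /* + MIN_DIMM_SIZE_SHIFT (tilepro) */
    printf("type=%u (2=DDR3), bytes=2^%u\n", type, size_log2);
    return 0;
}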