From 09378d7c21a806d5ddb75ebcae242d98cea30e44 Mon Sep 17 00:00:00 2001
From: Wanlong Gao
Date: Wed, 1 Jun 2011 22:37:43 +0800
Subject: xtensa: fix the incompatible pointer type warning in time.c

Fix the definition of the function ccount_read to be compatible
with the member read of the structure clocksource.

Signed-off-by: Wanlong Gao
Signed-off-by: Chris Zankel
---
 arch/xtensa/kernel/time.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index ac62f9cf1e1..19b32a0eaeb 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -31,7 +31,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */
 unsigned long nsec_per_ccount; /* nsec per ccount increment */
 #endif
 
-static cycle_t ccount_read(void)
+static cycle_t ccount_read(struct clocksource *cs)
 {
 	return (cycle_t)get_ccount();
 }
-- 
cgit v1.2.3-18-g5258


From 02b25d811f949fc70badd6535dfaf13a3c5decf9 Mon Sep 17 00:00:00 2001
From: Nicolas Kaiser
Date: Sat, 7 May 2011 20:55:42 +0200
Subject: xtensa: unbalanced parentheses

Signed-off-by: Nicolas Kaiser
Signed-off-by: Chris Zankel
---
 arch/xtensa/variants/s6000/gpio.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index b89541ba39a..da9e85c13b0 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
 	int cirq;
 
 	chip->irq_mask(&desc->irq_data);
-	chip->irq_ack(&desc->irq_data));
+	chip->irq_ack(&desc->irq_data);
 	pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
 	cirq = IRQ_BASE - 1;
 	while (pending) {
@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
 		pending >>= n;
 		generic_handle_irq(cirq);
 	}
-	chip->irq_unmask(&desc->irq_data));
+	chip->irq_unmask(&desc->irq_data);
 }
 
 extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
-- 
cgit v1.2.3-18-g5258


From 6550162200b40d1e725167f8a617255c42b57552 Mon Sep 17 00:00:00 2001
From: Chris Zankel
Date: Tue, 13 Nov 2012 15:16:36 -0800
Subject: xtensa: add config option to disable linker relaxation

The default linker behavior is to optimize identical literal values
and remove unnecessary overhead from assembler-generated "longcall"
sequences to reduce code size. Provide an option to disable this
behavior to improve compile time.

Signed-off-by: Chris Zankel
---
 arch/xtensa/Kconfig.debug | 13 +++++++++++--
 arch/xtensa/Makefile      |  4 ++++
 2 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index 11c585295dd..be5fb4c6243 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,15 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-endmenu
-
+config LD_NO_RELAX
+	bool "Disable linker relaxation"
+	default n
+	help
+	  Enable this function to disable link-time optimizations.
+	  The default linker behavior is to combine identical literal
+	  values to reduce code size and remove unnecessary overhead from
+	  assembler-generated 'longcall' sequences.
+	  Enabling this option improves the link time but increases the
+	  code size, and possibly execution time.
+endmenu diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index bb5ba61723f..ab63c9beb93 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -49,6 +49,10 @@ KBUILD_CFLAGS += -pipe -mlongcalls KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) +ifneq ($(CONFIG_LD_NO_RELAX),) +LDFLAGS := --no-relax +endif + vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) -- cgit v1.2.3-18-g5258 From c0226e34a4293dee0e7c5787e1ebfc5ee8b44b7c Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 9 Aug 2012 05:25:35 +0400 Subject: xtensa: make DoubleExceptionVector literals fit the gap Manually load references to exc_table from the explicit literal in order to fit DoubleExceptionVector.literals into the available 16-byte gap before DoubleExceptionVector.text in the absence of link time relaxation. Without this fix DoubleExceptionVector.literal section overlaps DoubleExceptionVector.text section in the linked vmlinux image. Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/kernel/vectors.S | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 4462c1e595c..3a57c15f794 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -225,7 +225,13 @@ ENTRY(_DoubleExceptionVector) /* Window overflow/underflow exception. Get stack pointer. */ mov a3, a2 - movi a2, exc_table + /* This explicit literal and the following references to it are made + * in order to fit DoubleExceptionVector.literals into the available + * 16-byte gap before DoubleExceptionVector.text in the absence of + * link time relaxation. See kernel/vmlinux.lds.S + */ + .literal .Lexc_table, exc_table + l32r a2, .Lexc_table l32i a2, a2, EXC_TABLE_KSTK /* Check for overflow/underflow exception, jump if overflow. */ @@ -255,7 +261,7 @@ ENTRY(_DoubleExceptionVector) s32i a0, a2, PT_AREG0 wsr a3, excsave1 # save a3 - movi a3, exc_table + l32r a3, .Lexc_table rsr a0, exccause s32i a0, a2, PT_DEPC # mark it as a regular exception @@ -267,7 +273,7 @@ ENTRY(_DoubleExceptionVector) /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */ - movi a3, exc_table + l32r a3, .Lexc_table s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable /* Enter critical section. */ @@ -296,7 +302,7 @@ ENTRY(_DoubleExceptionVector) /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ - movi a3, exc_table + l32r a3, .Lexc_table rsr a0, exccause addx4 a0, a0, a3 l32i a0, a0, EXC_TABLE_FAST_USER -- cgit v1.2.3-18-g5258 From d1538c4675f37d0eeb34bd38bec798b3b29a5a7e Mon Sep 17 00:00:00 2001 From: Chris Zankel Date: Fri, 16 Nov 2012 16:16:20 -0800 Subject: xtensa: provide proper assembler function boundaries with ENDPROC() Use ENDPROC() to mark the end of assembler functions. 
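For context on the patch above: ENTRY() and ENDPROC() are the generic
assembler linkage macros from <linux/linkage.h>, not xtensa-specific code.
A simplified sketch of the generic 3.x-era definitions (architectures may
override them; shown here for orientation only, not copied from this tree):

	/* Simplified sketch of <linux/linkage.h> (assembly-only section).
	 * ENDPROC marks the symbol as a function and records its size,
	 * so objdump, kallsyms and debuggers see proper boundaries. */
	#define ENTRY(name) \
		.globl name; \
		ALIGN; \
		name:

	#define END(name) \
		.size name, . - name

	#define ENDPROC(name) \
		.type name, @function; \
		END(name)

Without ENDPROC(), the symbols exist but carry no type or size information,
which is what this patch remedies for the xtensa assembler functions below.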
Signed-off-by: Chris Zankel --- arch/xtensa/kernel/align.S | 1 + arch/xtensa/kernel/coprocessor.S | 22 +++++++++++++++++ arch/xtensa/kernel/entry.S | 30 +++++++++++++++++++++-- arch/xtensa/kernel/head.S | 14 +++++++---- arch/xtensa/kernel/vectors.S | 51 ++++++++++++++++++++++++++-------------- arch/xtensa/lib/checksum.S | 5 +++- arch/xtensa/mm/misc.S | 49 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 147 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 934ae58e2c7..39d2f597382 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -450,6 +450,7 @@ ENTRY(fast_unaligned) 1: movi a0, _user_exception jx a0 +ENDPROC(fast_unaligned) #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 54c3be313bf..60bb1358924 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S @@ -43,10 +43,13 @@ /* IO protection is currently unsupported. */ ENTRY(fast_io_protect) + wsr a0, excsave1 movi a0, unrecoverable_exception callx0 a0 +ENDPROC(fast_io_protect) + #if XTENSA_HAVE_COPROCESSORS /* @@ -139,6 +142,7 @@ ENTRY(fast_io_protect) */ ENTRY(coprocessor_save) + entry a1, 32 s32i a0, a1, 0 movi a0, .Lsave_cp_regs_jump_table @@ -150,7 +154,10 @@ ENTRY(coprocessor_save) 1: l32i a0, a1, 0 retw +ENDPROC(coprocessor_save) + ENTRY(coprocessor_load) + entry a1, 32 s32i a0, a1, 0 movi a0, .Lload_cp_regs_jump_table @@ -162,6 +169,8 @@ ENTRY(coprocessor_load) 1: l32i a0, a1, 0 retw +ENDPROC(coprocessor_load) + /* * coprocessor_flush(struct task_info*, index) * a2 a3 @@ -178,6 +187,7 @@ ENTRY(coprocessor_load) ENTRY(coprocessor_flush) + entry a1, 32 s32i a0, a1, 0 movi a0, .Lsave_cp_regs_jump_table @@ -191,6 +201,8 @@ ENTRY(coprocessor_flush) 1: l32i a0, a1, 0 retw +ENDPROC(coprocessor_flush) + ENTRY(coprocessor_restore) entry a1, 32 s32i a0, a1, 0 @@ -205,6 +217,8 @@ ENTRY(coprocessor_restore) 1: l32i a0, a1, 0 retw +ENDPROC(coprocessor_restore) + /* * Entry condition: * @@ -220,10 +234,12 @@ ENTRY(coprocessor_restore) */ ENTRY(fast_coprocessor_double) + wsr a0, excsave1 movi a0, unrecoverable_exception callx0 a0 +ENDPROC(fast_coprocessor_double) ENTRY(fast_coprocessor) @@ -327,9 +343,15 @@ ENTRY(fast_coprocessor) rfe +ENDPROC(fast_coprocessor) + .data + ENTRY(coprocessor_owner) + .fill XCHAL_CP_MAX, 4, 0 +END(coprocessor_owner) + #endif /* XTENSA_HAVE_COPROCESSORS */ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 90bfc1dbc13..41ad9cfe9a2 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -219,6 +219,7 @@ _user_exception: j common_exception +ENDPROC(user_exception) /* * First-level exit handler for kernel exceptions @@ -641,6 +642,8 @@ common_exception_exit: l32i a1, a1, PT_AREG1 rfde +ENDPROC(kernel_exception) + /* * Debug exception handler. * @@ -701,6 +704,7 @@ ENTRY(debug_exception) /* Debug exception while in exception mode. */ 1: j 1b // FIXME!! +ENDPROC(debug_exception) /* * We get here in case of an unrecoverable exception. @@ -751,6 +755,7 @@ ENTRY(unrecoverable_exception) 1: j 1b +ENDPROC(unrecoverable_exception) /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ @@ -929,6 +934,7 @@ ENTRY(fast_alloca) l32i a2, a2, PT_AREG2 rfe +ENDPROC(fast_alloca) /* * fast system calls. 
@@ -966,6 +972,8 @@ ENTRY(fast_syscall_kernel) j kernel_exception +ENDPROC(fast_syscall_kernel) + ENTRY(fast_syscall_user) /* Skip syscall. */ @@ -983,6 +991,8 @@ ENTRY(fast_syscall_user) j user_exception +ENDPROC(fast_syscall_user) + ENTRY(fast_syscall_unrecoverable) /* Restore all states. */ @@ -995,7 +1005,7 @@ ENTRY(fast_syscall_unrecoverable) movi a0, unrecoverable_exception callx0 a0 - +ENDPROC(fast_syscall_unrecoverable) /* * sysxtensa syscall handler @@ -1101,7 +1111,7 @@ CATCH movi a2, -EINVAL rfe - +ENDPROC(fast_syscall_xtensa) /* fast_syscall_spill_registers. @@ -1160,6 +1170,8 @@ ENTRY(fast_syscall_spill_registers) movi a2, 0 rfe +ENDPROC(fast_syscall_spill_registers) + /* Fixup handler. * * We get here if the spill routine causes an exception, e.g. tlb miss. @@ -1464,6 +1476,8 @@ ENTRY(_spill_registers) callx0 a0 # should not return 1: j 1b +ENDPROC(_spill_registers) + #ifdef CONFIG_MMU /* * We should never get here. Bail out! @@ -1475,6 +1489,8 @@ ENTRY(fast_second_level_miss_double_kernel) callx0 a0 # should not return 1: j 1b +ENDPROC(fast_second_level_miss_double_kernel) + /* First-level entry handler for user, kernel, and double 2nd-level * TLB miss exceptions. Note that for now, user and kernel miss * exceptions share the same entry point and are handled identically. @@ -1682,6 +1698,7 @@ ENTRY(fast_second_level_miss) j _kernel_exception 1: j _user_exception +ENDPROC(fast_second_level_miss) /* * StoreProhibitedException @@ -1777,6 +1794,9 @@ ENTRY(fast_store_prohibited) bbsi.l a2, PS_UM_BIT, 1f j _kernel_exception 1: j _user_exception + +ENDPROC(fast_store_prohibited) + #endif /* CONFIG_MMU */ /* @@ -1787,6 +1807,7 @@ ENTRY(fast_store_prohibited) */ ENTRY(system_call) + entry a1, 32 /* regs->syscall = regs->areg[2] */ @@ -1831,6 +1852,8 @@ ENTRY(system_call) callx4 a4 retw +ENDPROC(system_call) + /* * Task switch. @@ -1899,6 +1922,7 @@ ENTRY(_switch_to) retw +ENDPROC(_switch_to) ENTRY(ret_from_fork) @@ -1914,6 +1938,8 @@ ENTRY(ret_from_fork) j common_exception_return +ENDPROC(ret_from_fork) + /* * Kernel thread creation helper * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index bdc50788f35..417998c0210 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -47,16 +47,19 @@ */ __HEAD - .globl _start -_start: _j 2f +ENTRY(_start) + + _j 2f .align 4 1: .word _startup 2: l32r a0, 1b jx a0 +ENDPROC(_start) + .section .init.text, "ax" - .align 4 -_startup: + +ENTRY(_startup) /* Disable interrupts and exceptions. */ @@ -230,6 +233,7 @@ _startup: should_never_return: j should_never_return +ENDPROC(_startup) /* * BSS section @@ -239,6 +243,8 @@ __PAGE_ALIGNED_BSS #ifdef CONFIG_MMU ENTRY(swapper_pg_dir) .fill PAGE_SIZE, 1, 0 +END(swapper_pg_dir) #endif ENTRY(empty_zero_page) .fill PAGE_SIZE, 1, 0 +END(empty_zero_page) diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 3a57c15f794..9365ee5064d 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector) l32i a0, a0, EXC_TABLE_FAST_USER # load handler jx a0 +ENDPROC(_UserExceptionVector) + /* * Kernel exception vector. 
(Exceptions with PS.UM == 0, PS.EXCM == 0) * @@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector) l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address jx a0 +ENDPROC(_KernelExceptionVector) /* * Double exception vector (Exceptions with PS.EXCM == 1) @@ -344,6 +347,7 @@ ENTRY(_DoubleExceptionVector) .end literal_prefix +ENDPROC(_DoubleExceptionVector) /* * Debug interrupt vector @@ -355,9 +359,11 @@ ENTRY(_DoubleExceptionVector) .section .DebugInterruptVector.text, "ax" ENTRY(_DebugInterruptVector) + xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL jx a0 +ENDPROC(_DebugInterruptVector) /* Window overflow and underflow handlers. @@ -369,38 +375,43 @@ ENTRY(_DebugInterruptVector) * we try to access any page that would cause a page fault early. */ +#define ENTRY_ALIGN64(name) \ + .globl name; \ + .align 64; \ + name: + .section .WindowVectors.text, "ax" /* 4-Register Window Overflow Vector (Handler) */ - .align 64 -.global _WindowOverflow4 -_WindowOverflow4: +ENTRY_ALIGN64(_WindowOverflow4) + s32e a0, a5, -16 s32e a1, a5, -12 s32e a2, a5, -8 s32e a3, a5, -4 rfwo +ENDPROC(_WindowOverflow4) + /* 4-Register Window Underflow Vector (Handler) */ - .align 64 -.global _WindowUnderflow4 -_WindowUnderflow4: +ENTRY_ALIGN64(_WindowUnderflow4) + l32e a0, a5, -16 l32e a1, a5, -12 l32e a2, a5, -8 l32e a3, a5, -4 rfwu +ENDPROC(_WindowUnderflow4) /* 8-Register Window Overflow Vector (Handler) */ - .align 64 -.global _WindowOverflow8 -_WindowOverflow8: +ENTRY_ALIGN64(_WindowOverflow8) + s32e a0, a9, -16 l32e a0, a1, -12 s32e a2, a9, -8 @@ -412,11 +423,12 @@ _WindowOverflow8: s32e a7, a0, -20 rfwo +ENDPROC(_WindowOverflow8) + /* 8-Register Window Underflow Vector (Handler) */ - .align 64 -.global _WindowUnderflow8 -_WindowUnderflow8: +ENTRY_ALIGN64(_WindowUnderflow8) + l32e a1, a9, -12 l32e a0, a9, -16 l32e a7, a1, -12 @@ -428,12 +440,12 @@ _WindowUnderflow8: l32e a7, a7, -20 rfwu +ENDPROC(_WindowUnderflow8) /* 12-Register Window Overflow Vector (Handler) */ - .align 64 -.global _WindowOverflow12 -_WindowOverflow12: +ENTRY_ALIGN64(_WindowOverflow12) + s32e a0, a13, -16 l32e a0, a1, -12 s32e a1, a13, -12 @@ -449,11 +461,12 @@ _WindowOverflow12: s32e a11, a0, -20 rfwo +ENDPROC(_WindowOverflow12) + /* 12-Register Window Underflow Vector (Handler) */ - .align 64 -.global _WindowUnderflow12 -_WindowUnderflow12: +ENTRY_ALIGN64(_WindowUnderflow12) + l32e a1, a13, -12 l32e a0, a13, -16 l32e a11, a1, -12 @@ -469,6 +482,8 @@ _WindowUnderflow12: l32e a11, a11, -20 rfwu +ENDPROC(_WindowUnderflow12) + .text diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S index df397f932d0..0470ca21a35 100644 --- a/arch/xtensa/lib/checksum.S +++ b/arch/xtensa/lib/checksum.S @@ -170,7 +170,7 @@ ENTRY(csum_partial) 3: j 5b /* branch to handle the remaining byte */ - +ENDPROC(csum_partial) /* * Copy from ds while checksumming, otherwise like csum_partial @@ -211,6 +211,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, */ ENTRY(csum_partial_copy_generic) + entry sp, 32 mov a12, a3 mov a11, a4 @@ -367,6 +368,8 @@ DST( s8i a8, a3, 1 ) 6: j 4b /* process the possible trailing odd byte */ +ENDPROC(csum_partial_copy_generic) + # Exception handler: .section .fixup, "ax" diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index b048406d875..7f7078f57c4 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S @@ -29,6 +29,7 @@ */ ENTRY(clear_page) + entry a1, 16 movi a3, 0 @@ -45,6 +46,8 @@ ENTRY(clear_page) retw +ENDPROC(clear_page) + /* * copy_page and copy_user_page are the same for 
non-cache-aliased configs. * @@ -53,6 +56,7 @@ ENTRY(clear_page) */ ENTRY(copy_page) + entry a1, 16 __loopi a2, a4, PAGE_SIZE, 32 @@ -84,6 +88,8 @@ ENTRY(copy_page) retw +ENDPROC(copy_page) + #ifdef CONFIG_MMU /* * If we have to deal with cache aliasing, we use temporary memory mappings @@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start) */ ENTRY(clear_user_page) + entry a1, 32 /* Mark page dirty and determine alias. */ @@ -164,6 +171,8 @@ ENTRY(clear_user_page) retw +ENDPROC(clear_user_page) + /* * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) * a2 a3 a4 a5 @@ -262,6 +271,8 @@ ENTRY(copy_user_page) retw +ENDPROC(copy_user_page) + #endif #if (DCACHE_WAY_SIZE > PAGE_SIZE) @@ -272,6 +283,7 @@ ENTRY(copy_user_page) */ ENTRY(__flush_invalidate_dcache_page_alias) + entry sp, 16 movi a7, 0 # required for exception handler @@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias) retw +ENDPROC(__flush_invalidate_dcache_page_alias) #endif ENTRY(__tlbtemp_mapping_itlb) @@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb) #if (ICACHE_WAY_SIZE > PAGE_SIZE) ENTRY(__invalidate_icache_page_alias) + entry sp, 16 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) @@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias) isync retw +ENDPROC(__invalidate_icache_page_alias) + #endif /* End of special treatment in tlb miss exception */ ENTRY(__tlbtemp_mapping_end) + #endif /* CONFIG_MMU /* @@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end) */ ENTRY(__invalidate_icache_page) + entry sp, 16 ___invalidate_icache_page a2 a3 @@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page) retw +ENDPROC(__invalidate_icache_page) + /* * void __invalidate_dcache_page(ulong start) */ ENTRY(__invalidate_dcache_page) + entry sp, 16 ___invalidate_dcache_page a2 a3 @@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page) retw +ENDPROC(__invalidate_dcache_page) + /* * void __flush_invalidate_dcache_page(ulong start) */ ENTRY(__flush_invalidate_dcache_page) + entry sp, 16 ___flush_invalidate_dcache_page a2 a3 @@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page) dsync retw +ENDPROC(__flush_invalidate_dcache_page) + /* * void __flush_dcache_page(ulong start) */ ENTRY(__flush_dcache_page) + entry sp, 16 ___flush_dcache_page a2 a3 @@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page) dsync retw +ENDPROC(__flush_dcache_page) + /* * void __invalidate_icache_range(ulong start, ulong size) */ ENTRY(__invalidate_icache_range) + entry sp, 16 ___invalidate_icache_range a2 a3 a4 @@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range) retw +ENDPROC(__invalidate_icache_range) + /* * void __flush_invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__flush_invalidate_dcache_range) + entry sp, 16 ___flush_invalidate_dcache_range a2 a3 a4 @@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range) retw +ENDPROC(__flush_invalidate_dcache_range) + /* * void _flush_dcache_range(ulong start, ulong size) */ ENTRY(__flush_dcache_range) + entry sp, 16 ___flush_dcache_range a2 a3 a4 @@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range) retw +ENDPROC(__flush_dcache_range) + /* * void _invalidate_dcache_range(ulong start, ulong size) */ ENTRY(__invalidate_dcache_range) + entry sp, 16 ___invalidate_dcache_range a2 a3 a4 retw +ENDPROC(__invalidate_dcache_range) + /* * void _invalidate_icache_all(void) */ ENTRY(__invalidate_icache_all) + entry sp, 16 ___invalidate_icache_all a2 a3 @@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all) retw +ENDPROC(__invalidate_icache_all) + /* * void _flush_invalidate_dcache_all(void) */ 
ENTRY(__flush_invalidate_dcache_all) + entry sp, 16 ___flush_invalidate_dcache_all a2 a3 @@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all) retw +ENDPROC(__flush_invalidate_dcache_all) + /* * void _invalidate_dcache_all(void) */ ENTRY(__invalidate_dcache_all) + entry sp, 16 ___invalidate_dcache_all a2 a3 @@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all) retw +ENDPROC(__invalidate_dcache_all) -- cgit v1.2.3-18-g5258 From 94d6c61b97cbcb3b538276c3785896ea12ecaecb Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 31 Oct 2012 12:37:04 +0400 Subject: xtensa: ISS: add BASE_BAUD definition to serial.h This fixes the following build error in allyesconfig: drivers/tty/serial/8250/8250_early.c: In function 'parse_options': drivers/tty/serial/8250/8250_early.c:160:18: error: 'BASE_BAUD' undeclared (first use in this function) Reported-by: Fengguang Wu Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/platforms/iss/include/platform/serial.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/xtensa/platforms/iss/include/platform/serial.h b/arch/xtensa/platforms/iss/include/platform/serial.h index e69de29bb2d..16aec542d43 100644 --- a/arch/xtensa/platforms/iss/include/platform/serial.h +++ b/arch/xtensa/platforms/iss/include/platform/serial.h @@ -0,0 +1,15 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Tensilica Inc. + */ + +#ifndef __ASM_XTENSA_ISS_SERIAL_H +#define __ASM_XTENSA_ISS_SERIAL_H + +/* Have no meaning on ISS, but needed for 8250_early.c */ +#define BASE_BAUD 0 + +#endif /* __ASM_XTENSA_ISS_SERIAL_H */ -- cgit v1.2.3-18-g5258 From 35b16a9a093757a7a1ef288eea1128b0b4a3eef7 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 1 Nov 2012 18:38:27 +0400 Subject: xtensa: provide DMA_ERROR_CODE definition This fixes the following allmodconfig build error: drivers/uio/uio_dmem_genirq.c:95:18: error: 'DMA_ERROR_CODE' undeclared (first use in this function) drivers/uio/uio_dmem_genirq.c:238:18: error: 'DMA_ERROR_CODE' undeclared (first use in this function) make[3]: *** [drivers/uio/uio_dmem_genirq.o] Error 1 Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/dma-mapping.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 492c95790ad..eb69743172b 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -16,6 +16,8 @@ #include #include +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + /* * DMA-consistent mapping functions. 
*/ -- cgit v1.2.3-18-g5258 From 382cb5b91747f4f4d1f9883a39deec1b3d7fb906 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 5 Nov 2012 07:44:03 +0400 Subject: xtensa: fix build warning for arch/xtensa/mm/tlb.c Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index e2700b21395..070fb7a2523 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -63,7 +63,7 @@ void flush_tlb_all (void) void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) { - int flags; + unsigned long flags; local_save_flags(flags); __get_new_mmu_context(mm); __load_mmu_context(mm); -- cgit v1.2.3-18-g5258 From 288dc2b68c110cd2b14b17f047b85859bc644955 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 22 Oct 2012 08:42:39 +0400 Subject: xtensa: properly fix missing compiler barrier in simcall Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/platforms/iss/include/platform/simcall.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h index bd78192e2fc..b5a4edf02d7 100644 --- a/arch/xtensa/platforms/iss/include/platform/simcall.h +++ b/arch/xtensa/platforms/iss/include/platform/simcall.h @@ -74,13 +74,12 @@ static inline int __simc(int a, int b, int c, int d, int e, int f) "mov %1, a3\n" : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1) : "r"(c1), "r"(d1), "r"(e1), "r"(f1) - : ); + : "memory"); return ret; } static inline int simc_open(const char *file, int flags, int mode) { - wmb(); return __simc(SYS_open, (int) file, flags, mode, 0, 0); } @@ -91,19 +90,16 @@ static inline int simc_close(int fd) static inline int simc_ioctl(int fd, int request, void *arg) { - wmb(); return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); } static inline int simc_read(int fd, void *buf, size_t count) { - rmb(); return __simc(SYS_read, fd, (int) buf, count, 0, 0); } static inline int simc_write(int fd, const void *buf, size_t count) { - wmb(); return __simc(SYS_write, fd, (int) buf, count, 0, 0); } @@ -111,7 +107,6 @@ static inline int simc_poll(int fd) { struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; - wmb(); return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, 0, 0); } -- cgit v1.2.3-18-g5258 From 415217efc132b1237c65b15d1986d123380298f4 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 11 Nov 2012 01:29:10 +0400 Subject: xtensa: fix CPU cache flags formatting Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/kernel/setup.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index b237988ba6d..ffc4fcb3527 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -412,7 +412,7 @@ c_show(struct seq_file *f, void *slot) "icache size\t: %d\n" "icache flags\t: " #if XCHAL_ICACHE_LINE_LOCKABLE - "lock" + "lock " #endif "\n" "dcache line size: %d\n" @@ -420,10 +420,10 @@ c_show(struct seq_file *f, void *slot) "dcache size\t: %d\n" "dcache flags\t: " #if XCHAL_DCACHE_IS_WRITEBACK - "writeback" + "writeback " #endif #if XCHAL_DCACHE_LINE_LOCKABLE - "lock" + "lock " #endif "\n", XCHAL_ICACHE_LINESIZE, -- cgit v1.2.3-18-g5258 From 2f6ea6a767e9e26d23e4de4b03fdebe41f3b88f8 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 11 Nov 2012 
04:44:22 +0400
Subject: xtensa: display s32c1i feature flag in cpuinfo

Signed-off-by: Max Filippov
Signed-off-by: Chris Zankel
---
 arch/xtensa/kernel/setup.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch')

diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index ffc4fcb3527..e53a94b3edb 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -380,6 +380,9 @@ c_show(struct seq_file *f, void *slot)
 #endif
 #if XCHAL_HAVE_FP
 		     "fpu "
+#endif
+#if XCHAL_HAVE_S32C1I
+		     "s32c1i "
 #endif
 		     "\n");
-- 
cgit v1.2.3-18-g5258


From 733536b865441d9bad02c4711a4372d48ce21e54 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Thu, 15 Nov 2012 06:25:48 +0400
Subject: xtensa: save and restore scompare1 SR on kernel entry

Although scompare1 may be saved/restored by xchal_ncp_{load,store}
macros, explicit save/restore of registers manipulated by the kernel
itself is considered more correct.

Signed-off-by: Max Filippov
Signed-off-by: Chris Zankel
---
 arch/xtensa/include/asm/ptrace.h |  2 +-
 arch/xtensa/kernel/asm-offsets.c |  1 +
 arch/xtensa/kernel/entry.S       | 13 +++++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index da21c17f23a..9fe249c8f90 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -37,7 +37,7 @@ struct pt_regs {
 	unsigned long windowstart;	/*  52 */
 	unsigned long syscall;		/*  56 */
 	unsigned long icountlevel;	/*  60 */
-	int reserved[1];		/*  64 */
+	unsigned long scompare1;	/*  64 */
 
 	/* Additional configurable registers that are used by the compiler. */
 	xtregs_opt_t xtregs_opt;
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7dc3f915718..845475afc68 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -41,6 +41,7 @@ int main(void)
 	DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
 	DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
 	DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+	DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
 	DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
 	DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
 	DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 41ad9cfe9a2..4c2f2706ad5 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -372,6 +372,13 @@ common_exception:
 	s32i	a2, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND
 
+	/* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+	rsr	a2, scompare1
+	s32i	a2, a1, PT_SCOMPARE1
+#endif
+
 	/* Save optional registers. */
 
 	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
@@ -433,6 +440,12 @@ common_exception_return:
 
 	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 
+	/* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+	l32i	a2, a1, PT_SCOMPARE1
+	wsr	a2, scompare1
+#endif
 	wsr	a3, ps		/* disable interrupts */
 
 	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
-- 
cgit v1.2.3-18-g5258


From c622b29d1f38021411965b7e0170ab055551b257 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Mon, 19 Nov 2012 07:00:41 +0400
Subject: xtensa: initialize atomctl SR

In order to use the S32C1I instruction on cores with ATOMCTL SR the
register must be properly initialized.
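For context: S32C1I ("store 32-bit compare conditional") is the Xtensa
atomic compare-and-swap primitive that this and the following patches build
on. A rough C-level model of its semantics, assuming the SCOMPARE1 special
register holds the expected old value (illustrative helper only, not a
kernel or Tensilica API):

	/* Illustrative model only: the hardware performs this
	 * read-compare-write as one atomic operation, with ATOMCTL
	 * selecting how the external memory system carries it out. */
	static inline unsigned long
	s32c1i_model(volatile unsigned long *p, unsigned long scompare1,
		     unsigned long newval)
	{
		unsigned long old = *p;

		if (old == scompare1)
			*p = newval;
		return old;	/* callers retry while old != scompare1 */
	}

This is why the atomic, cmpxchg, bitops and spinlock patches below all loop
over "l32i; wsr scompare1; s32c1i; bne": the store succeeds only when
memory still holds the value that was read earlier.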
Signed-off-by: Max Filippov
Signed-off-by: Chris Zankel
---
 arch/xtensa/include/asm/initialize_mmu.h | 55 ++++++++++++++++++++++++++++++++
 arch/xtensa/kernel/head.S                |  3 ++
 2 files changed, 58 insertions(+)
 create mode 100644 arch/xtensa/include/asm/initialize_mmu.h

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 00000000000..e1f8ba4061e
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,55 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMU's.
+ *
+ * With the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier
+ * Pete Delaney
+ */

+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+	.macro	initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
+				 * RCW otherwise
+				 */
+#else
+	movi	a3, 0x29	/* non-MX -- Most cores use Std Memory
+				 * Controllers which usually can't use RCW
+				 */
+#endif
+	wsr	a3, atomctl
+#endif	/* XCHAL_HAVE_S32C1I &&
+	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+	 */
+
+	.endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 417998c0210..9013ae0174f 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -155,6 +156,8 @@ ENTRY(_startup)
 
 	isync
 
+	initialize_mmu
+
 	/* Unpack data sections
 	 *
 	 * The linker script used to build the Linux kernel image
-- 
cgit v1.2.3-18-g5258


From 28570e8dac5c86ab10ce2a7e9c02d3aaece63760 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Mon, 19 Nov 2012 08:30:15 +0400
Subject: xtensa: add trap_set_handler function

trap_set_handler sets a new C-handler in the exception table and
returns the previous handler.

Signed-off-by: Max Filippov
Signed-off-by: Chris Zankel
---
 arch/xtensa/include/asm/traps.h | 23 +++++++++++++++++++++
 arch/xtensa/kernel/traps.c      | 11 +++++++++++
 2 files changed, 34 insertions(+)
 create mode 100644 arch/xtensa/include/asm/traps.h

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 00000000000..54f70440185
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,23 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */ +#ifndef _XTENSA_TRAPS_H +#define _XTENSA_TRAPS_H + +#include + +/* + * handler must be either of the following: + * void (*)(struct pt_regs *regs); + * void (*)(struct pt_regs *regs, unsigned long exccause); + */ +extern void * __init trap_set_handler(int cause, void *handler); +extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); + +#endif /* _XTENSA_TRAPS_H */ diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 5caf2b64d43..691a792b01d 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -293,6 +293,17 @@ do_debug(struct pt_regs *regs) } +/* Set exception C handler - for temporary use when probing exceptions */ + +void * __init trap_set_handler(int cause, void *handler) +{ + unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause]; + void *previous = (void *)*entry; + *entry = (unsigned long)handler; + return previous; +} + + /* * Initialize dispatch tables. * -- cgit v1.2.3-18-g5258 From 00273125c39be9cbf619aef90147354a9ed8c385 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Wed, 28 Nov 2012 11:33:02 +0400 Subject: xtensa: add s32c1i sanity check Add a brief sanity test of S32C1I functionality. This instruction is needed by the kernel and userland as part of the base ABI (including GCC atomic builtins, certain threading packages, future atomic support in the C++ standard, etc). However, correct operation of this instruction requires some cooperation by hardware external to the processor (such as bus bridge, bus fabric, or memory controller). Minimally exercising this mechanism and reporting explicit status early in the boot process is helpful to chip vendors using the Linux kernel as a benchmark of correctness of hardware. As it turns out, S32C1I is not exercised by the kernel and by uClibc based userland as of early June 2008. This is expected to change soon as both incorporate more recent open source developments. Signed-off-by: Marc Gauthier Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/Kconfig.debug | 11 ++++ arch/xtensa/include/asm/regs.h | 4 ++ arch/xtensa/kernel/setup.c | 120 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 135 insertions(+) (limited to 'arch') diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug index be5fb4c6243..a34010e0e51 100644 --- a/arch/xtensa/Kconfig.debug +++ b/arch/xtensa/Kconfig.debug @@ -13,4 +13,15 @@ config LD_NO_RELAX Enabling this option improves the link time but increases the code size, and possibly execution time. +config S32C1I_SELFTEST + bool "Perform S32C1I instruction self-test at boot" + default y + help + Enable this option to test S32C1I instruction behavior at boot. + Correct operation of this instruction requires some cooperation from hardware + external to the processor (such as bus bridge, bus fabric, or memory controller). + It is easy to make wrong hardware configuration, this test should catch it early. + + Say 'N' on stable hardware. 
+ endmenu diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 8a8aa61ccc8..6aaf6d6a5fc 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h @@ -52,6 +52,10 @@ #define EXCCAUSE_SPECULATION 7 #define EXCCAUSE_PRIVILEGED 8 #define EXCCAUSE_UNALIGNED 9 +#define EXCCAUSE_INSTR_DATA_ERROR 12 +#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 +#define EXCCAUSE_INSTR_ADDR_ERROR 14 +#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 #define EXCCAUSE_ITLB_MISS 16 #define EXCCAUSE_ITLB_MULTIHIT 17 #define EXCCAUSE_ITLB_PRIVILEGE 18 diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index e53a94b3edb..45217617c60 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include @@ -235,6 +236,123 @@ extern char _UserExceptionVector_text_end; extern char _DoubleExceptionVector_literal_start; extern char _DoubleExceptionVector_text_end; + +#ifdef CONFIG_S32C1I_SELFTEST +#if XCHAL_HAVE_S32C1I + +static int __initdata rcw_word, rcw_probe_pc, rcw_exc; + +/* + * Basic atomic compare-and-swap, that records PC of S32C1I for probing. + * + * If *v == cmp, set *v = set. Return previous *v. + */ +static inline int probed_compare_swap(int *v, int cmp, int set) +{ + int tmp; + + __asm__ __volatile__( + " movi %1, 1f\n" + " s32i %1, %4, 0\n" + " wsr %2, scompare1\n" + "1: s32c1i %0, %3, 0\n" + : "=a" (set), "=&a" (tmp) + : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set) + : "memory" + ); + return set; +} + +/* Handle probed exception */ + +void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause) +{ + if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ + regs->pc += 3; /* skip the s32c1i instruction */ + rcw_exc = exccause; + } else { + do_unhandled(regs, exccause); + } +} + +/* Simple test of S32C1I (soc bringup assist) */ + +void __init check_s32c1i(void) +{ + int n, cause1, cause2; + void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ + + rcw_probe_pc = 0; + handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, + do_probed_exception); + handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, + do_probed_exception); + handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, + do_probed_exception); + + /* First try an S32C1I that does not store: */ + rcw_exc = 0; + rcw_word = 1; + n = probed_compare_swap(&rcw_word, 0, 2); + cause1 = rcw_exc; + + /* took exception? */ + if (cause1 != 0) { + /* unclean exception? */ + if (n != 2 || rcw_word != 1) + panic("S32C1I exception error"); + } else if (rcw_word != 1 || n != 1) { + panic("S32C1I compare error"); + } + + /* Then an S32C1I that stores: */ + rcw_exc = 0; + rcw_word = 0x1234567; + n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde); + cause2 = rcw_exc; + + if (cause2 != 0) { + /* unclean exception? 
*/ + if (n != 0xabcde || rcw_word != 0x1234567) + panic("S32C1I exception error (b)"); + } else if (rcw_word != 0xabcde || n != 0x1234567) { + panic("S32C1I store error"); + } + + /* Verify consistency of exceptions: */ + if (cause1 || cause2) { + pr_warn("S32C1I took exception %d, %d\n", cause1, cause2); + /* If emulation of S32C1I upon bus error gets implemented, + we can get rid of this panic for single core (not SMP) */ + panic("S32C1I exceptions not currently supported"); + } + if (cause1 != cause2) + panic("inconsistent S32C1I exceptions"); + + trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); + trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); + trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); +} + +#else /* XCHAL_HAVE_S32C1I */ + +/* This condition should not occur with a commercially deployed processor. + Display reminder for early engr test or demo chips / FPGA bitstreams */ +void __init check_s32c1i(void) +{ + pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); +} + +#endif /* XCHAL_HAVE_S32C1I */ +#else /* CONFIG_S32C1I_SELFTEST */ + +void __init check_s32c1i(void) +{ +} + +#endif /* CONFIG_S32C1I_SELFTEST */ + + void __init setup_arch(char **cmdline_p) { extern int mem_reserve(unsigned long, unsigned long, int); @@ -244,6 +362,8 @@ void __init setup_arch(char **cmdline_p) boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; *cmdline_p = command_line; + check_s32c1i(); + /* Reserve some memory regions */ #ifdef CONFIG_BLK_DEV_INITRD -- cgit v1.2.3-18-g5258 From 219b1e4c61c108731bb665962231b1fa057f6c71 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 11 Nov 2012 03:30:02 +0400 Subject: xtensa: add s32c1i-based atomic ops implementations Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/atomic.h | 271 ++++++++++++++++++++++++++------------ arch/xtensa/include/asm/cmpxchg.h | 71 +++++++--- 2 files changed, 236 insertions(+), 106 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 24f50cada70..c3f289174c1 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -66,19 +66,35 @@ */ static inline void atomic_add(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /** @@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v) */ static inline void atomic_sub(int i, atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 
0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /* @@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " add %0, %0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } static inline int atomic_sub_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " sub %0, %0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } /** @@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { - unsigned int all_f = -1; - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "xor %1, %4, %3 \n\t" - "and %0, %0, %4 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval), "=a" (mask) - : "a" (v), "a" (all_f), "1" (mask) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (~mask), "a" (v) + : "memory" + ); +#else + unsigned int all_f = -1; + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " xor %1, %4, %3\n" + " and %0, %0, %4\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval), "=a" (mask) + : "a" (v), "a" (all_f), "1" (mask) + : "a15", "memory" + ); +#endif } static inline void atomic_set_mask(unsigned int mask, 
atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "or %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (mask), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (mask), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " or %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (mask), "a" (v) + : "a15", "memory" + ); +#endif } /* Atomic operations are already serializing */ @@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ - diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 64dad04a9d2..25869a19049 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -22,17 +22,30 @@ static inline unsigned long __cmpxchg_u32(volatile int *p, int old, int new) { - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "bne %0, %2, 1f \n\t" - "s32i %3, %1, 0 \n\t" - "1: \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (old) - : "a" (p), "a" (old), "r" (new) - : "a15", "memory"); - return old; +#if XCHAL_HAVE_S32C1I + __asm__ __volatile__( + " wsr %2, scompare1\n" + " s32c1i %0, %1, 0\n" + : "+a" (new) + : "a" (p), "a" (old) + : "memory" + ); + + return new; +#else + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " bne %0, %2, 1f\n" + " s32i %3, %1, 0\n" + "1:\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (old) + : "a" (p), "a" (old), "r" (new) + : "a15", "memory"); + return old; +#endif } /* This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). 
*/ @@ -93,16 +106,32 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, static inline unsigned long xchg_u32(volatile int * m, unsigned long val) { - unsigned long tmp; - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "s32i %2, %1, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (tmp) - : "a" (m), "a" (val) - : "a15", "memory"); - return tmp; +#if XCHAL_HAVE_S32C1I + unsigned long tmp, result; + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " mov %0, %3\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (m), "a" (val) + : "memory" + ); + return result; +#else + unsigned long tmp; + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " s32i %2, %1, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (tmp) + : "a" (m), "a" (val) + : "a15", "memory"); + return tmp; +#endif } #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -- cgit v1.2.3-18-g5258 From e5a9f6adba79ce2732330fc6d045e98959af8962 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 11 Nov 2012 05:47:25 +0400 Subject: xtensa: add s32c1i-based bitops implementations Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/bitops.h | 127 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 126 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 5270197ddd3..84afe58d5d3 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -29,7 +29,6 @@ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include #include #if XCHAL_HAVE_NSA @@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word) #endif #include + +#if XCHAL_HAVE_S32C1I + +static inline void set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); +} + +static inline void clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); +} + +static inline void change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); +} + +static inline int +test_and_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_clear_bit(unsigned int bit, volatile unsigned 
long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +#else + +#include + +#endif /* XCHAL_HAVE_S32C1I */ + #include #include -- cgit v1.2.3-18-g5258 From 71872b5fb2bed294fd5afd900890880e9faa82c1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sun, 11 Nov 2012 07:01:21 +0400 Subject: xtensa: add s32c1i-based spinlock implementations Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/spinlock.h | 188 ++++++++++++++++++++++++++++++++++++- 1 file changed, 187 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 8ff23649581..03975906b36 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -11,6 +11,192 @@ #ifndef _XTENSA_SPINLOCK_H #define _XTENSA_SPINLOCK_H -#include +/* + * spinlock + * + * There is at most one owner of a spinlock. There are not different + * types of spinlock owners like there are for rwlocks (see below). + * + * When trying to obtain a spinlock, the function "spins" forever, or busy- + * waits, until the lock is obtained. When spinning, presumably some other + * owner will soon give up the spinlock making it available to others. Use + * the trylock functions to avoid spinning forever. + * + * possible values: + * + * 0 nobody owns the spinlock + * 1 somebody owns the spinlock + */ + +#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define __raw_spin_unlock_wait(lock) \ + do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* + * rwlock + * + * Read-write locks are really a more flexible spinlock. They allow + * multiple readers but only one writer. Write ownership is exclusive + * (i.e., all other readers and writers are blocked from ownership while + * there is a write owner). These rwlocks are unfair to writers. Writers + * can be starved for an indefinite time by readers. 
+ * + * possible values: + * + * 0 nobody owns the rwlock + * >0 one or more readers own the rwlock + * (the positive value is the actual number of readers) + * 0x80000000 one writer owns the rwlock, no other writers, no readers + */ + +#define __raw_write_can_lock(x) ((x)->lock == 0) + +static inline void __raw_write_lock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int __raw_write_trylock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void __raw_write_unlock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +static inline void __raw_read_lock(raw_rwlock_t *rw) +{ + unsigned long tmp; + unsigned long result; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " bltz %1, 1b\n" + " wsr %1, scompare1\n" + " addi %0, %1, 1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int __raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned long result; + unsigned long tmp; + + __asm__ __volatile__( + " l32i %1, %2, 0\n" + " addi %0, %1, 1\n" + " bltz %0, 1f\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " sub %0, %0, %1\n" + "1:\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return result == 0; +} + +static inline void __raw_read_unlock(raw_rwlock_t *rw) +{ + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " addi %0, %1, -1\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp1), "=&a" (tmp2) + : "a" (&rw->lock) + : "memory"); +} #endif /* _XTENSA_SPINLOCK_H */ -- cgit v1.2.3-18-g5258 From 599bf77a0d0b253dd94fd058275b05520c6d25db Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 24 Nov 2012 06:22:08 +0400 Subject: xtensa: fix mb and wmb definitions Define mb and wmb as memw to force memory barrier. Signed-off-by: Max Filippov Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/barrier.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index 55707a8009d..ef021677d53 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2012 Tensilica Inc. 
*/ #ifndef _XTENSA_SYSTEM_H @@ -12,8 +12,8 @@ #define smp_read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0) -#define mb() barrier() -#define rmb() mb() +#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) +#define rmb() barrier() #define wmb() mb() #ifdef CONFIG_SMP -- cgit v1.2.3-18-g5258 From 3f5ec298e56587462d91320c9e49f3e38f2beb17 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 5 Nov 2012 09:10:00 +0400 Subject: xtensa: clean up boot make rul