Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/Makefile            |  13
-rw-r--r--  arch/mips/lib/ashldi3.c           |  29
-rw-r--r--  arch/mips/lib/ashrdi3.c           |  31
-rw-r--r--  arch/mips/lib/bitops.c            | 179
-rw-r--r--  arch/mips/lib/cmpdi2.c            |  27
-rw-r--r--  arch/mips/lib/csum_partial.S      | 835
-rw-r--r--  arch/mips/lib/csum_partial_copy.c |  49
-rw-r--r--  arch/mips/lib/delay.c             |  64
-rw-r--r--  arch/mips/lib/dump_tlb.c          | 113
-rw-r--r--  arch/mips/lib/iomap-pci.c         |  48
-rw-r--r--  arch/mips/lib/iomap.c             | 252
-rw-r--r--  arch/mips/lib/libgcc.h            |  25
-rw-r--r--  arch/mips/lib/lshrdi3.c           |  29
-rw-r--r--  arch/mips/lib/memcpy.S            | 512
-rw-r--r--  arch/mips/lib/memset.S            | 248
-rw-r--r--  arch/mips/lib/mips-atomic.c       | 161
-rw-r--r--  arch/mips/lib/promlib.c           |  24
-rw-r--r--  arch/mips/lib/r3k_dump_tlb.c      |  63
-rw-r--r--  arch/mips/lib/strlen_user.S       |  43
-rw-r--r--  arch/mips/lib/strncpy_user.S      |  66
-rw-r--r--  arch/mips/lib/strnlen_user.S      |  47
-rw-r--r--  arch/mips/lib/ucmpdi2.c           |  21
-rw-r--r--  arch/mips/lib/uncached.c          |  22
23 files changed, 2564 insertions(+), 337 deletions(-)
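Note on the libgcc-style helpers added below (ashldi3.c, ashrdi3.c, lshrdi3.c, cmpdi2.c, ucmpdi2.c): on 32-bit MIPS the compiler may emit calls to these routines for 64-bit shifts and comparisons, so the kernel must provide them. They all share the DWunion word-splitting approach defined in the new libgcc.h. The following is only an illustrative, stand-alone sketch of that idea (little-endian layout assumed; it is not the kernel code itself):

    #include <stdio.h>

    /* Little-endian word layout, mirroring the DWunion in libgcc.h. */
    struct DWstruct { int low, high; };
    typedef union { struct DWstruct s; long long ll; } DWunion;

    /*
     * Sketch of the __ashldi3 logic: shift the low word left and feed
     * the bits it loses into the high word as carries.  Shifts of 32
     * or more move the low word entirely into the high word.
     */
    static long long ashldi3_sketch(long long u, int b)
    {
            DWunion uu, w;

            if (b == 0)
                    return u;
            uu.ll = u;
            if (b >= 32) {
                    w.s.low = 0;
                    w.s.high = (unsigned int) uu.s.low << (b - 32);
            } else {
                    unsigned int carries = (unsigned int) uu.s.low >> (32 - b);

                    w.s.low = (unsigned int) uu.s.low << b;
                    w.s.high = ((unsigned int) uu.s.high << b) | carries;
            }
            return w.ll;
    }

    int main(void)
    {
            printf("%llx\n", ashldi3_sketch(0x12345678LL, 8)); /* 1234567800 */
            return 0;
    }

The kernel versions are equivalent but express the split as bm = 32 - b and branch on bm <= 0, as seen in the patch below.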
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index cf12caf8077..eeddc58802e 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -2,9 +2,16 @@ # Makefile for MIPS-specific library files.. # -lib-y += csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \ +lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ + mips-atomic.o strlen_user.o strncpy_user.o \ strnlen_user.o uncached.o -obj-y += iomap.o +obj-y += iomap.o +obj-$(CONFIG_PCI) += iomap-pci.o -EXTRA_AFLAGS := $(CFLAGS) +obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o +obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o +obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o + +# libgcc-style stuff needed in the kernel +obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c new file mode 100644 index 00000000000..beb80f31609 --- /dev/null +++ b/arch/mips/lib/ashldi3.c @@ -0,0 +1,29 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __ashldi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.low = 0; + w.s.high = (unsigned int) uu.s.low << -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.low >> bm; + + w.s.low = (unsigned int) uu.s.low << b; + w.s.high = ((unsigned int) uu.s.high << b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c new file mode 100644 index 00000000000..c884a912b66 --- /dev/null +++ b/arch/mips/lib/ashrdi3.c @@ -0,0 +1,31 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __ashrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = + uu.s.high >> 31; + w.s.low = uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c new file mode 100644 index 00000000000..3b2a1e78a54 --- /dev/null +++ b/arch/mips/lib/bitops.c @@ -0,0 +1,179 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 1999, 2000 Silicon Graphics, Inc. + */ +#include <linux/bitops.h> +#include <linux/irqflags.h> +#include <linux/export.h> + + +/** + * __mips_set_bit - Atomically set a bit in memory. This is called by + * set_bit() if it cannot find a faster solution. + * @nr: the bit to set + * @addr: the address to start counting from + */ +void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a |= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_set_bit); + + +/** + * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if + * it cannot find a faster solution. 
+ * @nr: Bit to clear + * @addr: Address to start counting from + */ +void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a &= ~mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_clear_bit); + + +/** + * __mips_change_bit - Toggle a bit in memory. This is called by change_bit() + * if it cannot find a faster solution. + * @nr: Bit to change + * @addr: Address to start counting from + */ +void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a ^= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_change_bit); + + +/** + * __mips_test_and_set_bit - Set a bit and return its old value. This is + * called by test_and_set_bit() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + int res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a) != 0; + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit); + + +/** + * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is + * called by test_and_set_bit_lock() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + int res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a) != 0; + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit_lock); + + +/** + * __mips_test_and_clear_bit - Clear a bit and return its old value. This is + * called by test_and_clear_bit() if it cannot find a faster solution. + * @nr: Bit to clear + * @addr: Address to count from + */ +int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + int res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a) != 0; + *a &= ~mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_clear_bit); + + +/** + * __mips_test_and_change_bit - Change a bit and return its old value. This is + * called by test_and_change_bit() if it cannot find a faster solution. 
+ * @nr: Bit to change + * @addr: Address to count from + */ +int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long *a = (unsigned long *)addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + int res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a) != 0; + *a ^= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_change_bit); diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c new file mode 100644 index 00000000000..8c1306437ed --- /dev/null +++ b/arch/mips/lib/cmpdi2.c @@ -0,0 +1,27 @@ +#include <linux/module.h> + +#include "libgcc.h" + +word_type __cmpdi2(long long a, long long b) +{ + const DWunion au = { + .ll = a + }; + const DWunion bu = { + .ll = b + }; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + + return 1; +} + +EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S new file mode 100644 index 00000000000..9901237563c --- /dev/null +++ b/arch/mips/lib/csum_partial.S @@ -0,0 +1,835 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Quick'n'dirty IP checksum ... + * + * Copyright (C) 1998, 1999 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2014 Imagination Technologies Ltd. + */ +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> + +#ifdef CONFIG_64BIT +/* + * As we are sharing code base with the mips32 tree (which use the o32 ABI + * register definitions). We need to redefine the register definitions from + * the n64 ABI register naming to the o32 ABI register naming. 
+ */ +#undef t0 +#undef t1 +#undef t2 +#undef t3 +#define t0 $8 +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define t5 $13 +#define t6 $14 +#define t7 $15 + +#define USE_DOUBLE +#endif + +#ifdef USE_DOUBLE + +#define LOAD ld +#define LOAD32 lwu +#define ADD daddu +#define NBYTES 8 + +#else + +#define LOAD lw +#define LOAD32 lw +#define ADD addu +#define NBYTES 4 + +#endif /* USE_DOUBLE */ + +#define UNIT(unit) ((unit)*NBYTES) + +#define ADDC(sum,reg) \ + .set push; \ + .set noat; \ + ADD sum, reg; \ + sltu v1, sum, reg; \ + ADD sum, v1; \ + .set pop + +#define ADDC32(sum,reg) \ + .set push; \ + .set noat; \ + addu sum, reg; \ + sltu v1, sum, reg; \ + addu sum, v1; \ + .set pop + +#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ + LOAD _t0, (offset + UNIT(0))(src); \ + LOAD _t1, (offset + UNIT(1))(src); \ + LOAD _t2, (offset + UNIT(2))(src); \ + LOAD _t3, (offset + UNIT(3))(src); \ + ADDC(sum, _t0); \ + ADDC(sum, _t1); \ + ADDC(sum, _t2); \ + ADDC(sum, _t3) + +#ifdef USE_DOUBLE +#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ + CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) +#else +#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ + CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \ + CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3) +#endif + +/* + * a0: source address + * a1: length of the area to checksum + * a2: partial checksum + */ + +#define src a0 +#define sum v0 + + .text + .set noreorder + .align 5 +LEAF(csum_partial) + move sum, zero + move t7, zero + + sltiu t8, a1, 0x8 + bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */ + move t2, a1 + + andi t7, src, 0x1 /* odd buffer? */ + +.Lhword_align: + beqz t7, .Lword_align + andi t8, src, 0x2 + + lbu t0, (src) + LONG_SUBU a1, a1, 0x1 +#ifdef __MIPSEL__ + sll t0, t0, 8 +#endif + ADDC(sum, t0) + PTR_ADDU src, src, 0x1 + andi t8, src, 0x2 + +.Lword_align: + beqz t8, .Ldword_align + sltiu t8, a1, 56 + + lhu t0, (src) + LONG_SUBU a1, a1, 0x2 + ADDC(sum, t0) + sltiu t8, a1, 56 + PTR_ADDU src, src, 0x2 + +.Ldword_align: + bnez t8, .Ldo_end_words + move t8, a1 + + andi t8, src, 0x4 + beqz t8, .Lqword_align + andi t8, src, 0x8 + + LOAD32 t0, 0x00(src) + LONG_SUBU a1, a1, 0x4 + ADDC(sum, t0) + PTR_ADDU src, src, 0x4 + andi t8, src, 0x8 + +.Lqword_align: + beqz t8, .Loword_align + andi t8, src, 0x10 + +#ifdef USE_DOUBLE + ld t0, 0x00(src) + LONG_SUBU a1, a1, 0x8 + ADDC(sum, t0) +#else + lw t0, 0x00(src) + lw t1, 0x04(src) + LONG_SUBU a1, a1, 0x8 + ADDC(sum, t0) + ADDC(sum, t1) +#endif + PTR_ADDU src, src, 0x8 + andi t8, src, 0x10 + +.Loword_align: + beqz t8, .Lbegin_movement + LONG_SRL t8, a1, 0x7 + +#ifdef USE_DOUBLE + ld t0, 0x00(src) + ld t1, 0x08(src) + ADDC(sum, t0) + ADDC(sum, t1) +#else + CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4) +#endif + LONG_SUBU a1, a1, 0x10 + PTR_ADDU src, src, 0x10 + LONG_SRL t8, a1, 0x7 + +.Lbegin_movement: + beqz t8, 1f + andi t2, a1, 0x40 + +.Lmove_128bytes: + CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) + CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) + CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) + CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4) + LONG_SUBU t8, t8, 0x01 + .set reorder /* DADDI_WAR */ + PTR_ADDU src, src, 0x80 + bnez t8, .Lmove_128bytes + .set noreorder + +1: + beqz t2, 1f + andi t2, a1, 0x20 + +.Lmove_64bytes: + CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) + CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) + PTR_ADDU src, src, 0x40 + +1: + beqz t2, .Ldo_end_words + andi t8, a1, 0x1c + +.Lmove_32bytes: + 
CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) + andi t8, a1, 0x1c + PTR_ADDU src, src, 0x20 + +.Ldo_end_words: + beqz t8, .Lsmall_csumcpy + andi t2, a1, 0x3 + LONG_SRL t8, t8, 0x2 + +.Lend_words: + LOAD32 t0, (src) + LONG_SUBU t8, t8, 0x1 + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + PTR_ADDU src, src, 0x4 + bnez t8, .Lend_words + .set noreorder + +/* unknown src alignment and < 8 bytes to go */ +.Lsmall_csumcpy: + move a1, t2 + + andi t0, a1, 4 + beqz t0, 1f + andi t0, a1, 2 + + /* Still a full word to go */ + ulw t1, (src) + PTR_ADDIU src, 4 +#ifdef USE_DOUBLE + dsll t1, t1, 32 /* clear lower 32bit */ +#endif + ADDC(sum, t1) + +1: move t1, zero + beqz t0, 1f + andi t0, a1, 1 + + /* Still a halfword to go */ + ulhu t1, (src) + PTR_ADDIU src, 2 + +1: beqz t0, 1f + sll t1, t1, 16 + + lbu t2, (src) + nop + +#ifdef __MIPSEB__ + sll t2, t2, 8 +#endif + or t1, t2 + +1: ADDC(sum, t1) + + /* fold checksum */ +#ifdef USE_DOUBLE + dsll32 v1, sum, 0 + daddu sum, v1 + sltu v1, sum, v1 + dsra32 sum, sum, 0 + addu sum, v1 +#endif + + /* odd buffer alignment? */ +#ifdef CONFIG_CPU_MIPSR2 + wsbh v1, sum + movn sum, v1, t7 +#else + beqz t7, 1f /* odd buffer alignment? */ + lui v1, 0x00ff + addu v1, 0x00ff + and t0, sum, v1 + sll t0, t0, 8 + srl sum, sum, 8 + and sum, sum, v1 + or sum, sum, t0 +1: +#endif + .set reorder + /* Add the passed partial csum. */ + ADDC32(sum, a2) + jr ra + .set noreorder + END(csum_partial) + + +/* + * checksum and copy routines based on memcpy.S + * + * csum_partial_copy_nocheck(src, dst, len, sum) + * __csum_partial_copy_kernel(src, dst, len, sum, errp) + * + * See "Spec" in memcpy.S for details. Unlike __copy_user, all + * function in this file use the standard calling convention. + */ + +#define src a0 +#define dst a1 +#define len a2 +#define psum a3 +#define sum v0 +#define odd t8 +#define errptr t9 + +/* + * The exception handler for loads requires that: + * 1- AT contain the address of the byte just past the end of the source + * of the copy, + * 2- src_entry <= src < AT, and + * 3- (dst - src) == (dst_entry - src_entry), + * The _entry suffix denotes values when __copy_user was called. + * + * (1) is set up up by __csum_partial_copy_from_user and maintained by + * not writing AT in __csum_partial_copy + * (2) is met by incrementing src by the number of bytes copied + * (3) is met by not doing loads between a pair of increments of dst and src + * + * The exception handlers for stores stores -EFAULT to errptr and return. + * These handlers do not need to overwrite any data. + */ + +/* Instruction type */ +#define LD_INSN 1 +#define ST_INSN 2 +#define LEGACY_MODE 1 +#define EVA_MODE 2 +#define USEROP 1 +#define KERNELOP 2 + +/* + * Wrapper to add an entry in the exception table + * in case the insn causes a memory exception. 
+ * Arguments: + * insn : Load/store instruction + * type : Instruction type + * reg : Register + * addr : Address + * handler : Exception handler + */ +#define EXC(insn, type, reg, addr, handler) \ + .if \mode == LEGACY_MODE; \ +9: insn reg, addr; \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous; \ + /* This is enabled in EVA mode */ \ + .else; \ + /* If loading from user or storing to user */ \ + .if ((\from == USEROP) && (type == LD_INSN)) || \ + ((\to == USEROP) && (type == ST_INSN)); \ +9: __BUILD_EVA_INSN(insn##e, reg, addr); \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous; \ + .else; \ + /* EVA without exception */ \ + insn reg, addr; \ + .endif; \ + .endif + +#undef LOAD + +#ifdef USE_DOUBLE + +#define LOADK ld /* No exception */ +#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) +#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) +#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) +#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) +#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) +#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) +#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) +#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) +#define ADD daddu +#define SUB dsubu +#define SRL dsrl +#define SLL dsll +#define SLLV dsllv +#define SRLV dsrlv +#define NBYTES 8 +#define LOG_NBYTES 3 + +#else + +#define LOADK lw /* No exception */ +#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) +#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) +#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) +#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) +#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) +#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) +#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) +#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) +#define ADD addu +#define SUB subu +#define SRL srl +#define SLL sll +#define SLLV sllv +#define SRLV srlv +#define NBYTES 4 +#define LOG_NBYTES 2 + +#endif /* USE_DOUBLE */ + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define LDFIRST LOADR +#define LDREST LOADL +#define STFIRST STORER +#define STREST STOREL +#define SHIFT_DISCARD SLLV +#define SHIFT_DISCARD_REVERT SRLV +#else +#define LDFIRST LOADL +#define LDREST LOADR +#define STFIRST STOREL +#define STREST STORER +#define SHIFT_DISCARD SRLV +#define SHIFT_DISCARD_REVERT SLLV +#endif + +#define FIRST(unit) ((unit)*NBYTES) +#define REST(unit) (FIRST(unit)+NBYTES-1) + +#define ADDRMASK (NBYTES-1) + +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + .set noat +#else + .set at=v1 +#endif + + .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck + + PTR_ADDU AT, src, len /* See (1) above. */ + /* initialize __nocheck if this the first time we execute this + * macro + */ +#ifdef CONFIG_64BIT + move errptr, a4 +#else + lw errptr, 16(sp) +#endif + .if \__nocheck == 1 + FEXPORT(csum_partial_copy_nocheck) + .endif + move sum, zero + move odd, zero + /* + * Note: dst & src may be unaligned, len may be 0 + * Temps + */ + /* + * The "issue break"s below are very approximate. + * Issue delays for dcache fills will perturb the schedule, as will + * load queue full replay traps, etc. + * + * If len < NBYTES use byte operations. 
+ */ + sltu t2, len, NBYTES + and t1, dst, ADDRMASK + bnez t2, .Lcopy_bytes_checklen\@ + and t0, src, ADDRMASK + andi odd, dst, 0x1 /* odd buffer? */ + bnez t1, .Ldst_unaligned\@ + nop + bnez t0, .Lsrc_unaligned_dst_aligned\@ + /* + * use delay slot for fall-through + * src and dst are aligned; need to compute rem + */ +.Lboth_aligned\@: + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES + nop + SUB len, 8*NBYTES # subtract here for bgez loop + .align 4 +1: + LOAD(t0, UNIT(0)(src), .Ll_exc\@) + LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) + LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) + LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) + LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) + LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@) + LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@) + LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@) + SUB len, len, 8*NBYTES + ADD src, src, 8*NBYTES + STORE(t0, UNIT(0)(dst), .Ls_exc\@) + ADDC(sum, t0) + STORE(t1, UNIT(1)(dst), .Ls_exc\@) + ADDC(sum, t1) + STORE(t2, UNIT(2)(dst), .Ls_exc\@) + ADDC(sum, t2) + STORE(t3, UNIT(3)(dst), .Ls_exc\@) + ADDC(sum, t3) + STORE(t4, UNIT(4)(dst), .Ls_exc\@) + ADDC(sum, t4) + STORE(t5, UNIT(5)(dst), .Ls_exc\@) + ADDC(sum, t5) + STORE(t6, UNIT(6)(dst), .Ls_exc\@) + ADDC(sum, t6) + STORE(t7, UNIT(7)(dst), .Ls_exc\@) + ADDC(sum, t7) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 8*NBYTES + bgez len, 1b + .set noreorder + ADD len, 8*NBYTES # revert len (see above) + + /* + * len == the number of bytes left to copy < 8*NBYTES + */ +.Lcleanup_both_aligned\@: +#define rem t7 + beqz len, .Ldone\@ + sltu t0, len, 4*NBYTES + bnez t0, .Lless_than_4units\@ + and rem, len, (NBYTES-1) # rem = len % NBYTES + /* + * len >= 4*NBYTES + */ + LOAD(t0, UNIT(0)(src), .Ll_exc\@) + LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) + LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) + LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) + SUB len, len, 4*NBYTES + ADD src, src, 4*NBYTES + STORE(t0, UNIT(0)(dst), .Ls_exc\@) + ADDC(sum, t0) + STORE(t1, UNIT(1)(dst), .Ls_exc\@) + ADDC(sum, t1) + STORE(t2, UNIT(2)(dst), .Ls_exc\@) + ADDC(sum, t2) + STORE(t3, UNIT(3)(dst), .Ls_exc\@) + ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + beqz len, .Ldone\@ + .set noreorder +.Lless_than_4units\@: + /* + * rem = len % NBYTES + */ + beq rem, len, .Lcopy_bytes\@ + nop +1: + LOAD(t0, 0(src), .Ll_exc\@) + ADD src, src, NBYTES + SUB len, len, NBYTES + STORE(t0, 0(dst), .Ls_exc\@) + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES + bne rem, len, 1b + .set noreorder + + /* + * src and dst are aligned, need to copy rem bytes (rem < NBYTES) + * A loop would do only a byte at a time with possible branch + * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE + * because can't assume read-access to dst. Instead, use + * STREST dst, which doesn't require read access to dst. + * + * This code should perform better than a simple loop on modern, + * wide-issue mips processors because the code has fewer branches and + * more instruction-level parallelism. 
+ */ +#define bits t2 + beqz len, .Ldone\@ + ADD t1, dst, len # t1 is just past last byte of dst + li bits, 8*NBYTES + SLL rem, len, 3 # rem = number of bits to keep + LOAD(t0, 0(src), .Ll_exc\@) + SUB bits, bits, rem # bits = number of bits to discard + SHIFT_DISCARD t0, t0, bits + STREST(t0, -1(t1), .Ls_exc\@) + SHIFT_DISCARD_REVERT t0, t0, bits + .set reorder + ADDC(sum, t0) + b .Ldone\@ + .set noreorder +.Ldst_unaligned\@: + /* + * dst is unaligned + * t0 = src & ADDRMASK + * t1 = dst & ADDRMASK; T1 > 0 + * len >= NBYTES + * + * Copy enough bytes to align dst + * Set match = (src and dst have same alignment) + */ +#define match rem + LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) + ADD t2, zero, NBYTES + LDREST(t3, REST(0)(src), .Ll_exc_copy\@) + SUB t2, t2, t1 # t2 = number of bytes copied + xor match, t0, t1 + STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) + SLL t4, t1, 3 # t4 = number of bits to discard + SHIFT_DISCARD t3, t3, t4 + /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ + ADDC(sum, t3) + beq len, t2, .Ldone\@ + SUB len, len, t2 + ADD dst, dst, t2 + beqz match, .Lboth_aligned\@ + ADD src, src, t2 + +.Lsrc_unaligned_dst_aligned\@: + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + beqz t0, .Lcleanup_src_unaligned\@ + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES +1: +/* + * Avoid consecutive LD*'s to the same register since some mips + * implementations can't issue them in the same cycle. + * It's OK to load FIRST(N+1) before REST(N) because the two addresses + * are to the same unit (unless src is aligned, but it's not). + */ + LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) + LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) + SUB len, len, 4*NBYTES + LDREST(t0, REST(0)(src), .Ll_exc_copy\@) + LDREST(t1, REST(1)(src), .Ll_exc_copy\@) + LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) + LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) + LDREST(t2, REST(2)(src), .Ll_exc_copy\@) + LDREST(t3, REST(3)(src), .Ll_exc_copy\@) + ADD src, src, 4*NBYTES +#ifdef CONFIG_CPU_SB1 + nop # improves slotting +#endif + STORE(t0, UNIT(0)(dst), .Ls_exc\@) + ADDC(sum, t0) + STORE(t1, UNIT(1)(dst), .Ls_exc\@) + ADDC(sum, t1) + STORE(t2, UNIT(2)(dst), .Ls_exc\@) + ADDC(sum, t2) + STORE(t3, UNIT(3)(dst), .Ls_exc\@) + ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + bne len, rem, 1b + .set noreorder + +.Lcleanup_src_unaligned\@: + beqz len, .Ldone\@ + and rem, len, NBYTES-1 # rem = len % NBYTES + beq rem, len, .Lcopy_bytes\@ + nop +1: + LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) + LDREST(t0, REST(0)(src), .Ll_exc_copy\@) + ADD src, src, NBYTES + SUB len, len, NBYTES + STORE(t0, 0(dst), .Ls_exc\@) + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES + bne len, rem, 1b + .set noreorder + +.Lcopy_bytes_checklen\@: + beqz len, .Ldone\@ + nop +.Lcopy_bytes\@: + /* 0 < len < NBYTES */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define SHIFT_START 0 +#define SHIFT_INC 8 +#else +#define SHIFT_START 8*(NBYTES-1) +#define SHIFT_INC -8 +#endif + move t2, zero # partial word + li t3, SHIFT_START # shift +/* use .Ll_exc_copy here to return correct sum on fault */ +#define COPY_BYTE(N) \ + LOADBU(t0, N(src), .Ll_exc_copy\@); \ + SUB len, len, 1; \ + STOREB(t0, N(dst), .Ls_exc\@); \ + SLLV t0, t0, t3; \ + addu t3, SHIFT_INC; \ + beqz len, .Lcopy_bytes_done\@; \ + or t2, t0 + + COPY_BYTE(0) + COPY_BYTE(1) +#ifdef USE_DOUBLE + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) +#endif + LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@) + SUB len, len, 1 + STOREB(t0, NBYTES-2(dst), .Ls_exc\@) + SLLV t0, t0, t3 + or t2, t0 
+.Lcopy_bytes_done\@: + ADDC(sum, t2) +.Ldone\@: + /* fold checksum */ + .set push + .set noat +#ifdef USE_DOUBLE + dsll32 v1, sum, 0 + daddu sum, v1 + sltu v1, sum, v1 + dsra32 sum, sum, 0 + addu sum, v1 +#endif + +#ifdef CONFIG_CPU_MIPSR2 + wsbh v1, sum + movn sum, v1, odd +#else + beqz odd, 1f /* odd buffer alignment? */ + lui v1, 0x00ff + addu v1, 0x00ff + and t0, sum, v1 + sll t0, t0, 8 + srl sum, sum, 8 + and sum, sum, v1 + or sum, sum, t0 +1: +#endif + .set pop + .set reorder + ADDC32(sum, psum) + jr ra + .set noreorder + +.Ll_exc_copy\@: + /* + * Copy bytes from src until faulting load address (or until a + * lb faults) + * + * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) + * may be more than a byte beyond the last address. + * Hence, the lb below may get an exception. + * + * Assumes src < THREAD_BUADDR($28) + */ + LOADK t0, TI_TASK($28) + li t2, SHIFT_START + LOADK t0, THREAD_BUADDR(t0) +1: + LOADBU(t1, 0(src), .Ll_exc\@) + ADD src, src, 1 + sb t1, 0(dst) # can't fault -- we're copy_from_user + SLLV t1, t1, t2 + addu t2, SHIFT_INC + ADDC(sum, t1) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 + bne src, t0, 1b + .set noreorder +.Ll_exc\@: + LOADK t0, TI_TASK($28) + nop + LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address + nop + SUB len, AT, t0 # len number of uncopied bytes + /* + * Here's where we rely on src and dst being incremented in tandem, + * See (3) above. + * dst += (fault addr - src) to put dst at first byte to clear + */ + ADD dst, t0 # compute start address in a1 + SUB dst, src + /* + * Clear len bytes starting at dst. Can't call __bzero because it + * might modify len. An inefficient loop for these rare times... + */ + .set reorder /* DADDI_WAR */ + SUB src, len, 1 + beqz len, .Ldone\@ + .set noreorder +1: sb zero, 0(dst) + ADD dst, dst, 1 + .set push + .set noat +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + bnez src, 1b + SUB src, src, 1 +#else + li v1, 1 + bnez src, 1b + SUB src, src, v1 +#endif + li v1, -EFAULT + b .Ldone\@ + sw v1, (errptr) + +.Ls_exc\@: + li v0, -1 /* invalid checksum */ + li v1, -EFAULT + jr ra + sw v1, (errptr) + .set pop + .endm + +LEAF(__csum_partial_copy_kernel) +#ifndef CONFIG_EVA +FEXPORT(__csum_partial_copy_to_user) +FEXPORT(__csum_partial_copy_from_user) +#endif +__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1 +END(__csum_partial_copy_kernel) + +#ifdef CONFIG_EVA +LEAF(__csum_partial_copy_to_user) +__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0 +END(__csum_partial_copy_to_user) + +LEAF(__csum_partial_copy_from_user) +__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0 +END(__csum_partial_copy_from_user) +#endif diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c deleted file mode 100644 index 6e9f366f961..00000000000 --- a/arch/mips/lib/csum_partial_copy.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 1994, 1995 Waldorf Electronics GmbH - * Copyright (C) 1998, 1999 Ralf Baechle - */ -#include <linux/kernel.h> -#include <linux/types.h> -#include <asm/byteorder.h> -#include <asm/string.h> -#include <asm/uaccess.h> -#include <net/checksum.h> - -/* - * copy while checksumming, otherwise like csum_partial - */ -unsigned int csum_partial_copy_nocheck(const unsigned char *src, - unsigned char *dst, int len, unsigned int sum) -{ - /* - * It's 2:30 am and I don't feel like doing it real ... - * This is lots slower than the real thing (tm) - */ - sum = csum_partial(src, len, sum); - memcpy(dst, src, len); - - return sum; -} - -/* - * Copy from userspace and compute checksum. If we catch an exception - * then zero the rest of the buffer. - */ -unsigned int csum_partial_copy_from_user (const unsigned char __user *src, - unsigned char *dst, int len, unsigned int sum, int *err_ptr) -{ - int missing; - - might_sleep(); - missing = copy_from_user(dst, src, len); - if (missing) { - memset(dst + len - missing, 0, missing); - *err_ptr = -EFAULT; - } - - return csum_partial(dst, len, sum); -} diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c new file mode 100644 index 00000000000..21d27c6819a --- /dev/null +++ b/arch/mips/lib/delay.c @@ -0,0 +1,64 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994 by Waldorf Electronics + * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007, 2014 Maciej W. Rozycki + */ +#include <linux/module.h> +#include <linux/param.h> +#include <linux/smp.h> +#include <linux/stringify.h> + +#include <asm/asm.h> +#include <asm/compiler.h> +#include <asm/war.h> + +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS +#define GCC_DADDI_IMM_ASM() "I" +#else +#define GCC_DADDI_IMM_ASM() "r" +#endif + +void __delay(unsigned long loops) +{ + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " " __stringify(LONG_SUBU) " %0, %1 \n" + " .set reorder \n" + : "=r" (loops) + : GCC_DADDI_IMM_ASM() (1), "0" (loops)); +} +EXPORT_SYMBOL(__delay); + +/* + * Division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + */ + +void __udelay(unsigned long us) +{ + unsigned int lpj = raw_current_cpu_data.udelay_val; + + __delay((us * 0x000010c7ull * HZ * lpj) >> 32); +} +EXPORT_SYMBOL(__udelay); + +void __ndelay(unsigned long ns) +{ + unsigned int lpj = raw_current_cpu_data.udelay_val; + + __delay((ns * 0x00000005ull * HZ * lpj) >> 32); +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c new file mode 100644 index 00000000000..32b9f21bfd8 --- /dev/null +++ b/arch/mips/lib/dump_tlb.c @@ -0,0 +1,113 @@ +/* + * Dump R4x00 TLB for debugging purposes. + * + * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. + * Copyright (C) 1999 by Silicon Graphics, Inc. 
+ */ +#include <linux/kernel.h> +#include <linux/mm.h> + +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/tlbdebug.h> + +static inline const char *msk2str(unsigned int mask) +{ + switch (mask) { + case PM_4K: return "4kb"; + case PM_16K: return "16kb"; + case PM_64K: return "64kb"; + case PM_256K: return "256kb"; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case PM_8K: return "8kb"; + case PM_32K: return "32kb"; + case PM_128K: return "128kb"; + case PM_512K: return "512kb"; + case PM_2M: return "2Mb"; + case PM_8M: return "8Mb"; + case PM_32M: return "32Mb"; +#endif +#ifndef CONFIG_CPU_VR41XX + case PM_1M: return "1Mb"; + case PM_4M: return "4Mb"; + case PM_16M: return "16Mb"; + case PM_64M: return "64Mb"; + case PM_256M: return "256Mb"; + case PM_1G: return "1Gb"; +#endif + } + return ""; +} + +#define BARRIER() \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + "nop;nop;nop;nop;nop;nop;nop\n\t" \ + ".set\treorder"); + +static void dump_tlb(int first, int last) +{ + unsigned long s_entryhi, entryhi, asid; + unsigned long long entrylo0, entrylo1; + unsigned int s_index, s_pagemask, pagemask, c0, c1, i; + + s_pagemask = read_c0_pagemask(); + s_entryhi = read_c0_entryhi(); + s_index = read_c0_index(); + asid = s_entryhi & 0xff; + + for (i = first; i <= last; i++) { + write_c0_index(i); + BARRIER(); + tlb_read(); + BARRIER(); + pagemask = read_c0_pagemask(); + entryhi = read_c0_entryhi(); + entrylo0 = read_c0_entrylo0(); + entrylo1 = read_c0_entrylo1(); + + /* Unused entries have a virtual address of CKSEG0. */ + if ((entryhi & ~0x1ffffUL) != CKSEG0 + && (entryhi & 0xff) == asid) { +#ifdef CONFIG_32BIT + int width = 8; +#else + int width = 11; +#endif + /* + * Only print entries in use + */ + printk("Index: %2d pgmask=%s ", i, msk2str(pagemask)); + + c0 = (entrylo0 >> 3) & 7; + c1 = (entrylo1 >> 3) & 7; + + printk("va=%0*lx asid=%02lx\n", + width, (entryhi & ~0x1fffUL), + entryhi & 0xff); + printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", + width, + (entrylo0 << 6) & PAGE_MASK, c0, + (entrylo0 & 4) ? 1 : 0, + (entrylo0 & 2) ? 1 : 0, + (entrylo0 & 1) ? 1 : 0); + printk("[pa=%0*llx c=%d d=%d v=%d g=%d]\n", + width, + (entrylo1 << 6) & PAGE_MASK, c1, + (entrylo1 & 4) ? 1 : 0, + (entrylo1 & 2) ? 1 : 0, + (entrylo1 & 1) ? 1 : 0); + } + } + printk("\n"); + + write_c0_entryhi(s_entryhi); + write_c0_index(s_index); + write_c0_pagemask(s_pagemask); +} + +void dump_tlb_all(void) +{ + dump_tlb(0, current_cpu_data.tlbsize - 1); +} diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c new file mode 100644 index 00000000000..fd35daa4531 --- /dev/null +++ b/arch/mips/lib/iomap-pci.c @@ -0,0 +1,48 @@ +/* + * Implement the default iomap interfaces + * + * (C) Copyright 2004 Linus Torvalds + * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org> + * (C) Copyright 2007 MIPS Technologies, Inc. 
+ * written by Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/pci.h> +#include <linux/module.h> +#include <asm/io.h> + +void __iomem *__pci_ioport_map(struct pci_dev *dev, + unsigned long port, unsigned int nr) +{ + struct pci_controller *ctrl = dev->bus->sysdata; + unsigned long base = ctrl->io_map_base; + + /* This will eventually become a BUG_ON but for now be gentle */ + if (unlikely(!ctrl->io_map_base)) { + struct pci_bus *bus = dev->bus; + char name[8]; + + while (bus->parent) + bus = bus->parent; + + ctrl->io_map_base = base = mips_io_port_base; + + sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number); + printk(KERN_WARNING "io_map_base of root PCI bus %s unset. " + "Trying to continue but you better\nfix this issue or " + "report it to linux-mips@linux-mips.org or your " + "vendor.\n", name); +#ifdef CONFIG_PCI_DOMAINS + panic("To avoid data corruption io_map_base MUST be set with " + "multiple PCI domains."); +#endif + } + + return (void __iomem *) (ctrl->io_map_base + port); +} + +void pci_iounmap(struct pci_dev *dev, void __iomem * addr) +{ + iounmap(addr); +} + +EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c index b5d5fa83376..e3acb2dad33 100644 --- a/arch/mips/lib/iomap.c +++ b/arch/mips/lib/iomap.c @@ -1,78 +1,226 @@ /* - * iomap.c, Memory Mapped I/O routines for MIPS architecture. + * Implement the default iomap interfaces * - * This code is based on lib/iomap.c, by Linus Torvalds. - * - * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * (C) Copyright 2004 Linus Torvalds + * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org> + * (C) Copyright 2007 MIPS Technologies, Inc. + * written by Ralf Baechle <ralf@linux-mips.org> + */ +#include <linux/module.h> +#include <asm/io.h> + +/* + * Read/write from/to an (offsettable) iomem cookie. It might be a PIO + * access or a MMIO access, these functions don't care. The info is + * encoded in the hardware mapping set up by the mapping functions + * (or the cookie itself, depending on implementation and hw). * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * The generic routines don't assume any hardware mappings, and just + * encode the PIO/MMIO as part of the cookie. They coldly assume that + * the MMIO IO mappings are not in the low address range. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Architectures for which this is not true can't use this generic + * implementation and should do their own copy. 
*/ -#include <linux/ioport.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <asm/io.h> +#define PIO_MASK 0x0ffffUL -void __iomem *ioport_map(unsigned long port, unsigned int nr) +unsigned int ioread8(void __iomem *addr) { - unsigned long end; + return readb(addr); +} - end = port + nr - 1UL; - if (ioport_resource.start > port || - ioport_resource.end < end || port > end) - return NULL; +EXPORT_SYMBOL(ioread8); - return (void __iomem *)(mips_io_port_base + port); +unsigned int ioread16(void __iomem *addr) +{ + return readw(addr); } -void ioport_unmap(void __iomem *addr) +EXPORT_SYMBOL(ioread16); + +unsigned int ioread16be(void __iomem *addr) { + return be16_to_cpu(__raw_readw(addr)); } -EXPORT_SYMBOL(ioport_map); -EXPORT_SYMBOL(ioport_unmap); -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +EXPORT_SYMBOL(ioread16be); + +unsigned int ioread32(void __iomem *addr) { - unsigned long start, len, flags; + return readl(addr); +} - if (dev == NULL) - return NULL; +EXPORT_SYMBOL(ioread32); - start = pci_resource_start(dev, bar); - len = pci_resource_len(dev, bar); - if (!start || !len) - return NULL; +unsigned int ioread32be(void __iomem *addr) +{ + return be32_to_cpu(__raw_readl(addr)); +} + +EXPORT_SYMBOL(ioread32be); + +void iowrite8(u8 val, void __iomem *addr) +{ + writeb(val, addr); +} - if (maxlen != 0 && len > maxlen) - len = maxlen; +EXPORT_SYMBOL(iowrite8); - flags = pci_resource_flags(dev, bar); - if (flags & IORESOURCE_IO) - return ioport_map(start, len); - if (flags & IORESOURCE_MEM) { - if (flags & IORESOURCE_CACHEABLE) - return ioremap_cacheable_cow(start, len); - return ioremap_nocache(start, len); +void iowrite16(u16 val, void __iomem *addr) +{ + writew(val, addr); +} + +EXPORT_SYMBOL(iowrite16); + +void iowrite16be(u16 val, void __iomem *addr) +{ + __raw_writew(cpu_to_be16(val), addr); +} + +EXPORT_SYMBOL(iowrite16be); + +void iowrite32(u32 val, void __iomem *addr) +{ + writel(val, addr); +} + +EXPORT_SYMBOL(iowrite32); + +void iowrite32be(u32 val, void __iomem *addr) +{ + __raw_writel(cpu_to_be32(val), addr); +} + +EXPORT_SYMBOL(iowrite32be); + +/* + * These are the "repeat MMIO read/write" functions. + * Note the "__raw" accesses, since we don't want to + * convert to CPU byte order. We write in "IO byte + * order" (we also don't have IO barriers). 
+ */ +static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +{ + while (--count >= 0) { + u8 data = __raw_readb(addr); + *dst = data; + dst++; } +} - return NULL; +static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +{ + while (--count >= 0) { + u16 data = __raw_readw(addr); + *dst = data; + dst++; + } +} + +static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +{ + while (--count >= 0) { + u32 data = __raw_readl(addr); + *dst = data; + dst++; + } } -void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) { - iounmap(addr); + while (--count >= 0) { + __raw_writeb(*src, addr); + src++; + } } -EXPORT_SYMBOL(pci_iomap); -EXPORT_SYMBOL(pci_iounmap); + +static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) +{ + while (--count >= 0) { + __raw_writew(*src, addr); + src++; + } +} + +static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) +{ + while (--count >= 0) { + __raw_writel(*src, addr); + src++; + } +} + +void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insb(addr, dst, count); +} + +EXPORT_SYMBOL(ioread8_rep); + +void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insw(addr, dst, count); +} + +EXPORT_SYMBOL(ioread16_rep); + +void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insl(addr, dst, count); +} + +EXPORT_SYMBOL(ioread32_rep); + +void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsb(addr, src, count); +} + +EXPORT_SYMBOL(iowrite8_rep); + +void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsw(addr, src, count); +} + +EXPORT_SYMBOL(iowrite16_rep); + +void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsl(addr, src, count); +} + +EXPORT_SYMBOL(iowrite32_rep); + +/* + * Create a virtual mapping cookie for an IO port range + * + * This uses the same mapping are as the in/out family which has to be setup + * by the platform initialization code. + * + * Just to make matters somewhat more interesting on MIPS systems with + * multiple host bridge each will have it's own ioport address space. + */ +static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr) +{ + return (void __iomem *) (mips_io_port_base + port); +} + +void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + if (port > PIO_MASK) + return NULL; + + return ioport_map_legacy(port, nr); +} + +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ + /* Nothing to do */ +} + +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h new file mode 100644 index 00000000000..05909d58e2f --- /dev/null +++ b/arch/mips/lib/libgcc.h @@ -0,0 +1,25 @@ +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include <asm/byteorder.h> + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. 
+#endif + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c new file mode 100644 index 00000000000..dcf8d6810b7 --- /dev/null +++ b/arch/mips/lib/lshrdi3.c @@ -0,0 +1,29 @@ +#include <linux/module.h> + +#include "libgcc.h" + +long long __lshrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.high = 0; + w.s.low = (unsigned int) uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = (unsigned int) uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} + +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 7f9aafa4d80..c17ef80cf65 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -9,10 +9,11 @@ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde + * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2014 Imagination Technologies Ltd. * * Mnemonic names for arguments to memcpy/__copy_user */ -#include <linux/config.h> /* * Hack to resolve longstanding prefetch issue @@ -21,7 +22,7 @@ * end of memory on some systems. It's also a seriously bad idea on non * dma-coherent systems. */ -#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27) +#ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA @@ -85,11 +86,51 @@ * they're not protected. */ -#define EXC(inst_reg,addr,handler) \ -9: inst_reg, addr; \ - .section __ex_table,"a"; \ - PTR 9b, handler; \ - .previous +/* Instruction type */ +#define LD_INSN 1 +#define ST_INSN 2 +/* Pretech type */ +#define SRC_PREFETCH 1 +#define DST_PREFETCH 2 +#define LEGACY_MODE 1 +#define EVA_MODE 2 +#define USEROP 1 +#define KERNELOP 2 + +/* + * Wrapper to add an entry in the exception table + * in case the insn causes a memory exception. + * Arguments: + * insn : Load/store instruction + * type : Instruction type + * reg : Register + * addr : Address + * handler : Exception handler + */ + +#define EXC(insn, type, reg, addr, handler) \ + .if \mode == LEGACY_MODE; \ +9: insn reg, addr; \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous; \ + /* This is assembled in EVA mode */ \ + .else; \ + /* If loading from user or storing to user */ \ + .if ((\from == USEROP) && (type == LD_INSN)) || \ + ((\to == USEROP) && (type == ST_INSN)); \ +9: __BUILD_EVA_INSN(insn##e, reg, addr); \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous; \ + .else; \ + /* \ + * Still in EVA, but no need for \ + * exception handler or EVA insn \ + */ \ + insn reg, addr; \ + .endif; \ + .endif /* * Only on the 64-bit kernel we can made use of 64-bit registers. 
@@ -100,12 +141,13 @@ #ifdef USE_DOUBLE -#define LOAD ld -#define LOADL ldl -#define LOADR ldr -#define STOREL sdl -#define STORER sdr -#define STORE sd +#define LOADK ld /* No exception */ +#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) +#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) +#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) +#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) +#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) +#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) #define ADD daddu #define SUB dsubu #define SRL dsrl @@ -136,12 +178,13 @@ #else -#define LOAD lw -#define LOADL lwl -#define LOADR lwr -#define STOREL swl -#define STORER swr -#define STORE sw +#define LOADK lw /* No exception */ +#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) +#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) +#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) +#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) +#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) +#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) #define ADD addu #define SUB subu #define SRL srl @@ -154,17 +197,44 @@ #endif /* USE_DOUBLE */ +#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler) +#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) + +#define _PREF(hint, addr, type) \ + .if \mode == LEGACY_MODE; \ + PREF(hint, addr); \ + .else; \ + .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \ + ((\to == USEROP) && (type == DST_PREFETCH)); \ + /* \ + * PREFE has only 9 bits for the offset \ + * compared to PREF which has 16, so it may \ + * need to use the $at register but this \ + * register should remain intact because it's \ + * used later on. Therefore use $v1. \ + */ \ + .set at=v1; \ + PREFE(hint, addr); \ + .set noat; \ + .else; \ + PREF(hint, addr); \ + .endif; \ + .endif + +#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH) +#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH) + #ifdef CONFIG_CPU_LITTLE_ENDIAN #define LDFIRST LOADR -#define LDREST LOADL +#define LDREST LOADL #define STFIRST STORER -#define STREST STOREL +#define STREST STOREL #define SHIFT_DISCARD SLLV #else #define LDFIRST LOADL -#define LDREST LOADR +#define LDREST LOADR #define STFIRST STOREL -#define STREST STORER +#define STREST STORER #define SHIFT_DISCARD SRLV #endif @@ -176,25 +246,36 @@ .text .set noreorder +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat +#else + .set at=v1 +#endif -/* - * A combined memcpy/__copy_user - * __copy_user sets len to 0 for success; else to an upper bound of - * the number of uncopied bytes. - * memcpy sets v0 to dst. - */ .align 5 -LEAF(memcpy) /* a0=dst a1=src a2=len */ - move v0, dst /* return value */ -__memcpy: -FEXPORT(__copy_user) + + /* + * Macro to build the __copy_user common code + * Arguements: + * mode : LEGACY_MODE or EVA_MODE + * from : Source operand. USEROP or KERNELOP + * to : Destination operand. 
USEROP or KERNELOP + */ + .macro __BUILD_COPY_USER mode, from, to + + /* initialize __memcpy if this the first time we execute this macro */ + .ifnotdef __memcpy + .set __memcpy, 1 + .hidden __memcpy /* make sure it does not leak */ + .endif + /* * Note: dst & src may be unaligned, len may be 0 * Temps */ #define rem t8 + R10KCBARRIER(0(ra)) /* * The "issue break"s below are very approximate. * Issue delays for dcache fills will perturb the schedule, as will @@ -202,96 +283,103 @@ FEXPORT(__copy_user) * * If len < NBYTES use byte operations. */ - PREF( 0, 0(src) ) - PREF( 1, 0(dst) ) + PREFS( 0, 0(src) ) + PREFD( 1, 0(dst) ) sltu t2, len, NBYTES and t1, dst, ADDRMASK - PREF( 0, 1*32(src) ) - PREF( 1, 1*32(dst) ) - bnez t2, copy_bytes_checklen + PREFS( 0, 1*32(src) ) + PREFD( 1, 1*32(dst) ) + bnez t2, .Lcopy_bytes_checklen\@ and t0, src, ADDRMASK - PREF( 0, 2*32(src) ) - PREF( 1, 2*32(dst) ) - bnez t1, dst_unaligned + PREFS( 0, 2*32(src) ) + PREFD( 1, 2*32(dst) ) + bnez t1, .Ldst_unaligned\@ nop - bnez t0, src_unaligned_dst_aligned + bnez t0, .Lsrc_unaligned_dst_aligned\@ /* * use delay slot for fall-through * src and dst are aligned; need to compute rem */ -both_aligned: - SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter - beqz t0, cleanup_both_aligned # len < 8*NBYTES +.Lboth_aligned\@: + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) - PREF( 0, 3*32(src) ) - PREF( 1, 3*32(dst) ) + PREFS( 0, 3*32(src) ) + PREFD( 1, 3*32(dst) ) .align 4 1: -EXC( LOAD t0, UNIT(0)(src), l_exc) -EXC( LOAD t1, UNIT(1)(src), l_exc_copy) -EXC( LOAD t2, UNIT(2)(src), l_exc_copy) -EXC( LOAD t3, UNIT(3)(src), l_exc_copy) + R10KCBARRIER(0(ra)) + LOAD(t0, UNIT(0)(src), .Ll_exc\@) + LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) + LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) + LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 8*NBYTES -EXC( LOAD t4, UNIT(4)(src), l_exc_copy) -EXC( LOAD t7, UNIT(5)(src), l_exc_copy) -EXC( STORE t0, UNIT(0)(dst), s_exc_p8u) -EXC( STORE t1, UNIT(1)(dst), s_exc_p7u) -EXC( LOAD t0, UNIT(6)(src), l_exc_copy) -EXC( LOAD t1, UNIT(7)(src), l_exc_copy) + LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) + LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@) + STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@) + STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@) + LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@) + LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@) ADD src, src, 8*NBYTES ADD dst, dst, 8*NBYTES -EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) -EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) -EXC( STORE t4, UNIT(-4)(dst), s_exc_p4u) -EXC( STORE t7, UNIT(-3)(dst), s_exc_p3u) -EXC( STORE t0, UNIT(-2)(dst), s_exc_p2u) -EXC( STORE t1, UNIT(-1)(dst), s_exc_p1u) - PREF( 0, 8*32(src) ) - PREF( 1, 8*32(dst) ) + STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@) + STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@) + STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@) + STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@) + STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@) + STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@) + PREFS( 0, 8*32(src) ) + PREFD( 1, 8*32(dst) ) bne len, rem, 1b nop /* * len == rem == the number of bytes left to copy < 8*NBYTES */ -cleanup_both_aligned: - beqz len, done +.Lcleanup_both_aligned\@: + beqz len, .Ldone\@ sltu t0, len, 4*NBYTES - bnez t0, less_than_4units + bnez t0, .Lless_than_4units\@ and rem, len, (NBYTES-1) # rem = len % NBYTES /* * len >= 4*NBYTES */ -EXC( LOAD t0, UNIT(0)(src), l_exc) -EXC( LOAD t1, UNIT(1)(src), l_exc_copy) -EXC( LOAD t2, UNIT(2)(src), l_exc_copy) -EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 
+ LOAD( t0, UNIT(0)(src), .Ll_exc\@) + LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@) + LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@) + LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@) SUB len, len, 4*NBYTES ADD src, src, 4*NBYTES -EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) -EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) -EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) -EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) - beqz len, done - ADD dst, dst, 4*NBYTES -less_than_4units: + R10KCBARRIER(0(ra)) + STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) + STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) + STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) + STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + beqz len, .Ldone\@ + .set noreorder +.Lless_than_4units\@: /* * rem = len % NBYTES */ - beq rem, len, copy_bytes + beq rem, len, .Lcopy_bytes\@ nop 1: -EXC( LOAD t0, 0(src), l_exc) + R10KCBARRIER(0(ra)) + LOAD(t0, 0(src), .Ll_exc\@) ADD src, src, NBYTES SUB len, len, NBYTES -EXC( STORE t0, 0(dst), s_exc_p1u) + STORE(t0, 0(dst), .Ls_exc_p1u\@) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne rem, len, 1b - ADD dst, dst, NBYTES + .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch - * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE + * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE * because can't assume read-access to dst. Instead, use * STREST dst, which doesn't require read access to dst. * @@ -300,17 +388,17 @@ EXC( STORE t0, 0(dst), s_exc_p1u) * more instruction-level parallelism. */ #define bits t2 - beqz len, done + beqz len, .Ldone\@ ADD t1, dst, len # t1 is just past last byte of dst li bits, 8*NBYTES SLL rem, len, 3 # rem = number of bits to keep -EXC( LOAD t0, 0(src), l_exc) - SUB bits, bits, rem # bits = number of bits to discard + LOAD(t0, 0(src), .Ll_exc\@) + SUB bits, bits, rem # bits = number of bits to discard SHIFT_DISCARD t0, t0, bits -EXC( STREST t0, -1(t1), s_exc) + STREST(t0, -1(t1), .Ls_exc\@) jr ra move len, zero -dst_unaligned: +.Ldst_unaligned\@: /* * dst is unaligned * t0 = src & ADDRMASK @@ -321,24 +409,25 @@ dst_unaligned: * Set match = (src and dst have same alignment) */ #define match rem -EXC( LDFIRST t3, FIRST(0)(src), l_exc) + LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) ADD t2, zero, NBYTES -EXC( LDREST t3, REST(0)(src), l_exc_copy) + LDREST(t3, REST(0)(src), .Ll_exc_copy\@) SUB t2, t2, t1 # t2 = number of bytes copied xor match, t0, t1 -EXC( STFIRST t3, FIRST(0)(dst), s_exc) - beq len, t2, done + R10KCBARRIER(0(ra)) + STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) + beq len, t2, .Ldone\@ SUB len, len, t2 ADD dst, dst, t2 - beqz match, both_aligned + beqz match, .Lboth_aligned\@ ADD src, src, t2 -src_unaligned_dst_aligned: - SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter - PREF( 0, 3*32(src) ) - beqz t0, cleanup_src_unaligned - and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES - PREF( 1, 3*32(dst) ) +.Lsrc_unaligned_dst_aligned\@: + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + PREFS( 0, 3*32(src) ) + beqz t0, .Lcleanup_src_unaligned\@ + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES + PREFD( 1, 3*32(dst) ) 1: /* * Avoid consecutive LD*'s to the same register since some mips @@ -346,52 +435,59 @@ src_unaligned_dst_aligned: * It's OK to load FIRST(N+1) before REST(N) because the two addresses * are to the same unit (unless src is aligned, but it's not). 
*/ -EXC( LDFIRST t0, FIRST(0)(src), l_exc) -EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) - SUB len, len, 4*NBYTES -EXC( LDREST t0, REST(0)(src), l_exc_copy) -EXC( LDREST t1, REST(1)(src), l_exc_copy) -EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) -EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) -EXC( LDREST t2, REST(2)(src), l_exc_copy) -EXC( LDREST t3, REST(3)(src), l_exc_copy) - PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) + R10KCBARRIER(0(ra)) + LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) + LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) + SUB len, len, 4*NBYTES + LDREST(t0, REST(0)(src), .Ll_exc_copy\@) + LDREST(t1, REST(1)(src), .Ll_exc_copy\@) + LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) + LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) + LDREST(t2, REST(2)(src), .Ll_exc_copy\@) + LDREST(t3, REST(3)(src), .Ll_exc_copy\@) + PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif -EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) -EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) -EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) -EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) - PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@) + STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@) + STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@) + STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@) + PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES bne len, rem, 1b - ADD dst, dst, 4*NBYTES + .set noreorder -cleanup_src_unaligned: - beqz len, done +.Lcleanup_src_unaligned\@: + beqz len, .Ldone\@ and rem, len, NBYTES-1 # rem = len % NBYTES - beq rem, len, copy_bytes + beq rem, len, .Lcopy_bytes\@ nop 1: -EXC( LDFIRST t0, FIRST(0)(src), l_exc) -EXC( LDREST t0, REST(0)(src), l_exc_copy) + R10KCBARRIER(0(ra)) + LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) + LDREST(t0, REST(0)(src), .Ll_exc_copy\@) ADD src, src, NBYTES SUB len, len, NBYTES -EXC( STORE t0, 0(dst), s_exc_p1u) + STORE(t0, 0(dst), .Ls_exc_p1u\@) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne len, rem, 1b - ADD dst, dst, NBYTES + .set noreorder -copy_bytes_checklen: - beqz len, done +.Lcopy_bytes_checklen\@: + beqz len, .Ldone\@ nop -copy_bytes: +.Lcopy_bytes\@: /* 0 < len < NBYTES */ + R10KCBARRIER(0(ra)) #define COPY_BYTE(N) \ -EXC( lb t0, N(src), l_exc); \ + LOADB(t0, N(src), .Ll_exc\@); \ SUB len, len, 1; \ - beqz len, done; \ -EXC( sb t0, N(dst), s_exc_p1) + beqz len, .Ldone\@; \ + STOREB(t0, N(dst), .Ls_exc_p1\@) COPY_BYTE(0) COPY_BYTE(1) @@ -401,16 +497,19 @@ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(4) COPY_BYTE(5) #endif -EXC( lb t0, NBYTES-2(src), l_exc) + LOADB(t0, NBYTES-2(src), .Ll_exc\@) SUB len, len, 1 jr ra -EXC( sb t0, NBYTES-2(dst), s_exc_p1) -done: + STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) +.Ldone\@: jr ra - nop + .if __memcpy == 1 END(memcpy) + .set __memcpy, 0 + .hidden __memcpy + .endif -l_exc_copy: +.Ll_exc_copy\@: /* * Copy bytes from src until faulting load address (or until a * lb faults) @@ -421,21 +520,24 @@ l_exc_copy: * * Assumes src < THREAD_BUADDR($28) */ - LOAD t0, TI_TASK($28) + LOADK t0, TI_TASK($28) nop - LOAD t0, THREAD_BUADDR(t0) + LOADK t0, THREAD_BUADDR(t0) 1: -EXC( lb t1, 0(src), l_exc) + LOADB(t1, 0(src), .Ll_exc\@) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 bne src, t0, 1b - ADD dst, dst, 1 -l_exc: - LOAD t0, TI_TASK($28) + .set noreorder +.Ll_exc\@: + LOADK t0, TI_TASK($28) nop - LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good 
address + LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes + bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */ /* * Here's where we rely on src and dst being incremented in tandem, * See (3) above. @@ -447,20 +549,33 @@ l_exc: * Clear len bytes starting at dst. Can't call __bzero because it * might modify len. An inefficient loop for these rare times... */ - beqz len, done - SUB src, len, 1 + .set reorder /* DADDI_WAR */ + SUB src, len, 1 + beqz len, .Ldone\@ + .set noreorder 1: sb zero, 0(dst) ADD dst, dst, 1 +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS bnez src, 1b SUB src, src, 1 +#else + .set push + .set noat + li v1, 1 + bnez src, 1b + SUB src, src, v1 + .set pop +#endif jr ra nop -#define SEXC(n) \ -s_exc_p ## n ## u: \ - jr ra; \ - ADD len, len, n*NBYTES +#define SEXC(n) \ + .set reorder; /* DADDI_WAR */ \ +.Ls_exc_p ## n ## u\@: \ + ADD len, len, n*NBYTES; \ + jr ra; \ + .set noreorder SEXC(8) SEXC(7) @@ -471,12 +586,15 @@ SEXC(3) SEXC(2) SEXC(1) -s_exc_p1: +.Ls_exc_p1\@: + .set reorder /* DADDI_WAR */ + ADD len, len, 1 jr ra - ADD len, len, 1 -s_exc: + .set noreorder +.Ls_exc\@: jr ra nop + .endm .align 5 LEAF(memmove) @@ -485,39 +603,113 @@ LEAF(memmove) sltu t0, a1, t0 # dst + len <= src -> memcpy sltu t1, a0, t1 # dst >= src + len -> memcpy and t0, t1 - beqz t0, __memcpy + beqz t0, .L__memcpy move v0, a0 /* return value */ - beqz a2, r_out + beqz a2, .Lr_out END(memmove) /* fall through to __rmemcpy */ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ sltu t0, a1, a0 - beqz t0, r_end_bytes_up # src >= dst + beqz t0, .Lr_end_bytes_up # src >= dst nop ADD a0, a2 # dst = dst + len ADD a1, a2 # src = src + len -r_end_bytes: +.Lr_end_bytes: + R10KCBARRIER(0(ra)) lb t0, -1(a1) SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 - bnez a2, r_end_bytes - SUB a0, a0, 0x1 + .set reorder /* DADDI_WAR */ + SUB a0, a0, 0x1 + bnez a2, .Lr_end_bytes + .set noreorder -r_out: +.Lr_out: jr ra move a2, zero -r_end_bytes_up: +.Lr_end_bytes_up: + R10KCBARRIER(0(ra)) lb t0, (a1) SUB a2, a2, 0x1 sb t0, (a0) ADD a1, a1, 0x1 - bnez a2, r_end_bytes_up - ADD a0, a0, 0x1 + .set reorder /* DADDI_WAR */ + ADD a0, a0, 0x1 + bnez a2, .Lr_end_bytes_up + .set noreorder jr ra move a2, zero END(__rmemcpy) + +/* + * t6 is used as a flag to note inatomic mode. + */ +LEAF(__copy_user_inatomic) + b __copy_user_common + li t6, 1 + END(__copy_user_inatomic) + +/* + * A combined memcpy/__copy_user + * __copy_user sets len to 0 for success; else to an upper bound of + * the number of uncopied bytes. + * memcpy sets v0 to dst. + */ + .align 5 +LEAF(memcpy) /* a0=dst a1=src a2=len */ + move v0, dst /* return value */ +.L__memcpy: +FEXPORT(__copy_user) + li t6, 0 /* not inatomic */ +__copy_user_common: + /* Legacy Mode, user <-> user */ + __BUILD_COPY_USER LEGACY_MODE USEROP USEROP + +#ifdef CONFIG_EVA + +/* + * For EVA we need distinct symbols for reading and writing to user space. 
+ * This is because we need to use specific EVA instructions to perform the + * virtual <-> physical translation when a virtual address is actually in user + * space + */ + +LEAF(__copy_user_inatomic_eva) + b __copy_from_user_common + li t6, 1 + END(__copy_user_inatomic_eva) + +/* + * __copy_from_user (EVA) + */ + +LEAF(__copy_from_user_eva) + li t6, 0 /* not inatomic */ +__copy_from_user_common: + __BUILD_COPY_USER EVA_MODE USEROP KERNELOP +END(__copy_from_user_eva) + + + +/* + * __copy_to_user (EVA) + */ + +LEAF(__copy_to_user_eva) +__BUILD_COPY_USER EVA_MODE KERNELOP USEROP +END(__copy_to_user_eva) + +/* + * __copy_in_user (EVA) + */ + +LEAF(__copy_in_user_eva) +__BUILD_COPY_USER EVA_MODE USEROP USEROP +END(__copy_in_user_eva) + +#endif diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S new file mode 100644 index 00000000000..7b0e5462ca5 --- /dev/null +++ b/arch/mips/lib/memset.S @@ -0,0 +1,248 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998, 1999, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 by Maciej W. Rozycki + * Copyright (C) 2011, 2012 MIPS Technologies, Inc. + */ +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/regdef.h> + +#if LONGSIZE == 4 +#define LONG_S_L swl +#define LONG_S_R swr +#else +#define LONG_S_L sdl +#define LONG_S_R sdr +#endif + +#ifdef CONFIG_CPU_MICROMIPS +#define STORSIZE (LONGSIZE * 2) +#define STORMASK (STORSIZE - 1) +#define FILL64RG t8 +#define FILLPTRG t7 +#undef LONG_S +#define LONG_S LONG_SP +#else +#define STORSIZE LONGSIZE +#define STORMASK LONGMASK +#define FILL64RG a1 +#define FILLPTRG t0 +#endif + +#define LEGACY_MODE 1 +#define EVA_MODE 2 + +/* + * No need to protect it with EVA #ifdefery. The generated block of code + * will never be assembled if EVA is not enabled. 
+ */ +#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr) +#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr) + +#define EX(insn,reg,addr,handler) \ + .if \mode == LEGACY_MODE; \ +9: insn reg, addr; \ + .else; \ +9: ___BUILD_EVA_INSN(insn, reg, addr); \ + .endif; \ + .section __ex_table,"a"; \ + PTR 9b, handler; \ + .previous + + .macro f_fill64 dst, offset, val, fixup, mode + EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) +#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) + EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) +#endif +#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) + EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup) +#endif + .endm + + .set noreorder + .align 5 + + /* + * Macro to generate the __bzero{,_user} symbol + * Arguments: + * mode: LEGACY_MODE or EVA_MODE + */ + .macro __BUILD_BZERO mode + /* Initialize __memset if this is the first time we call this macro */ + .ifnotdef __memset + .set __memset, 1 + .hidden __memset /* Make sure it does not leak */ + .endif + + sltiu t0, a2, STORSIZE /* very small region? */ + bnez t0, .Lsmall_memset\@ + andi t0, a0, STORMASK /* aligned? */ + +#ifdef CONFIG_CPU_MICROMIPS + move t8, a1 /* used by 'swp' instruction */ + move t9, a1 +#endif +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + beqz t0, 1f + PTR_SUBU t0, STORSIZE /* alignment in bytes */ +#else + .set noat + li AT, STORSIZE + beqz t0, 1f + PTR_SUBU t0, AT /* alignment in bytes */ + .set at +#endif + + R10KCBARRIER(0(ra)) +#ifdef __MIPSEB__ + EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ +#endif +#ifdef __MIPSEL__ + EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ +#endif + PTR_SUBU a0, t0 /* long align ptr */ + PTR_ADDU a2, t0 /* correct size */ + +1: ori t1, a2, 0x3f /* # of full blocks */ + xori t1, 0x3f + beqz t1, .Lmemset_partial\@ /* no block to fill */ + andi t0, a2, 0x40-STORSIZE + + PTR_ADDU t1, a0 /* end address */ + .set reorder +1: PTR_ADDIU a0, 64 + R10KCBARRIER(0(ra)) + f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode + bne t1, a0, 1b + .set noreorder + +.Lmemset_partial\@: + R10KCBARRIER(0(ra)) + PTR_LA t1, 2f /* where to start */ +#ifdef CONFIG_CPU_MICROMIPS + LONG_SRL t7, t0, 1 +#endif +#if LONGSIZE == 4 + PTR_SUBU t1, FILLPTRG +#else + .set noat + LONG_SRL AT, FILLPTRG, 1 + PTR_SUBU t1, AT + .set at +#endif + jr t1 + PTR_ADDU a0, t0 /* dest ptr */ + + .set push + .set noreorder + .set nomacro + /* ... but first do longs ... 
*/ + f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode +2: .set pop + andi a2, STORMASK /* At most one long to go */ + + beqz a2, 1f + PTR_ADDU a0, a2 /* What's left */ + R10KCBARRIER(0(ra)) +#ifdef __MIPSEB__ + EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) +#endif +#ifdef __MIPSEL__ + EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) +#endif +1: jr ra + move a2, zero + +.Lsmall_memset\@: + beqz a2, 2f + PTR_ADDU t1, a0, a2 + +1: PTR_ADDIU a0, 1 /* fill bytewise */ + R10KCBARRIER(0(ra)) + bne t1, a0, 1b + sb a1, -1(a0) + +2: jr ra /* done */ + move a2, zero + .if __memset == 1 + END(memset) + .set __memset, 0 + .hidden __memset + .endif + +.Lfirst_fixup\@: + jr ra + nop + +.Lfwd_fixup\@: + PTR_L t0, TI_TASK($28) + andi a2, 0x3f + LONG_L t0, THREAD_BUADDR(t0) + LONG_ADDU a2, t1 + jr ra + LONG_SUBU a2, t0 + +.Lpartial_fixup\@: + PTR_L t0, TI_TASK($28) + andi a2, STORMASK + LONG_L t0, THREAD_BUADDR(t0) + LONG_ADDU a2, t1 + jr ra + LONG_SUBU a2, t0 + +.Llast_fixup\@: + jr ra + andi v1, a2, STORMASK + + .endm + +/* + * memset(void *s, int c, size_t n) + * + * a0: start of area to clear + * a1: char to fill with + * a2: size of area to clear + */ + +LEAF(memset) + beqz a1, 1f + move v0, a0 /* result */ + + andi a1, 0xff /* spread fillword */ + LONG_SLL t1, a1, 8 + or a1, t1 + LONG_SLL t1, a1, 16 +#if LONGSIZE == 8 + or a1, t1 + LONG_SLL t1, a1, 32 +#endif + or a1, t1 +1: +#ifndef CONFIG_EVA +FEXPORT(__bzero) +#endif + __BUILD_BZERO LEGACY_MODE + +#ifdef CONFIG_EVA +LEAF(__bzero) + __BUILD_BZERO EVA_MODE +END(__bzero) +#endif diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c new file mode 100644 index 00000000000..57bcdaf1f1c --- /dev/null +++ b/arch/mips/lib/mips-atomic.c @@ -0,0 +1,161 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#include <asm/irqflags.h> +#include <asm/hazards.h> +#include <linux/compiler.h> +#include <linux/preempt.h> +#include <linux/export.h> +#include <linux/stringify.h> + +#ifndef CONFIG_CPU_MIPSR2 + +/* + * For cli() we have to insert nops to make sure that the new value + * has actually arrived in the status register before the end of this + * macro. + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs + * no nops at all. + */ +/* + * For TX49, operating only IE bit is not enough. + * + * If mfc0 $12 follows store and the mfc0 is last instruction of a + * page and fetching the next instruction causes TLB miss, the result + * of the mfc0 might wrongly contain EXL bit. + * + * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 + * + * Workaround: mask EXL bit of the result or place a nop before mfc0. 
+ */ +notrace void arch_local_irq_disable(void) +{ + preempt_disable(); + + __asm__ __volatile__( + " .set push \n" + " .set noat \n" +#if defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1f \n" + " .set noreorder \n" + " mtc0 $1,$12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : /* no outputs */ + : /* no inputs */ + : "memory"); + + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_disable); + + +notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + preempt_disable(); + + __asm__ __volatile__( + " .set push \n" + " .set reorder \n" + " .set noat \n" +#if defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 %[flags], $12 \n" + " ori $1, %[flags], 0x1f \n" + " xori $1, 0x1f \n" + " .set noreorder \n" + " mtc0 $1, $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (flags) + : /* no inputs */ + : "memory"); + + preempt_enable(); + + return flags; +} +EXPORT_SYMBOL(arch_local_irq_save); + +notrace void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + preempt_disable(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi %[flags], 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); + + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_restore); + + +notrace void __arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + preempt_disable(); + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi %[flags], 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); + + preempt_enable(); +} +EXPORT_SYMBOL(__arch_local_irq_restore); + +#endif /* !CONFIG_CPU_MIPSR2 */ diff --git a/arch/mips/lib/promlib.c b/arch/mips/lib/promlib.c deleted file mode 100644 index dddfe98b4de..00000000000 --- a/arch/mips/lib/promlib.c +++ /dev/null @@ -1,24 +0,0 @@ -#include <stdarg.h> -#include <linux/kernel.h> - -extern void prom_putchar(char); - -void prom_printf(char *fmt, ...) -{ - va_list args; - char ppbuf[1024]; - char *bptr; - - va_start(args, fmt); - vsprintf(ppbuf, fmt, args); - - bptr = ppbuf; - - while (*bptr != 0) { - if (*bptr == '\n') - prom_putchar('\r'); - - prom_putchar(*bptr++); - } - va_end(args); -} diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c new file mode 100644 index 00000000000..91615c2ef0c --- /dev/null +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -0,0 +1,63 @@ +/* + * Dump R3000 TLB for debugging purposes. + * + * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. + * Copyright (C) 1999 by Silicon Graphics, Inc. 
+ * Copyright (C) 1999 by Harald Koerfgen + */ +#include <linux/kernel.h> +#include <linux/mm.h> + +#include <asm/mipsregs.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/tlbdebug.h> + +extern int r3k_have_wired_reg; /* defined in tlb-r3k.c */ + +static void dump_tlb(int first, int last) +{ + int i; + unsigned int asid; + unsigned long entryhi, entrylo0; + + asid = read_c0_entryhi() & 0xfc0; + + for (i = first; i <= last; i++) { + write_c0_index(i<<8); + __asm__ __volatile__( + ".set\tnoreorder\n\t" + "tlbr\n\t" + "nop\n\t" + ".set\treorder"); + entryhi = read_c0_entryhi(); + entrylo0 = read_c0_entrylo0(); + + /* Unused entries have a virtual address of KSEG0. */ + if ((entryhi & 0xffffe000) != 0x80000000 + && (entryhi & 0xfc0) == asid) { + /* + * Only print entries in use + */ + printk("Index: %2d ", i); + + printk("va=%08lx asid=%08lx" + " [pa=%06lx n=%d d=%d v=%d g=%d]", + (entryhi & 0xffffe000), + entryhi & 0xfc0, + entrylo0 & PAGE_MASK, + (entrylo0 & (1 << 11)) ? 1 : 0, + (entrylo0 & (1 << 10)) ? 1 : 0, + (entrylo0 & (1 << 9)) ? 1 : 0, + (entrylo0 & (1 << 8)) ? 1 : 0); + } + } + printk("\n"); + + write_c0_entryhi(asid); +} + +void dump_tlb_all(void) +{ + dump_tlb(0, current_cpu_data.tlbsize - 1); +} diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S index eca558d83a3..bef65c98df5 100644 --- a/arch/mips/lib/strlen_user.S +++ b/arch/mips/lib/strlen_user.S @@ -3,8 +3,9 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle - * Copyright (c) 1999 Silicon Graphics, Inc. + * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> @@ -21,19 +22,43 @@ * * Return 0 for error */ -LEAF(__strlen_user_asm) + .macro __BUILD_STRLEN_ASM func +LEAF(__strlen_\func\()_asm) LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? and v0, a0 - bnez v0, fault + bnez v0, .Lfault\@ -FEXPORT(__strlen_user_nocheck_asm) +FEXPORT(__strlen_\func\()_nocheck_asm) move v0, a0 -1: EX(lb, t0, (v0), fault) +.ifeqs "\func", "kernel" +1: EX(lbu, v1, (v0), .Lfault\@) +.else +1: EX(lbue, v1, (v0), .Lfault\@) +.endif PTR_ADDIU v0, 1 - bnez t0, 1b + bnez v1, 1b PTR_SUBU v0, a0 jr ra - END(__strlen_user_asm) + END(__strlen_\func\()_asm) -fault: move v0, zero +.Lfault\@: move v0, zero jr ra + .endm + +#ifndef CONFIG_EVA + /* Set aliases */ + .global __strlen_user_asm + .global __strlen_user_nocheck_asm + .set __strlen_user_asm, __strlen_kernel_asm + .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm +#endif + +__BUILD_STRLEN_ASM kernel + +#ifdef CONFIG_EVA + + .set push + .set eva +__BUILD_STRLEN_ASM user + .set pop +#endif diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index d16c76fbfac..3c32baf8b49 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -3,7 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1996, 1999 by Ralf Baechle + * Copyright (C) 1996, 1999 by Ralf Baechle + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> @@ -23,36 +24,61 @@ /* * Ugly special case have to check: we might get passed a user space - * pointer which wraps into the kernel space. We don't deal with that. If + * pointer which wraps into the kernel space. We don't deal with that. 
If * it happens at most some bytes of the exceptions handlers will be copied. */ -LEAF(__strncpy_from_user_asm) + .macro __BUILD_STRNCPY_ASM func +LEAF(__strncpy_from_\func\()_asm) LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? and v0, a1 - bnez v0, fault + bnez v0, .Lfault\@ -FEXPORT(__strncpy_from_user_nocheck_asm) - move v0, zero +FEXPORT(__strncpy_from_\func\()_nocheck_asm) + move t0, zero move v1, a1 - .set noreorder -1: EX(lbu, t0, (v1), fault) +.ifeqs "\func","kernel" +1: EX(lbu, v0, (v1), .Lfault\@) +.else +1: EX(lbue, v0, (v1), .Lfault\@) +.endif PTR_ADDIU v1, 1 - beqz t0, 2f - sb t0, (a0) - PTR_ADDIU v0, 1 - bne v0, a2, 1b - PTR_ADDIU a0, 1 - .set reorder -2: PTR_ADDU t0, a1, v0 - xor t0, a1 - bltz t0, fault + R10KCBARRIER(0(ra)) + sb v0, (a0) + beqz v0, 2f + PTR_ADDIU t0, 1 + PTR_ADDIU a0, 1 + bne t0, a2, 1b +2: PTR_ADDU v0, a1, t0 + xor v0, a1 + bltz v0, .Lfault\@ + move v0, t0 jr ra # return n - END(__strncpy_from_user_asm) + END(__strncpy_from_\func\()_asm) -fault: li v0, -EFAULT +.Lfault\@: + li v0, -EFAULT jr ra .section __ex_table,"a" - PTR 1b, fault + PTR 1b, .Lfault\@ .previous + + .endm + +#ifndef CONFIG_EVA + /* Set aliases */ + .global __strncpy_from_user_asm + .global __strncpy_from_user_nocheck_asm + .set __strncpy_from_user_asm, __strncpy_from_kernel_asm + .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm +#endif + +__BUILD_STRNCPY_ASM kernel + +#ifdef CONFIG_EVA + .set push + .set eva +__BUILD_STRNCPY_ASM user + .set pop +#endif diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index c0ea15194a0..f3af6995e2a 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -17,29 +17,54 @@ .previous /* - * Return the size of a string including the ending NUL character upto a + * Return the size of a string including the ending NUL character up to a * maximum of a1 or 0 in case of error. * * Note: for performance reasons we deliberately accept that a user may - * make strlen_user and strnlen_user access the first few KSEG0 - * bytes. There's nothing secret there. On 64-bit accessing beyond - * the maximum is a tad hairier ... + * make strlen_user and strnlen_user access the first few KSEG0 + * bytes. There's nothing secret there. On 64-bit accessing beyond + * the maximum is a tad hairier ... */ -LEAF(__strnlen_user_asm) + .macro __BUILD_STRNLEN_ASM func +LEAF(__strnlen_\func\()_asm) LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? and v0, a0 - bnez v0, fault + bnez v0, .Lfault\@ -FEXPORT(__strnlen_user_nocheck_asm) +FEXPORT(__strnlen_\func\()_nocheck_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer 1: beq v0, a1, 1f # limit reached? 
- EX(lb, t0, (v0), fault) - PTR_ADDU v0, 1 +.ifeqs "\func", "kernel" + EX(lb, t0, (v0), .Lfault\@) +.else + EX(lbe, t0, (v0), .Lfault\@) +.endif + PTR_ADDIU v0, 1 bnez t0, 1b 1: PTR_SUBU v0, a0 jr ra - END(__strnlen_user_asm) + END(__strnlen_\func\()_asm) -fault: move v0, zero +.Lfault\@: + move v0, zero jr ra + .endm + +#ifndef CONFIG_EVA + /* Set aliases */ + .global __strnlen_user_asm + .global __strnlen_user_nocheck_asm + .set __strnlen_user_asm, __strnlen_kernel_asm + .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm +#endif + +__BUILD_STRNLEN_ASM kernel + +#ifdef CONFIG_EVA + + .set push + .set eva +__BUILD_STRNLEN_ASM user + .set pop +#endif diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c new file mode 100644 index 00000000000..bb4cb2f828e --- /dev/null +++ b/arch/mips/lib/ucmpdi2.c @@ -0,0 +1,21 @@ +#include <linux/module.h> + +#include "libgcc.h" + +word_type __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) + return 0; + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) + return 2; + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + return 1; +} + +EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c index 98ce89f8068..09d5deea747 100644 --- a/arch/mips/lib/uncached.c +++ b/arch/mips/lib/uncached.c @@ -4,14 +4,14 @@ * for more details. * * Copyright (C) 2005 Thiemo Seufer - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * Author: Maciej W. Rozycki <macro@mips.com> */ -#include <linux/init.h> #include <asm/addrspace.h> #include <asm/bug.h> +#include <asm/cacheflush.h> #ifndef CKSEG2 #define CKSEG2 CKSSEG @@ -35,7 +35,7 @@ * values, so we can avoid sharing the same stack area between a cached * and the uncached mode. */ -unsigned long __init run_uncached(void *func) +unsigned long run_uncached(void *func) { register long sp __asm__("$sp"); register long ret __asm__("$2"); @@ -44,20 +44,24 @@ unsigned long __init run_uncached(void *func) if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) usp = CKSEG1ADDR(sp); - else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) && - (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0)) - usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, +#ifdef CONFIG_64BIT + else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) && + (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0)) + usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED, XKPHYS_TO_PHYS((long long)sp)); +#endif else { BUG(); usp = sp; } if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2) ufunc = CKSEG1ADDR(lfunc); - else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) && - (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0)) - ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, +#ifdef CONFIG_64BIT + else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) && + (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0)) + ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED, XKPHYS_TO_PHYS((long long)lfunc)); +#endif else { BUG(); ufunc = lfunc; |
