path: root/arch/xtensa
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- arch/xtensa/Kconfig | 247
-rw-r--r-- arch/xtensa/Kconfig.debug | 32
-rw-r--r-- arch/xtensa/Makefile | 60
-rw-r--r-- arch/xtensa/boot/.gitignore | 3
-rw-r--r-- arch/xtensa/boot/Makefile | 19
-rw-r--r-- arch/xtensa/boot/boot-elf/.gitignore | 1
-rw-r--r-- arch/xtensa/boot/boot-elf/Makefile | 43
-rw-r--r-- arch/xtensa/boot/boot-elf/boot.lds.S | 71
-rw-r--r-- arch/xtensa/boot/boot-elf/bootstrap.S | 101
-rw-r--r-- arch/xtensa/boot/boot-redboot/Makefile | 24
-rw-r--r-- arch/xtensa/boot/boot-redboot/boot.ld | 9
-rw-r--r-- arch/xtensa/boot/boot-redboot/bootstrap.S | 18
-rw-r--r-- arch/xtensa/boot/boot-uboot/Makefile | 18
-rw-r--r-- arch/xtensa/boot/dts/Makefile | 15
-rw-r--r-- arch/xtensa/boot/dts/kc705.dts | 11
-rw-r--r-- arch/xtensa/boot/dts/lx60.dts | 11
-rw-r--r-- arch/xtensa/boot/dts/ml605.dts | 11
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi | 28
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi | 28
-rw-r--r-- arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi | 20
-rw-r--r-- arch/xtensa/boot/dts/xtfpga.dtsi | 69
-rw-r--r-- arch/xtensa/boot/lib/.gitignore | 3
-rw-r--r-- arch/xtensa/boot/lib/Makefile | 7
-rw-r--r-- arch/xtensa/boot/ramdisk/Makefile | 23
-rw-r--r-- arch/xtensa/configs/common_defconfig | 6
-rw-r--r-- arch/xtensa/configs/iss_defconfig | 7
-rw-r--r-- arch/xtensa/configs/s6105_defconfig | 10
-rw-r--r-- arch/xtensa/include/asm/Kbuild | 33
-rw-r--r-- arch/xtensa/include/asm/atomic.h | 286
-rw-r--r-- arch/xtensa/include/asm/barrier.h | 21
-rw-r--r-- arch/xtensa/include/asm/bitops.h | 136
-rw-r--r-- arch/xtensa/include/asm/bitsperlong.h | 1
-rw-r--r-- arch/xtensa/include/asm/bootparam.h | 31
-rw-r--r-- arch/xtensa/include/asm/bug.h | 18
-rw-r--r-- arch/xtensa/include/asm/cacheasm.h | 1
-rw-r--r-- arch/xtensa/include/asm/cacheflush.h | 41
-rw-r--r-- arch/xtensa/include/asm/checksum.h | 20
-rw-r--r-- arch/xtensa/include/asm/cmpxchg.h (renamed from arch/xtensa/include/asm/system.h) | 142
-rw-r--r-- arch/xtensa/include/asm/coprocessor.h | 5
-rw-r--r-- arch/xtensa/include/asm/cpumask.h | 16
-rw-r--r-- arch/xtensa/include/asm/cputime.h | 6
-rw-r--r-- arch/xtensa/include/asm/current.h | 2
-rw-r--r-- arch/xtensa/include/asm/delay.h | 62
-rw-r--r-- arch/xtensa/include/asm/device.h | 7
-rw-r--r-- arch/xtensa/include/asm/div64.h | 16
-rw-r--r-- arch/xtensa/include/asm/dma-mapping.h | 21
-rw-r--r-- arch/xtensa/include/asm/elf.h | 16
-rw-r--r-- arch/xtensa/include/asm/emergency-restart.h | 6
-rw-r--r-- arch/xtensa/include/asm/errno.h | 16
-rw-r--r-- arch/xtensa/include/asm/fcntl.h | 1
-rw-r--r-- arch/xtensa/include/asm/fixmap.h | 58
-rw-r--r-- arch/xtensa/include/asm/ftrace.h | 41
-rw-r--r-- arch/xtensa/include/asm/futex.h | 148
-rw-r--r-- arch/xtensa/include/asm/gpio.h | 60
-rw-r--r-- arch/xtensa/include/asm/hardirq.h | 19
-rw-r--r-- arch/xtensa/include/asm/highmem.h | 46
-rw-r--r-- arch/xtensa/include/asm/initialize_mmu.h | 163
-rw-r--r-- arch/xtensa/include/asm/io.h | 179
-rw-r--r-- arch/xtensa/include/asm/ioctl.h | 1
-rw-r--r-- arch/xtensa/include/asm/irq.h | 9
-rw-r--r-- arch/xtensa/include/asm/irq_regs.h | 1
-rw-r--r-- arch/xtensa/include/asm/irqflags.h | 9
-rw-r--r-- arch/xtensa/include/asm/kdebug.h | 1
-rw-r--r-- arch/xtensa/include/asm/kmap_types.h | 6
-rw-r--r-- arch/xtensa/include/asm/linkage.h | 16
-rw-r--r-- arch/xtensa/include/asm/local.h | 16
-rw-r--r-- arch/xtensa/include/asm/local64.h | 1
-rw-r--r-- arch/xtensa/include/asm/mmu.h | 12
-rw-r--r-- arch/xtensa/include/asm/mmu_context.h | 112
-rw-r--r-- arch/xtensa/include/asm/module.h | 9
-rw-r--r-- arch/xtensa/include/asm/mxregs.h | 46
-rw-r--r-- arch/xtensa/include/asm/nommu.h | 3
-rw-r--r-- arch/xtensa/include/asm/nommu_context.h | 2
-rw-r--r-- arch/xtensa/include/asm/page.h | 20
-rw-r--r-- arch/xtensa/include/asm/param.h | 20
-rw-r--r-- arch/xtensa/include/asm/pci-bridge.h | 2
-rw-r--r-- arch/xtensa/include/asm/pci.h | 7
-rw-r--r-- arch/xtensa/include/asm/percpu.h | 16
-rw-r--r-- arch/xtensa/include/asm/perf_event.h | 4
-rw-r--r-- arch/xtensa/include/asm/pgalloc.h | 31
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 165
-rw-r--r-- arch/xtensa/include/asm/platform.h | 6
-rw-r--r-- arch/xtensa/include/asm/posix_types.h | 122
-rw-r--r-- arch/xtensa/include/asm/processor.h | 41
-rw-r--r-- arch/xtensa/include/asm/ptrace.h | 83
-rw-r--r-- arch/xtensa/include/asm/regs.h | 62
-rw-r--r-- arch/xtensa/include/asm/resource.h | 16
-rw-r--r-- arch/xtensa/include/asm/rmap.h | 16
-rw-r--r-- arch/xtensa/include/asm/scatterlist.h | 16
-rw-r--r-- arch/xtensa/include/asm/sections.h | 16
-rw-r--r-- arch/xtensa/include/asm/siginfo.h | 16
-rw-r--r-- arch/xtensa/include/asm/signal.h | 153
-rw-r--r-- arch/xtensa/include/asm/smp.h | 38
-rw-r--r-- arch/xtensa/include/asm/spinlock.h | 191
-rw-r--r-- arch/xtensa/include/asm/spinlock_types.h | 20
-rw-r--r-- arch/xtensa/include/asm/stacktrace.h | 36
-rw-r--r-- arch/xtensa/include/asm/statfs.h | 17
-rw-r--r-- arch/xtensa/include/asm/string.h | 4
-rw-r--r-- arch/xtensa/include/asm/switch_to.h | 22
-rw-r--r-- arch/xtensa/include/asm/syscall.h | 22
-rw-r--r-- arch/xtensa/include/asm/sysmem.h | 38
-rw-r--r-- arch/xtensa/include/asm/termios.h | 105
-rw-r--r-- arch/xtensa/include/asm/thread_info.h | 8
-rw-r--r-- arch/xtensa/include/asm/timex.h | 47
-rw-r--r-- arch/xtensa/include/asm/tlbflush.h | 47
-rw-r--r-- arch/xtensa/include/asm/topology.h | 16
-rw-r--r-- arch/xtensa/include/asm/traps.h | 59
-rw-r--r-- arch/xtensa/include/asm/types.h | 15
-rw-r--r-- arch/xtensa/include/asm/uaccess.h | 45
-rw-r--r-- arch/xtensa/include/asm/unistd.h | 721
-rw-r--r-- arch/xtensa/include/asm/vectors.h | 130
-rw-r--r-- arch/xtensa/include/asm/xor.h | 16
-rw-r--r-- arch/xtensa/include/uapi/asm/Kbuild | 25
-rw-r--r-- arch/xtensa/include/uapi/asm/auxvec.h (renamed from arch/xtensa/include/asm/auxvec.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/byteorder.h (renamed from arch/xtensa/include/asm/byteorder.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/ioctls.h (renamed from arch/xtensa/include/asm/ioctls.h) | 7
-rw-r--r-- arch/xtensa/include/uapi/asm/ipcbuf.h (renamed from arch/xtensa/include/asm/ipcbuf.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/mman.h (renamed from arch/xtensa/include/asm/mman.h) | 15
-rw-r--r-- arch/xtensa/include/uapi/asm/msgbuf.h (renamed from arch/xtensa/include/asm/msgbuf.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/param.h | 30
-rw-r--r-- arch/xtensa/include/uapi/asm/poll.h (renamed from arch/xtensa/include/asm/poll.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/posix_types.h | 39
-rw-r--r-- arch/xtensa/include/uapi/asm/ptrace.h | 77
-rw-r--r-- arch/xtensa/include/uapi/asm/sembuf.h (renamed from arch/xtensa/include/asm/sembuf.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/setup.h (renamed from arch/xtensa/include/asm/setup.h) | 2
-rw-r--r-- arch/xtensa/include/uapi/asm/shmbuf.h (renamed from arch/xtensa/include/asm/shmbuf.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/sigcontext.h (renamed from arch/xtensa/include/asm/sigcontext.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/signal.h | 133
-rw-r--r-- arch/xtensa/include/uapi/asm/socket.h (renamed from arch/xtensa/include/asm/socket.h) | 17
-rw-r--r-- arch/xtensa/include/uapi/asm/sockios.h (renamed from arch/xtensa/include/asm/sockios.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/stat.h (renamed from arch/xtensa/include/asm/stat.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/swab.h (renamed from arch/xtensa/include/asm/swab.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/termbits.h (renamed from arch/xtensa/include/asm/termbits.h) | 0
-rw-r--r-- arch/xtensa/include/uapi/asm/types.h | 28
-rw-r--r-- arch/xtensa/include/uapi/asm/unistd.h | 764
-rw-r--r-- arch/xtensa/kernel/.gitignore | 1
-rw-r--r-- arch/xtensa/kernel/Makefile | 17
-rw-r--r-- arch/xtensa/kernel/align.S | 45
-rw-r--r-- arch/xtensa/kernel/asm-offsets.c | 6
-rw-r--r-- arch/xtensa/kernel/coprocessor.S | 52
-rw-r--r-- arch/xtensa/kernel/entry.S | 1148
-rw-r--r-- arch/xtensa/kernel/head.S | 253
-rw-r--r-- arch/xtensa/kernel/init_task.c | 31
-rw-r--r-- arch/xtensa/kernel/io.c | 75
-rw-r--r-- arch/xtensa/kernel/irq.c | 183
-rw-r--r-- arch/xtensa/kernel/mcount.S | 50
-rw-r--r-- arch/xtensa/kernel/module.c | 2
-rw-r--r-- arch/xtensa/kernel/mxhead.S | 85
-rw-r--r-- arch/xtensa/kernel/pci-dma.c | 4
-rw-r--r-- arch/xtensa/kernel/pci.c | 45
-rw-r--r-- arch/xtensa/kernel/platform.c | 7
-rw-r--r-- arch/xtensa/kernel/process.c | 187
-rw-r--r-- arch/xtensa/kernel/ptrace.c | 39
-rw-r--r-- arch/xtensa/kernel/setup.c | 424
-rw-r--r-- arch/xtensa/kernel/signal.c | 133
-rw-r--r-- arch/xtensa/kernel/smp.c | 607
-rw-r--r-- arch/xtensa/kernel/stacktrace.c | 120
-rw-r--r-- arch/xtensa/kernel/syscall.c | 50
-rw-r--r-- arch/xtensa/kernel/time.c | 148
-rw-r--r-- arch/xtensa/kernel/traps.c | 158
-rw-r--r-- arch/xtensa/kernel/vectors.S | 500
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 142
-rw-r--r-- arch/xtensa/kernel/xtensa_ksyms.c | 41
-rw-r--r-- arch/xtensa/lib/checksum.S | 15
-rw-r--r-- arch/xtensa/lib/memcopy.S | 315
-rw-r--r-- arch/xtensa/lib/pci-auto.c | 9
-rw-r--r-- arch/xtensa/lib/strncpy_user.S | 4
-rw-r--r-- arch/xtensa/lib/strnlen_user.S | 1
-rw-r--r-- arch/xtensa/lib/usercopy.S | 1
-rw-r--r-- arch/xtensa/mm/Makefile | 1
-rw-r--r-- arch/xtensa/mm/cache.c | 39
-rw-r--r-- arch/xtensa/mm/fault.c | 39
-rw-r--r-- arch/xtensa/mm/highmem.c | 72
-rw-r--r-- arch/xtensa/mm/init.c | 362
-rw-r--r-- arch/xtensa/mm/misc.S | 55
-rw-r--r-- arch/xtensa/mm/mmu.c | 92
-rw-r--r-- arch/xtensa/mm/tlb.c | 176
-rw-r--r-- arch/xtensa/oprofile/Makefile | 9
-rw-r--r-- arch/xtensa/oprofile/backtrace.c | 169
-rw-r--r-- arch/xtensa/oprofile/init.c | 26
-rw-r--r-- arch/xtensa/platforms/iss/Makefile | 6
-rw-r--r-- arch/xtensa/platforms/iss/console.c | 68
-rw-r--r-- arch/xtensa/platforms/iss/include/platform/serial.h | 15
-rw-r--r-- arch/xtensa/platforms/iss/include/platform/simcall.h | 55
-rw-r--r-- arch/xtensa/platforms/iss/io.c | 32
-rw-r--r-- arch/xtensa/platforms/iss/network.c | 353
-rw-r--r-- arch/xtensa/platforms/iss/setup.c | 20
-rw-r--r-- arch/xtensa/platforms/iss/simdisk.c | 384
-rw-r--r-- arch/xtensa/platforms/xt2000/setup.c | 24
-rw-r--r-- arch/xtensa/platforms/xtfpga/Makefile | 9
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 65
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/lcd.h | 20
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/serial.h | 18
-rw-r--r-- arch/xtensa/platforms/xtfpga/lcd.c | 76
-rw-r--r-- arch/xtensa/platforms/xtfpga/setup.c | 303
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/core.h | 475
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/tie-asm.h | 193
-rw-r--r-- arch/xtensa/variants/dc233c/include/variant/tie.h | 150
-rw-r--r-- arch/xtensa/variants/fsf/include/variant/tie.h | 9
-rw-r--r-- arch/xtensa/variants/s6000/delay.c | 6
-rw-r--r-- arch/xtensa/variants/s6000/dmac.c | 2
-rw-r--r-- arch/xtensa/variants/s6000/gpio.c | 8
-rw-r--r-- arch/xtensa/variants/s6000/include/variant/dmac.h | 2
-rw-r--r-- arch/xtensa/variants/s6000/include/variant/irq.h | 1
-rw-r--r-- arch/xtensa/variants/s6000/irq.c | 4
205 files changed, 9939 insertions, 4600 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8a3f8351f43..3a617af60d4 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,22 +1,34 @@
-config FRAME_POINTER
- def_bool n
-
config ZONE_DMA
def_bool y
config XTENSA
def_bool y
+ select ARCH_WANT_FRAME_POINTERS
select HAVE_IDE
- select HAVE_GENERIC_HARDIRQS
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
- select GENERIC_CPU_DEVICES
+ select GENERIC_SCHED_CLOCK
+ select MODULES_USE_ELF_RELA
+ select GENERIC_PCI_IOMAP
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select IRQ_DOMAIN
+ select HAVE_OPROFILE
+ select HAVE_FUNCTION_TRACER
+ select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_PERF_EVENTS
+ select COMMON_CLK
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
configurable and extensible. The Linux port to the Xtensa
architecture supports all processor configurations and extensions,
with reasonable minimum requirements. The Xtensa Linux project has
- a home page at <http://xtensa.sourceforge.net/>.
+ a home page at <http://www.linux-xtensa.org/>.
config RWSEM_XCHGADD_ALGORITHM
def_bool y
@@ -24,17 +36,14 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_GPIO
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
config ARCH_HAS_ILOG2_U64
def_bool n
-config NO_IOPORT
- def_bool y
+config NO_IOPORT_MAP
+ def_bool n
config HZ
int
@@ -43,12 +52,27 @@ config HZ
source "init/Kconfig"
source "kernel/Kconfig.freezer"
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
config MMU
def_bool n
config VARIANT_IRQ_SWITCH
def_bool n
+config HAVE_XTENSA_GPIO32
+ def_bool n
+
+config MAY_HAVE_SMP
+ def_bool n
+
menu "Processor type and features"
choice
@@ -62,9 +86,17 @@ config XTENSA_VARIANT_FSF
config XTENSA_VARIANT_DC232B
bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
select MMU
+ select HAVE_XTENSA_GPIO32
help
This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
+config XTENSA_VARIANT_DC233C
+ bool "dc233c - Diamond 233L Standard Core Rev.C (LE)"
+ select MMU
+ select HAVE_XTENSA_GPIO32
+ help
+ This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
+
config XTENSA_VARIANT_S6000
bool "s6000 - Stretch software configurable processor"
select VARIANT_IRQ_SWITCH
@@ -83,11 +115,99 @@ config XTENSA_UNALIGNED_USER
source "kernel/Kconfig.preempt"
+config HAVE_SMP
+ bool "System Supports SMP (MX)"
+ depends on MAY_HAVE_SMP
+ select XTENSA_MX
+ help
+ This option is used to indicate that the system-on-a-chip (SOC)
+ supports multiprocessing. Multiprocessor support is implemented above
+ the CPU core definition and currently needs to be selected manually.
+
+ Multiprocessor support is implemented with external cache and
+ interrupt controllers.
+
+ The MX interrupt distributor adds interprocessor interrupts
+ and causes the IRQ numbers to be increased by 4 for devices
+ like the OpenCores Ethernet driver and the serial interface.
+
+ You still have to select "Enable SMP" to enable SMP on this SOC.
+
+config SMP
+ bool "Enable Symmetric multi-processing support"
+ depends on HAVE_SMP
+ select GENERIC_SMP_IDLE_THREAD
+ help
+ Enables SMP software; allows more than one CPU/core
+ to be activated during startup.
+
+config NR_CPUS
+ depends on SMP
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ default "4"
+
+config HOTPLUG_CPU
+ bool "Enable CPU hotplug support"
+ depends on SMP
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+
+ Say N if you want to disable CPU hotplug.
+
config MATH_EMULATION
bool "Math emulation"
help
Can we use information from the configuration file?
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ bool "Initialize Xtensa MMU inside the Linux kernel code"
+ default y
+ help
+ Earlier versions initialized the MMU in the exception vector
+ before jumping to _startup in head.S and had the advantage that
+ it was possible to place a software breakpoint at 'reset' and
+ then enter your normal kernel breakpoints once the MMU was mapped
+ to the kernel mappings (0xC0000000).
+
+ This unfortunately doesn't work for U-Boot and likely also won't
+ work when using KEXEC to have a hot kernel ready for doing a
+ KDUMP.
+
+ So now the MMU is initialized in head.S, but it's necessary to
+ use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+ xt-gdb can't place a software breakpoint in the 0xD region prior
+ to mapping the MMU, and after mapping, even if the area of low
+ memory was mapped, gdb wouldn't remove the breakpoint on hitting it
+ because the PC wouldn't match. Since hardware breakpoints are
+ recommended for Linux configurations, it seems reasonable to just
+ assume they exist and leave this older mechanism for unfortunate
+ souls that choose not to follow Tensilica's recommendation.
+
+ Selecting this will cause U-Boot to set the kernel load and entry
+ address to 0x00003000 instead of the standard mapped address of
+ 0xD0003000.
+
+ If in doubt, say Y.
+
+config HIGHMEM
+ bool "High Memory Support"
+ help
+ Linux can use the full amount of RAM in the system by
+ default. However, the default MMUv2 setup only maps the
+ lowermost 128 MB of memory linearly to the areas starting
+ at 0xd0000000 (cached) and 0xd8000000 (uncached).
+ When there is more than 128 MB of memory in the system, not
+ all of it can be "permanently mapped" by the kernel.
+ The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a
+ machine with more than 128 MB total physical RAM, answer
+ N here.
+
+ If unsure, say Y.
+
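[Editor's note: a minimal C sketch of the fixed MMUv2 linear map described in the help text above; it is illustrative only and not part of the patch, the macro and function names are invented, and it assumes the lowest 128 MB of RAM starts at physical address 0 as on the XTFPGA boards.]

#include <stdint.h>

#define LINEAR_CACHED_BASE   0xd0000000UL  /* cached linear mapping */
#define LINEAR_UNCACHED_BASE 0xd8000000UL  /* uncached alias of the same RAM */
#define LINEAR_MAP_SIZE      (128UL << 20) /* only 128 MB is permanently mapped */

/* Return the cached virtual address for a physical address, or 0 if the
 * page lies above the linear map and is therefore "high memory" that has
 * to be mapped on demand (e.g. with kmap). */
static inline uintptr_t phys_to_cached(uintptr_t pa)
{
	return (pa < LINEAR_MAP_SIZE) ? (uintptr_t)LINEAR_CACHED_BASE + pa : 0;
}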
endmenu
config XTENSA_CALIBRATE_CCOUNT
@@ -100,9 +220,6 @@ config XTENSA_CALIBRATE_CCOUNT
config SERIAL_CONSOLE
def_bool n
-config XTENSA_ISS_NETWORK
- def_bool n
-
menu "Bus options"
config PCI
@@ -128,7 +245,6 @@ config XTENSA_PLATFORM_ISS
bool "ISS"
select XTENSA_CALIBRATE_CCOUNT
select SERIAL_CONSOLE
- select XTENSA_ISS_NETWORK
help
ISS is an acronym for Tensilica's Instruction Set Simulator.
@@ -141,6 +257,16 @@ config XTENSA_PLATFORM_XT2000
config XTENSA_PLATFORM_S6105
bool "S6105"
select SERIAL_CONSOLE
+ select NO_IOPORT_MAP
+
+config XTENSA_PLATFORM_XTFPGA
+ bool "XTFPGA"
+ select SERIAL_CONSOLE
+ select ETHOC
+ select XTENSA_CALIBRATE_CCOUNT
+ help
+ XTFPGA is the name of the Tensilica board family (LX60, LX110, LX200, ML605).
+ This hardware is capable of running a full Linux distribution.
endchoice
@@ -169,25 +295,54 @@ config CMDLINE
time by entering them here. As a minimum, you should specify the
memory size and the root device (e.g., mem=64M root=/dev/nfs).
-source "mm/Kconfig"
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_EARLY_FLATTREE
+ help
+ Include support for flattened device tree machine descriptions.
-config HOTPLUG
- bool "Support for hot-pluggable devices"
+config BUILTIN_DTB
+ string "DTB to build into the kernel image"
+ depends on OF
+
+config BLK_DEV_SIMDISK
+ tristate "Host file-based simulated block device support"
+ default n
+ depends on XTENSA_PLATFORM_ISS
+ help
+ Create block devices that map to files in the host file system.
+ The binding of a device to its host file may be changed at runtime
+ via the proc interface, provided the device is not in use.
+
+config BLK_DEV_SIMDISK_COUNT
+ int "Number of host file-based simulated block devices"
+ range 1 10
+ depends on BLK_DEV_SIMDISK
+ default 2
help
- Say Y here if you want to plug devices into your computer while
- the system is running, and be able to use them quickly. In many
- cases, the devices can likewise be unplugged at any time too.
+ This is the default minimal number of created block devices.
+ Kernel/module parameter 'simdisk_count' may be used to change this
+ value at runtime. More file names (but no more than 10) may be
+ specified as parameters; simdisk_count grows accordingly.
+
+config SIMDISK0_FILENAME
+ string "Host filename for the first simulated device"
+ depends on BLK_DEV_SIMDISK = y
+ default ""
+ help
+ Attach the first simdisk to a host file. Conventionally, this file
+ contains a root file system.
- One well known example of this is PCMCIA- or PC-cards, credit-card
- size devices such as network cards, modems or hard drives which are
- plugged into slots found on all modern laptop computers. Another
- example, used on modern desktops as well as laptops, is USB.
+config SIMDISK1_FILENAME
+ string "Host filename for the second simulated device"
+ depends on BLK_DEV_SIMDISK = y && BLK_DEV_SIMDISK_COUNT != 1
+ default ""
+ help
+ Another simulated disk backed by a host file, for buildroot-independent
+ storage.
- Enable HOTPLUG and build a modular kernel. Get agent software
- (from <http://linux-hotplug.sourceforge.net/>) and install it.
- Then your kernel will automatically call out to a user mode "policy
- agent" (/sbin/hotplug) to load modules and set up software needed
- to use devices as you hotplug them.
+source "mm/Kconfig"
source "drivers/pcmcia/Kconfig"
@@ -197,21 +352,6 @@ endmenu
menu "Executable file formats"
-# only elf supported
-config KCORE_ELF
- def_bool y
- depends on PROC_FS
- help
- If you enabled support for /proc file system then the file
- /proc/kcore will contain the kernel core image in ELF format. This
- can be used in gdb:
-
- $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
-
- This is especially useful if you have compiled the kernel with the
- "-g" option to preserve debugging information. It is mainly used
- for examining kernel data structures on the live kernel.
-
source "fs/Kconfig.binfmt"
endmenu
@@ -222,23 +362,6 @@ source "drivers/Kconfig"
source "fs/Kconfig"
-menu "Xtensa initrd options"
- depends on BLK_DEV_INITRD
-
-config EMBEDDED_RAMDISK
- bool "Embed root filesystem ramdisk into the kernel"
-
-config EMBEDDED_RAMDISK_IMAGE
- string "Filename of gzipped ramdisk image"
- depends on EMBEDDED_RAMDISK
- default "ramdisk.gz"
- help
- This is the filename of the ramdisk image to be built into the
- kernel. Relative pathnames are relative to arch/xtensa/boot/ramdisk/.
- The ramdisk image is not part of the kernel distribution; you must
- provide one yourself.
-endmenu
-
source "arch/xtensa/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index 11c585295dd..af7da74d535 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,36 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-endmenu
+config DEBUG_TLB_SANITY
+ bool "Debug TLB sanity"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on a TLB sanity check on each entry to userspace.
+ This check can spot missing TLB invalidations, wrong PTE permissions,
+ or premature page freeing.
+
+ If unsure, say N.
+
+config LD_NO_RELAX
+ bool "Disable linker relaxation"
+ default n
+ help
+ Enable this option to disable link-time optimizations.
+ The default linker behavior is to combine identical literal
+ values to reduce code size and remove unnecessary overhead from
+ assembler-generated 'longcall' sequences.
+ Enabling this option improves the link time but increases the
+ code size, and possibly execution time.
+config S32C1I_SELFTEST
+ bool "Perform S32C1I instruction self-test at boot"
+ default y
+ help
+ Enable this option to test S32C1I instruction behavior at boot.
+ Correct operation of this instruction requires some cooperation from hardware
+ external to the processor (such as a bus bridge, bus fabric, or memory controller).
+ It is easy to get the hardware configuration wrong; this test should catch it early.
+ Say 'N' on stable hardware.
+
+endmenu
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7608559de93..81250ece306 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -15,28 +15,53 @@
variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b
+variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c
variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
VARIANT = $(variant-y)
export VARIANT
+# Test for cross compiling
+
+ifneq ($(VARIANT),)
+ COMPILE_ARCH = $(shell uname -m)
+
+ ifneq ($(COMPILE_ARCH), xtensa)
+ ifndef CROSS_COMPILE
+ CROSS_COMPILE = xtensa_$(VARIANT)-
+ endif
+ endif
+endif
+
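[Editor's note: for example, with CONFIG_XTENSA_VARIANT_DC233C=y on a non-xtensa build host, the test above defaults CROSS_COMPILE to xtensa_dc233c-, so kbuild invokes xtensa_dc233c-gcc unless CROSS_COMPILE is overridden on the make command line.]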
# Platform configuration
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
+platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
PLATFORM = $(platform-y)
export PLATFORM
# temporarily until string.h is fixed
-KBUILD_CFLAGS += -ffreestanding
+KBUILD_CFLAGS += -ffreestanding -D__linux__
KBUILD_CFLAGS += -pipe -mlongcalls
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+ifneq ($(CONFIG_LD_NO_RELAX),)
+LDFLAGS := --no-relax
+endif
+
+ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EB__
+endif
+ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EL__
+endif
+
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
@@ -48,28 +73,10 @@ endif
KBUILD_DEFCONFIG := iss_defconfig
-# ramdisk/initrd support
-# You need a compressed ramdisk image, named ramdisk.gz in
-# arch/xtensa/boot/ramdisk
-
-core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
-
-# Test for cross compiling
-
-ifneq ($(VARIANT),)
- COMPILE_ARCH = $(shell uname -m)
-
- ifneq ($(COMPILE_ARCH), xtensa)
- ifndef CROSS_COMPILE
- CROSS_COMPILE = xtensa_$(VARIANT)-
- endif
- endif
-endif
-
# Only build variant and/or platform if it includes a Makefile
-buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
-buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
+buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
+buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
# Find libgcc.a
@@ -80,6 +87,11 @@ core-y += arch/xtensa/kernel/ arch/xtensa/mm/
core-y += $(buildvar) $(buildplf)
libs-y += arch/xtensa/lib/ $(LIBGCC)
+drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
+
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+core-$(CONFIG_OF) += arch/xtensa/boot/dts/
+endif
boot := arch/xtensa/boot
@@ -87,10 +99,12 @@ all: zImage
bzImage : zImage
-zImage zImage.initrd: vmlinux
+zImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
+%.dtb:
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
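[Editor's note: with the device trees added later in this patch, running "make ARCH=xtensa kc705.dtb" (plus the usual CROSS_COMPILE setting) builds arch/xtensa/boot/dts/kc705.dtb through this rule.]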
define archhelp
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
endef
-
diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore
new file mode 100644
index 00000000000..be7655998b2
--- /dev/null
+++ b/arch/xtensa/boot/.gitignore
@@ -0,0 +1,3 @@
+uImage
+zImage.redboot
+*.dtb
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 70fd1453e17..ca20a892021 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -12,7 +12,7 @@
KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
HOSTFLAGS += -Iarch/$(ARCH)/boot/include
-BIG_ENDIAN := $(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
+BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
export ccflags-y
export BIG_ENDIAN
@@ -22,12 +22,23 @@ subdir-y := lib
# Subdirs for the boot loader(s)
bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
-bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
+bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
+bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
-
-zImage zImage.initrd Image Image.initrd: $(bootdir-y)
+zImage Image: $(bootdir-y)
$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
$(addprefix $(obj)/,$(host-progs))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
+OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+
+vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+vmlinux.bin.gz: vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+boot-elf: vmlinux.bin
+boot-redboot: vmlinux.bin.gz
+boot-uboot: vmlinux.bin.gz
diff --git a/arch/xtensa/boot/boot-elf/.gitignore b/arch/xtensa/boot/boot-elf/.gitignore
new file mode 100644
index 00000000000..5ff8fbb8561
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/.gitignore
@@ -0,0 +1 @@
+boot.lds
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index 08e8814f8c7..89db089f5a1 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -4,9 +4,6 @@
# for more details.
#
-GZIP = gzip
-GZIP_FLAGS = -v9fc
-
ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be
else
@@ -15,39 +12,23 @@ endif
export OBJCOPY_ARGS
export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
boot-y := bootstrap.o
OBJS := $(addprefix $(obj)/,$(boot-y))
-Image: vmlinux $(OBJS) arch/$(ARCH)/boot/boot-elf/boot.lds
- $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
- vmlinux vmlinux.tmp
- $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
- --add-section image=vmlinux.tmp \
- --set-section-flags image=contents,alloc,load,load,data \
- $(OBJS) $@.tmp
- $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
- -T arch/$(ARCH)/boot/boot-elf/boot.lds \
- -o arch/$(ARCH)/boot/$@.elf $@.tmp
- rm -f $@.tmp vmlinux.tmp
-
-Image.initrd: vmlinux $(OBJS)
- $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
- --add-section .initrd=arch/$(ARCH)/boot/ramdisk \
- --set-section-flags .initrd=contents,alloc,load,load,data \
- vmlinux vmlinux.tmp
- $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
- --add-section image=vmlinux.tmp \
+$(obj)/Image.o: vmlinux.bin $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin \
--set-section-flags image=contents,alloc,load,load,data \
- $(OBJS) $@.tmp
- $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
- -T $(srctree)/arch/$(ARCH)/boot/boot-elf/boot.ld \
- -o arch/$(ARCH)/boot/$@.elf $@.tmp
- rm -f $@.tmp vmlinux.tmp
-
-
-zImage: Image
+ $(OBJS) $@
-zImage.initrd: Image.initrd
+$(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
+ $(Q)$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
+ -T $(obj)/boot.lds \
+ --build-id=none \
+ -o $@ $(obj)/Image.o
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+zImage: $(obj)/../Image.elf
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 4e53b74dc44..932b58ef33d 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -1,48 +1,29 @@
-#include <variant/core.h>
+/*
+ * linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ * Copyright (C) 2008 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/vectors.h>
OUTPUT_ARCH(xtensa)
ENTRY(_ResetVector)
SECTIONS
{
- .start 0xD0000000 : { *(.start) }
-
- .text 0xD0000000:
- {
- __reloc_start = . ;
- _text_start = . ;
- *(.literal .text.literal .text)
- _text_end = . ;
- }
-
- .rodata ALIGN(0x04):
- {
- *(.rodata)
- *(.rodata1)
- }
-
- .data ALIGN(0x04):
- {
- *(.data)
- *(.data1)
- *(.sdata)
- *(.sdata2)
- *(.got.plt)
- *(.got)
- *(.dynamic)
- }
-
- __reloc_end = . ;
-
- .initrd ALIGN(0x10) :
+ .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
{
- boot_initrd_start = . ;
- *(.initrd)
- boot_initrd_end = .;
+ *(.ResetVector.text)
}
- . = ALIGN(0x10);
- __image_load = . ;
- .image 0xd0001000:
+ .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
{
_image_start = .;
*(image)
@@ -50,7 +31,6 @@ SECTIONS
_image_end = . ;
}
-
.bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
{
__bss_start = .;
@@ -60,14 +40,15 @@ SECTIONS
*(.bss)
__bss_end = .;
}
- _end = .;
- _param_start = .;
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ /*
+ * This is a remapped copy of the Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ */
+ .ResetVector.remapped_text 0x46000000 (INFO):
{
- *(.ResetVector.text)
+ *(.ResetVector.remapped_text)
}
-
-
- PROVIDE (end = .);
}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 464298bc348..1388a499753 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,18 +1,77 @@
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Piet Delaney <piet@tensilica.com>
+ */
#include <asm/bootparam.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <linux/linkage.h>
-
-/* ResetVector
- */
- .section .ResetVector.text, "ax"
+ .section .ResetVector.text, "ax"
.global _ResetVector
+ .global reset
+
_ResetVector:
- _j reset
+ _j _SetupMMU
+
+ .begin no-absolute-literals
+ .literal_position
+
.align 4
RomInitAddr:
- .word 0xd0001000
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ .word 0x00003000
+#else
+ .word 0xd0003000
+#endif
RomBootParam:
.word _bootparam
+_bootparam:
+ .short BP_TAG_FIRST
+ .short 4
+ .long BP_VERSION
+ .short BP_TAG_LAST
+ .short 0
+ .long 0
+
+ .align 4
+_SetupMMU:
+ movi a0, 0
+ wsr a0, windowbase
+ rsync
+ movi a0, 1
+ wsr a0, windowstart
+ rsync
+ movi a0, 0x1F
+ wsr a0, ps
+ rsync
+
+ Offset = _SetupMMU - _ResetVector
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
+
+ rsil a0, XCHAL_DEBUGLEVEL-1
+ rsync
reset:
l32r a0, RomInitAddr
l32r a2, RomBootParam
@@ -21,13 +80,25 @@ reset:
jx a0
.align 4
- .section .bootstrap.data, "aw"
- .globl _bootparam
-_bootparam:
- .short BP_TAG_FIRST
- .short 4
- .long BP_VERSION
- .short BP_TAG_LAST
- .short 0
- .long 0
+ .section .ResetVector.remapped_text, "x"
+ .global _RemappedResetVector
+
+ /* Do org before literals */
+ .org 0
+
+_RemappedResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+
+ /* Position Remapped code at the same location as the original code */
+ . = _RemappedResetVector + Offset
+
+_RemappedSetupMMU:
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
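[Editor's note: the _bootparam block above lays out a minimal chain of bp_tag records (BP_TAG_FIRST carrying BP_VERSION, terminated by BP_TAG_LAST) in the layout declared in asm/bootparam.h. The C sketch below shows how such a chain is walked; it is illustrative only and not code from this patch, with only the tag values and record layout taken from the header shown further down in this diff.]

#define BP_TAG_FIRST 0x7B0B	/* first tag, carries the boot-parameter version */
#define BP_TAG_LAST  0x7E0B	/* terminates the list */

struct bp_tag {
	unsigned short id;	/* BP_TAG_* */
	unsigned short size;	/* payload size, excluding this 4-byte header */
	unsigned long data[0];	/* payload; records are 4-byte aligned */
};

/* Advance to the next record: skip the header plus the payload, rounded
 * up to the 4-byte record alignment. */
static const struct bp_tag *bp_tag_next(const struct bp_tag *tag)
{
	unsigned long next = (unsigned long)(tag + 1) + ((tag->size + 3UL) & ~3UL);

	return (const struct bp_tag *)next;
}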
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
index 872029b8443..8be8b943698 100644
--- a/arch/xtensa/boot/boot-redboot/Makefile
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -4,8 +4,6 @@
# for more details.
#
-GZIP = gzip
-GZIP_FLAGS = -v9fc
ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be
else
@@ -21,15 +19,17 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-zImage: vmlinux $(OBJS) $(LIBS)
- $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
- vmlinux vmlinux.tmp
- gzip -vf9 vmlinux.tmp
- $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
- --add-section image=vmlinux.tmp.gz \
+$(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin.gz \
--set-section-flags image=contents,alloc,load,load,data \
- $(OBJS) $@.tmp
- $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
- $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/$@.redboot
- rm -f $@.tmp $@.elf vmlinux.tmp.gz
+ $(OBJS) $@
+$(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
+ $(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
+
+$(obj)/../zImage.redboot: $(obj)/zImage.elf
+ $(Q)$(OBJCOPY) -S -O binary $< $@
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+zImage: $(obj)/../zImage.redboot
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
index 774db20d11f..b0b9e95b58b 100644
--- a/arch/xtensa/boot/boot-redboot/boot.ld
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -31,16 +31,9 @@ SECTIONS
__reloc_end = . ;
- .initrd ALIGN(0x10) :
- {
- boot_initrd_start = . ;
- *(.initrd)
- boot_initrd_end = .;
- }
-
. = ALIGN(0x10);
__image_load = . ;
- .image 0xd0001000: AT(__image_load)
+ .image 0xd0003000: AT(__image_load)
{
_image_start = .;
*(image)
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index 5582e8cfac8..86c34dbc9cd 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -51,17 +51,17 @@ _start:
/* 'reset' window registers */
movi a4, 1
- wsr a4, PS
+ wsr a4, ps
rsync
- rsr a5, WINDOWBASE
+ rsr a5, windowbase
ssl a5
sll a4, a4
- wsr a4, WINDOWSTART
+ wsr a4, windowstart
rsync
movi a4, 0x00040000
- wsr a4, PS
+ wsr a4, ps
rsync
/* copy the loader to its address
@@ -226,17 +226,7 @@ _reloc:
isync
- movi a5, __start
- movi a3, boot_initrd_start
- movi a4, boot_initrd_end
- sub a3, a3, a5
- sub a4, a4, a5
- add a3, a0, a3
- add a4, a0, a4
-
# a2 Boot parameter list
- # a3 initrd_start (virtual load address)
- # a4 initrd_end (virtual load address)
movi a0, _image_start
jx a0
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
new file mode 100644
index 00000000000..545759819ef
--- /dev/null
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -0,0 +1,18 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+UIMAGE_LOADADDR = 0x00003000
+else
+UIMAGE_LOADADDR = 0xd0003000
+endif
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/../uImage: vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+zImage: $(obj)/../uImage
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
new file mode 100644
index 00000000000..5f711bba830
--- /dev/null
+++ b/arch/xtensa/boot/dts/Makefile
@@ -0,0 +1,15 @@
+#
+# arch/xtensa/boot/dts/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+clean-files := *.dtb.S
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
new file mode 100644
index 00000000000..742a347be67
--- /dev/null
+++ b/arch/xtensa/boot/dts/kc705.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-kc705";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/lx60.dts b/arch/xtensa/boot/dts/lx60.dts
new file mode 100644
index 00000000000..a0f8b8ad392
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx60.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-4m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-lx60";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/ml605.dts b/arch/xtensa/boot/dts/ml605.dts
new file mode 100644
index 00000000000..905c3a5035e
--- /dev/null
+++ b/arch/xtensa/boot/dts/ml605.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+ compatible = "cdns,xtensa-ml605";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
new file mode 100644
index 00000000000..d3a88e02987
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
@@ -0,0 +1,28 @@
+/ {
+ soc {
+ flash: flash@00000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x00000000 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "data";
+ reg = <0x00000000 0x06000000>;
+ };
+ partition@0x6000000 {
+ label = "boot loader area";
+ reg = <0x06000000 0x00800000>;
+ };
+ partition@0x6800000 {
+ label = "kernel image";
+ reg = <0x06800000 0x017e0000>;
+ };
+ partition@0x7fe0000 {
+ label = "boot environment";
+ reg = <0x07fe0000 0x00020000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
new file mode 100644
index 00000000000..1d97203c18e
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
@@ -0,0 +1,28 @@
+/ {
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x01000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@0x400000 {
+ label = "kernel image";
+ reg = <0x00400000 0x00600000>;
+ };
+ partition@0xa00000 {
+ label = "data";
+ reg = <0x00a00000 0x005e0000>;
+ };
+ partition@0xfe0000 {
+ label = "boot environment";
+ reg = <0x00fe0000 0x00020000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
new file mode 100644
index 00000000000..d1c621ca8be
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
@@ -0,0 +1,20 @@
+/ {
+ soc {
+ flash: flash@08000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x08000000 0x00400000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x003f0000>;
+ };
+ partition@0x3f0000 {
+ label = "boot environment";
+ reg = <0x003f0000 0x00010000>;
+ };
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
new file mode 100644
index 00000000000..dec9178840f
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -0,0 +1,69 @@
+/ {
+ compatible = "cdns,xtensa-xtfpga";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x06000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "cdns,xtensa-cpu";
+ reg = <0>;
+ /* Filled in by platform_setup from FPGA register
+ * clock-frequency = <100000000>;
+ */
+ };
+ };
+
+ pic: pic {
+ compatible = "cdns,xtensa-pic";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ clocks {
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+
+ serial0: serial@0d050020 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ no-loopback-test;
+ reg = <0x0d050020 0x20>;
+ reg-shift = <2>;
+ interrupts = <0 1>; /* external irq 0 */
+ clocks = <&osc>;
+ };
+
+ enet0: ethoc@0d030000 {
+ compatible = "opencores,ethoc";
+ reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+ interrupts = <1 1>; /* external irq 1 */
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
+ };
+};
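[Editor's note: a worked example of the address translation above: the soc node's ranges property maps child bus addresses starting at 0x00000000 onto CPU addresses starting at 0xf0000000 for 0x10000000 bytes, so the UART at bus address 0x0d050020 is seen by the CPU at 0xfd050020, which is exactly the address given to earlycon= in the bootargs above.]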
diff --git a/arch/xtensa/boot/lib/.gitignore b/arch/xtensa/boot/lib/.gitignore
new file mode 100644
index 00000000000..1629a616775
--- /dev/null
+++ b/arch/xtensa/boot/lib/.gitignore
@@ -0,0 +1,3 @@
+inffast.c
+inflate.c
+inftrees.c
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index ad8952e8a07..6868f2ca6af 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -7,6 +7,13 @@ zlib := inffast.c inflate.c inftrees.c
lib-y += $(zlib:.c=.o) zmem.o
ccflags-y := -Ilib/zlib_inflate
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_inflate.o = -pg
+CFLAGS_REMOVE_zmem.o = -pg
+CFLAGS_REMOVE_inftrees.o = -pg
+CFLAGS_REMOVE_inffast.o = -pg
+endif
+
quiet_cmd_copy_zlib = COPY $@
cmd_copy_zlib = cat $< > $@
diff --git a/arch/xtensa/boot/ramdisk/Makefile b/arch/xtensa/boot/ramdisk/Makefile
deleted file mode 100644
index b12f7635243..00000000000
--- a/arch/xtensa/boot/ramdisk/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Makefile for a ramdisk image
-#
-
-BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
-
-ifeq ($(BIG_ENDIAN),1)
-OBJCOPY_ARGS := -O elf32-xtensa-be
-else
-OBJCOPY_ARGS := -O elf32-xtensa-le
-endif
-
-obj-y = ramdisk.o
-
-RAMDISK_IMAGE = arch/$(ARCH)/boot/ramdisk/$(CONFIG_EMBEDDED_RAMDISK_IMAGE)
-
-arch/$(ARCH)/boot/ramdisk/ramdisk.o:
- $(Q)echo -e "dummy:" | $(AS) -o $@;
- $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) \
- --add-section .initrd=$(RAMDISK_IMAGE) \
- --set-section-flags .initrd=contents,alloc,load,load,data \
- arch/$(ARCH)/boot/ramdisk/ramdisk.o $@
-
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index b90038e40dd..f6000fe0511 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -8,7 +8,6 @@ CONFIG_XTENSA=y
# CONFIG_UID16 is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_HAVE_DEC_LOCK=y
-CONFIG_GENERIC_HARDIRQS=y
#
# Code maturity level options
@@ -333,11 +332,6 @@ CONFIG_XT2000_SONIC=y
# CONFIG_S2IO is not set
#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
# Wireless LAN (non-hamradio)
#
CONFIG_NET_RADIO=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index f932b30b47f..1493c68352d 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -9,11 +9,9 @@ CONFIG_XTENSA=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT=y
+CONFIG_NO_IOPORT_MAP=y
CONFIG_HZ=100
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -113,7 +111,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
# CONFIG_INLINE_SPIN_LOCK_BH is not set
# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
@@ -629,7 +627,6 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 550e8ed5b5c..12a492ab6d1 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -9,11 +9,9 @@ CONFIG_XTENSA=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT=y
+CONFIG_NO_IOPORT_MAP=y
CONFIG_HZ=100
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -541,11 +539,6 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_DLM is not set
#
-# Xtensa initrd options
-#
-# CONFIG_EMBEDDED_RAMDISK is not set
-
-#
# Kernel hacking
#
CONFIG_PRINTK_TIME=y
@@ -576,7 +569,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
CONFIG_DEBUG_NOMMU_REGIONS=y
-# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index c68e1680da0..c3d20ba6eb8 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1 +1,32 @@
-include include/asm-generic/Kbuild.asm
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fcntl.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += ioctl.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 23592eff67a..e5103b47a8c 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_ATOMIC_H
@@ -18,17 +18,18 @@
#ifdef __KERNEL__
#include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
/*
* This Xtensa implementation assumes that the right mechanism
- * for exclusion is for locking interrupts to level 1.
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
*
* Locking interrupts looks like this:
*
- * rsil a15, 1
+ * rsil a15, LOCKLEVEL
* <code>
* wsr a15, PS
* rsync
@@ -66,19 +67,35 @@
*/
static inline void atomic_add(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/**
@@ -90,19 +107,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/
static inline void atomic_sub(int i, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/*
@@ -111,40 +144,78 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " add %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " sub %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
/**
@@ -251,47 +322,72 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- unsigned int all_f = -1;
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "xor %1, %4, %3 \n\t"
- "and %0, %0, %4 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval), "=a" (mask)
- : "a" (v), "a" (all_f), "1" (mask)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (~mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int all_f = -1;
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " xor %1, %4, %3\n"
+ " and %0, %0, %4\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval), "=a" (mask)
+ : "a" (v), "a" (all_f), "1" (mask)
+ : "a15", "memory"
+ );
+#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "or %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (mask), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " or %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (mask), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
-
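[Editor's note: each S32C1I path added above follows the same pattern: load the old value, arm SCOMPARE1 with it, compute the new value, and retry the conditional store until it sticks. The portable C11 sketch below shows that retry loop; it is illustrative only and not code from this patch, and the type and function names are invented.]

#include <stdatomic.h>

typedef struct { atomic_int counter; } xt_atomic_t;	/* stand-in for atomic_t */

static inline void xt_atomic_add(int i, xt_atomic_t *v)
{
	int old = atomic_load_explicit(&v->counter, memory_order_relaxed);

	/* l32i + "wsr scompare1" correspond to reading 'old'; s32c1i + bne
	 * correspond to the compare-and-swap and the retry branch. */
	while (!atomic_compare_exchange_weak_explicit(&v->counter, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' was refreshed by the failed exchange; retry */
}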
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 00000000000..5b88774c75a
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 40aa7fe77f6..3f44fa2a53e 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -21,16 +21,8 @@
#include <asm/processor.h>
#include <asm/byteorder.h>
-#include <asm/system.h>
+#include <asm/barrier.h>
-#ifdef CONFIG_SMP
-# error SMP not supported on this architecture
-#endif
-
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#if XCHAL_HAVE_NSA
@@ -105,6 +97,132 @@ static inline unsigned long __fls(unsigned long word)
#endif
#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
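
When the core has S32C1I, the helpers above replace the asm-generic fallback with compare-and-swap loops: each picks the 32-bit word with bit >> 5, builds a mask with 1 << (bit & 31), and retries on s32c1i until the store wins. A small host-side analogue (GCC builtins, 32-bit words as on Xtensa) purely to illustrate the indexing and the return convention; the *_sketch name is not from the patch:

#include <stdint.h>
#include <stdio.h>

/* Analogue of the s32c1i-based test_and_set_bit() above; __atomic_fetch_or
 * stands in for the l32i/s32c1i retry loop. */
static int test_and_set_bit_sketch(unsigned int bit, uint32_t *p)
{
	uint32_t mask = UINT32_C(1) << (bit & 31);
	uint32_t old;

	p += bit >> 5;				/* select the 32-bit word */
	old = __atomic_fetch_or(p, mask, __ATOMIC_SEQ_CST);
	return (old & mask) != 0;		/* was the bit already set? */
}

int main(void)
{
	uint32_t bitmap[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit_sketch(40, bitmap));	/* 0: was clear */
	printf("%d\n", test_and_set_bit_sketch(40, bitmap));	/* 1: already set */
	return 0;
}
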
diff --git a/arch/xtensa/include/asm/bitsperlong.h b/arch/xtensa/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b..00000000000
--- a/arch/xtensa/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 9983f2c1b7e..892aab399ac 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -20,8 +20,9 @@
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
-#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
@@ -31,31 +32,19 @@
/* All records are aligned to 4 bytes */
typedef struct bp_tag {
- unsigned short id; /* tag id */
- unsigned short size; /* size of this record excluding the structure*/
- unsigned long data[0]; /* data */
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure*/
+ unsigned long data[0]; /* data */
} bp_tag_t;
-typedef struct meminfo {
- unsigned long type;
- unsigned long start;
- unsigned long end;
-} meminfo_t;
-
-#define SYSMEM_BANKS_MAX 5
+struct bp_meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
-typedef struct sysmem_info {
- int nr_banks;
- meminfo_t bank[SYSMEM_BANKS_MAX];
-} sysmem_info_t;
-
-extern sysmem_info_t sysmem;
-
#endif
#endif
-
-
-
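
Each boot-parameter record is a bp_tag header followed by size bytes of data, with records aligned to 4 bytes and BP_TAG_LAST closing the chain. A hedged host-side sketch of walking such a chain; the struct name, the stepping rule (header plus padded data) and the little-endian test blob are inferred from the layout above, not copied from the kernel's parser:

#include <stdint.h>
#include <stdio.h>

struct bp_tag_sketch {
	uint16_t id;		/* tag id */
	uint16_t size;		/* size of the data that follows, in bytes */
	uint32_t data[];	/* payload */
};

#define BP_TAG_LAST_SKETCH	0x7E0B

static void parse_bootparams(const struct bp_tag_sketch *tag)
{
	while (tag->id != BP_TAG_LAST_SKETCH) {
		printf("tag 0x%04x, %u bytes of data\n", tag->id, tag->size);
		/* next record: header plus data, rounded up to 4 bytes */
		tag = (const struct bp_tag_sketch *)((const char *)tag +
			sizeof(*tag) + ((tag->size + 3u) & ~3u));
	}
}

int main(void)
{
	/* Fake two-entry chain (little-endian host assumed):
	 * a 4-byte BP_TAG_COMMAND_LINE record, then BP_TAG_LAST. */
	static const uint32_t blob[] = {
		(4u << 16) | 0x1001u, 0,
		(0u << 16) | BP_TAG_LAST_SKETCH,
	};

	parse_bootparams((const struct bp_tag_sketch *)blob);
	return 0;
}
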
diff --git a/arch/xtensa/include/asm/bug.h b/arch/xtensa/include/asm/bug.h
deleted file mode 100644
index 3e52d72712f..00000000000
--- a/arch/xtensa/include/asm/bug.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/bug.h
- *
- * Macros to cause a 'bug' message.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_BUG_H
-#define _XTENSA_BUG_H
-
-#include <asm-generic/bug.h>
-
-#endif /* _XTENSA_BUG_H */
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2c20a58f94c..60e18773ecb 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -174,4 +174,3 @@
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm
-
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 376cd9d5f45..555a98a1845 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -1,18 +1,14 @@
/*
- * include/asm-xtensa/cacheflush.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * (C) 2001 - 2007 Tensilica Inc.
+ * (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
-
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
@@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
* (see also Documentation/cachetlb.txt)
*/
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
-#define flush_cache_all() \
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
+
+#define local_flush_cache_all() \
do { \
__flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
@@ -103,8 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
-extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
#else
@@ -118,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
-#define flush_cache_page(vma,addr,pfn) do { } while (0)
-#define flush_cache_range(vma,start,end) do { } while (0)
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page(vma, addr, pfn) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
#endif
/* Ensure consistency between data and instruction cache. */
-#define flush_icache_range(start,end) \
+#define local_flush_icache_range(start, end) \
do { \
__flush_dcache_range(start, (end) - (start)); \
__invalidate_icache_range(start,(end) - (start)); \
@@ -165,7 +177,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
static inline u32 xtensa_get_cacheattr(void)
{
u32 r;
- asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
+ asm volatile(" rsr %0, cacheattr" : "=a"(r));
return r;
}
@@ -252,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
}
}
-#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index e4d831a3077..0593de689b5 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -12,6 +12,7 @@
#define _XTENSA_CHECKSUM_H
#include <linux/in6.h>
+#include <asm/uaccess.h>
#include <variant/core.h>
/*
@@ -36,8 +37,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -54,7 +56,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
@@ -112,7 +114,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
: "1" (iph), "2" (ihl)
: "memory");
@@ -168,7 +171,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
@@ -238,11 +241,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len,
+ __wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+ return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len)
*err_ptr = -EFAULT;
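
csum_fold(), used by ip_compute_csum() and csum_tcpudp_magic() above, reduces the 32-bit partial sum these helpers accumulate to the final 16-bit one's-complement checksum. A tiny portable illustration of that fold (not the Xtensa assembly; fold32 is an illustrative name):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves      */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (uint16_t)~sum;			/* checksum is the inverse */
}

int main(void)
{
	/* folding 0x0001ffff gives 0x0001, so the checksum is 0xfffe */
	printf("0x%04x\n", fold32(0x0001ffffu));
	return 0;
}
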
diff --git a/arch/xtensa/include/asm/system.h b/arch/xtensa/include/asm/cmpxchg.h
index 1e7e09ab6cd..370b26f3841 100644
--- a/arch/xtensa/include/asm/system.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -1,5 +1,5 @@
/*
- * include/asm-xtensa/system.h
+ * Atomic xchg and cmpxchg operations.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -8,44 +8,12 @@
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-#ifndef _XTENSA_SYSTEM_H
-#define _XTENSA_SYSTEM_H
+#ifndef _XTENSA_CMPXCHG_H
+#define _XTENSA_CMPXCHG_H
-#include <linux/stringify.h>
-#include <linux/irqflags.h>
-
-#include <asm/processor.h>
-
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
-#define mb() barrier()
-#define rmb() mb()
-#define wmb() mb()
-
-#ifdef CONFIG_SMP
-#error smp_* not defined
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-
-#if !defined (__ASSEMBLY__)
-
-/* * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
- */
-extern void *_switch_to(void *last, void *next);
+#ifndef __ASSEMBLY__
-#endif /* __ASSEMBLY__ */
-
-#define switch_to(prev,next,last) \
-do { \
- (last) = _switch_to(prev, next); \
-} while(0)
+#include <linux/stringify.h>
/*
* cmpxchg
@@ -54,17 +22,30 @@ do { \
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "bne %0, %2, 1f \n\t"
- "s32i %3, %1, 0 \n\t"
- "1: \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n\t"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
- : "a15", "memory");
- return old;
+#if XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %2, scompare1\n"
+ " s32c1i %0, %1, 0\n"
+ : "+a" (new)
+ : "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " bne %0, %2, 1f\n"
+ " s32i %3, %1, 0\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
}
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
@@ -112,6 +93,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
@@ -125,19 +107,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
- unsigned long tmp;
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "s32i %2, %1, 0 \n\t"
- "wsr a15, "__stringify(PS)" \n\t"
- "rsync \n\t"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
- : "a15", "memory");
- return tmp;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " mov %0, %3\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " s32i %2, %1, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
}
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
* This only works if the compiler isn't horribly bad at optimizing.
@@ -158,27 +157,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
return x;
}
-extern void set_except_vector(int n, void *addr);
-
-static inline void spill_registers(void)
-{
- unsigned int a0, ps;
-
- __asm__ __volatile__ (
- "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
- "mov a12, a0\n\t"
- "rsr a13," __stringify(SAR) "\n\t"
- "xsr a14," __stringify(PS) "\n\t"
- "movi a0, _spill_registers\n\t"
- "rsync\n\t"
- "callx0 a0\n\t"
- "mov a0, a12\n\t"
- "wsr a13," __stringify(SAR) "\n\t"
- "wsr a14," __stringify(PS) "\n\t"
- :: "a" (&a0), "a" (&ps)
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
-}
-
-#define arch_align_stack(x) (x)
+#endif /* __ASSEMBLY__ */
-#endif /* _XTENSA_SYSTEM_H */
+#endif /* _XTENSA_CMPXCHG_H */
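
With S32C1I, __cmpxchg_u32() no longer has to mask interrupts: it loads the expected value into SCOMPARE1 and lets s32c1i perform the conditional store, returning whatever the memory held. A host-side sketch of the same contract and of a typical retrying caller, with a GCC builtin standing in for s32c1i (the *_sketch and add_one names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Returns the value found at *p; the store happened iff it equals 'old'. */
static uint32_t cmpxchg_u32_sketch(uint32_t *p, uint32_t old, uint32_t new)
{
	__atomic_compare_exchange_n(p, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

/* Typical caller: re-read and retry until the swap succeeds. */
static void add_one(uint32_t *counter)
{
	uint32_t cur;

	do {
		cur = *counter;
	} while (cmpxchg_u32_sketch(counter, cur, cur + 1) != cur);
}

int main(void)
{
	uint32_t c = 41;

	add_one(&c);
	printf("%u\n", c);	/* 42 */
	return 0;
}
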
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 75c94a1658b..677501b32df 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -94,11 +94,10 @@
#if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
- __asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+ __asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
- __asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
- :: "a" (x)); \
+ __asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
diff --git a/arch/xtensa/include/asm/cpumask.h b/arch/xtensa/include/asm/cpumask.h
deleted file mode 100644
index ebeede397db..00000000000
--- a/arch/xtensa/include/asm/cpumask.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/cpumask.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CPUMASK_H
-#define _XTENSA_CPUMASK_H
-
-#include <asm-generic/cpumask.h>
-
-#endif /* _XTENSA_CPUMASK_H */
diff --git a/arch/xtensa/include/asm/cputime.h b/arch/xtensa/include/asm/cputime.h
deleted file mode 100644
index a7fb864a50a..00000000000
--- a/arch/xtensa/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _XTENSA_CPUTIME_H
-#define _XTENSA_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* _XTENSA_CPUTIME_H */
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 8d1eb5d7864..47e46dcf5d4 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
- l32i reg, reg, TI_TASK \
+ l32i reg, reg, TI_TASK \
#endif
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
index e1d8c9e010c..24304b39a5c 100644
--- a/arch/xtensa/include/asm/delay.h
+++ b/arch/xtensa/include/asm/delay.h
@@ -12,38 +12,64 @@
#ifndef _XTENSA_DELAY_H
#define _XTENSA_DELAY_H
-#include <asm/processor.h>
+#include <asm/timex.h>
#include <asm/param.h>
extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops)
{
- /* 2 cycles per loop. */
- __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
- : "=r" (loops) : "0" (loops));
+ if (__builtin_constant_p(loops) && loops < 2)
+ __asm__ __volatile__ ("nop");
+ else if (loops >= 2)
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "+r" (loops));
}
-static __inline__ u32 xtensa_get_ccount(void)
+/* Undefined function to get compile-time error */
+void __bad_udelay(void);
+void __bad_ndelay(void);
+
+#define __MAX_UDELAY 30000
+#define __MAX_NDELAY 30000
+
+static inline void __udelay(unsigned long usecs)
{
- u32 ccount;
- asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
- return ccount;
+ unsigned long start = get_ccount();
+ unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)get_ccount()) - start < cycles)
+ cpu_relax();
}
-/* For SMP/NUMA systems, change boot_cpu_data to something like
- * local_cpu_data->... where local_cpu_data points to the current
- * cpu. */
+static inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY)
+ __bad_udelay();
+ else
+ __udelay(usec);
+}
-static __inline__ void udelay (unsigned long usecs)
+static inline void __ndelay(unsigned long nsec)
{
- unsigned long start = xtensa_get_ccount();
- unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ));
+ /*
+ * Inner shift makes sure multiplication doesn't overflow
+ * for legitimate nsec values
+ */
+ unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;
+ __delay(cycles);
+}
- /* Note: all variables are unsigned (can wrap around)! */
- while (((unsigned long)xtensa_get_ccount()) - start < cycles)
- ;
+#define ndelay(n) ndelay(n)
+
+static inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY)
+ __bad_ndelay();
+ else
+ __ndelay(nsec);
}
#endif
-
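
The new __udelay() converts microseconds to cycles as (usecs * (ccount_freq >> 15)) >> 5, i.e. it divides by 2^20 instead of 10^6 so that the multiply stays within 32 bits for the bounded __MAX_UDELAY range. A quick host-side check of what that approximation yields for an assumed 100 MHz CCOUNT clock (the frequency is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long ccount_freq = 100000000UL;	/* assumed 100 MHz */
	unsigned long usecs = 100;

	unsigned long exact  = usecs * (ccount_freq / 1000000UL);
	unsigned long approx = (usecs * (ccount_freq >> 15)) >> 5;

	/* prints: exact=10000 approx=9534 (about 4.7% below exact) */
	printf("exact=%lu approx=%lu\n", exact, approx);
	return 0;
}
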
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2..00000000000
--- a/arch/xtensa/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/xtensa/include/asm/div64.h b/arch/xtensa/include/asm/div64.h
deleted file mode 100644
index f35678cb0a9..00000000000
--- a/arch/xtensa/include/asm/div64.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/div64.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2007 Tensilica Inc.
- */
-
-#ifndef _XTENSA_DIV64_H
-#define _XTENSA_DIV64_H
-
-#include <asm-generic/div64.h>
-
-#endif /* _XTENSA_DIV64_H */
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 492c95790ad..172a02a6ad1 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,6 +16,8 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
/*
* DMA-consistent mapping functions.
*/
@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
}
static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
@@ -168,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
consistent_sync(vaddr, size, direction);
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 6e65eadaae1..eacb25a4171 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -84,7 +84,8 @@ typedef struct {
elf_greg_t sar;
elf_greg_t windowstart;
elf_greg_t windowbase;
- elf_greg_t reserved[8+48];
+ elf_greg_t threadptr;
+ elf_greg_t reserved[7+48];
elf_greg_t a[64];
} xtensa_gregset_t;
@@ -168,11 +169,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
*/
#define ELF_PLAT_INIT(_r, load_addr) \
- do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
- _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
- _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
- _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
- } while (0)
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
typedef struct {
xtregs_opt_t opt;
@@ -189,7 +190,8 @@ typedef struct {
#endif
} elf_xtregs_t;
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
struct task_struct;
diff --git a/arch/xtensa/include/asm/emergency-restart.h b/arch/xtensa/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/xtensa/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/xtensa/include/asm/errno.h b/arch/xtensa/include/asm/errno.h
deleted file mode 100644
index a0f3b96b79b..00000000000
--- a/arch/xtensa/include/asm/errno.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/errno.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_ERRNO_H
-#define _XTENSA_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _XTENSA_ERRNO_H */
diff --git a/arch/xtensa/include/asm/fcntl.h b/arch/xtensa/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db573..00000000000
--- a/arch/xtensa/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644
index 00000000000..9f6c33d0428
--- /dev/null
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -0,0 +1,58 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel( \
+ pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+ (vaddr) \
+ )
+
+#endif
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
index 40a8c178f10..6c6d9a9f185 100644
--- a/arch/xtensa/include/asm/ftrace.h
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -1 +1,40 @@
-/* empty */
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#define ftrace_return_address0 ({ unsigned long a0, a1; \
+ __asm__ __volatile__ ( \
+ "mov %0, a0\n" \
+ "mov %1, a1\n" \
+ : "=r"(a0), "=r"(a1)); \
+ MAKE_PC_FROM_RA(a0, a1); })
+
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define ftrace_return_address(n) return_address(n)
+#endif
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 3
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _XTENSA_FTRACE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 0b745828f42..b39531babec 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -1 +1,147 @@
-#include <asm-generic/futex.h>
+/*
+ * Atomic futex routines
+ *
+ * Based on the PowerPC implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 TangoTec Ltd.
+ *
+ * Baruch Siach <baruch@tkos.co.il>
+ */
+
+#ifndef _ASM_XTENSA_FUTEX_H
+#define _ASM_XTENSA_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile( \
+ "1: l32i %0, %2, 0\n" \
+ insn "\n" \
+ " wsr %0, scompare1\n" \
+ "2: s32c1i %1, %2, 0\n" \
+ " bne %1, %0, 1b\n" \
+ " movi %1, 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: .long 3b\n" \
+ "5: l32r %0, 4b\n" \
+ " movi %1, %3\n" \
+ " jx %0\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b,5b,2b,5b\n" \
+ " .previous\n" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
+ : "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
+ oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (ret)
+ return ret;
+
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
+ case FUTEX_OP_CMP_NE: return (oldval != cmparg);
+ case FUTEX_OP_CMP_LT: return (oldval < cmparg);
+ case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
+ case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
+ case FUTEX_OP_CMP_GT: return (oldval > cmparg);
+ }
+
+ return -ENOSYS;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 prev;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+#if !XCHAL_HAVE_S32C1I
+ return -ENOSYS;
+#endif
+
+ __asm__ __volatile__ (
+ " # futex_atomic_cmpxchg_inatomic\n"
+ "1: l32i %1, %3, 0\n"
+ " mov %0, %5\n"
+ " wsr %1, scompare1\n"
+ "2: s32c1i %0, %3, 0\n"
+ "3:\n"
+ " .section .fixup,\"ax\"\n"
+ " .align 4\n"
+ "4: .long 3b\n"
+ "5: l32r %1, 4b\n"
+ " movi %0, %6\n"
+ " jx %1\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b,5b,2b,5b\n"
+ " .previous\n"
+ : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+ : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
+ : "memory");
+
+ *uval = prev;
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_XTENSA_FUTEX_H */
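
futex_atomic_op_inuser() unpacks the operation, comparison and both arguments from the single encoded_op word before running the s32c1i loop. A small host-side decoder showing the same field layout, using explicit masks as a well-defined equivalent of the sign-extending shifts (sign_extend12 is an illustrative helper, not from the patch):

#include <stdio.h>

static int sign_extend12(unsigned int field)
{
	return (field & 0x800) ? (int)field - 0x1000 : (int)field;
}

int main(void)
{
	/* FUTEX_OP_ADD (1), FUTEX_OP_CMP_NE (1), oparg = 4, cmparg = 0 */
	unsigned int encoded_op = (1u << 28) | (1u << 24) | (4u << 12);

	unsigned int op     = (encoded_op >> 28) & 7;
	unsigned int cmp    = (encoded_op >> 24) & 15;
	int          oparg  = sign_extend12((encoded_op >> 12) & 0xfff);
	int          cmparg = sign_extend12(encoded_op & 0xfff);

	printf("op=%u cmp=%u oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
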
diff --git a/arch/xtensa/include/asm/gpio.h b/arch/xtensa/include/asm/gpio.h
index a8c9fc46c79..b3799d88ffc 100644
--- a/arch/xtensa/include/asm/gpio.h
+++ b/arch/xtensa/include/asm/gpio.h
@@ -1,56 +1,4 @@
-/*
- * Generic GPIO API implementation for xtensa.
- *
- * Stolen from x86, which is derived from the generic GPIO API for powerpc:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_XTENSA_GPIO_H
-#define _ASM_XTENSA_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-/*
- * Not implemented, yet.
- */
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_XTENSA_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
deleted file mode 100644
index 26664cef8f1..00000000000
--- a/arch/xtensa/include/asm/hardirq.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * include/asm-xtensa/hardirq.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_HARDIRQ_H
-#define _XTENSA_HARDIRQ_H
-
-void ack_bad_irq(unsigned int irq);
-#define ack_bad_irq ack_bad_irq
-
-#include <asm-generic/hardirq.h>
-
-#endif /* _XTENSA_HARDIRQ_H */
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 0a046ca5a68..2653ef5d55f 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -6,12 +6,54 @@
* this archive for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
-extern void flush_cache_kmaps(void);
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
-#endif
+#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot PAGE_KERNEL
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+static inline void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
+
+#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 00000000000..600781edc8a
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,163 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMU's.
+ *
+ * The the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+ movi a3, 0x29 /* non-MX -- Most cores use Std Memory
+ * Controllers which usually can't use RCW
+ */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+ movi a3, 0x18000000
+ add a2, a2, a0
+9: bgeu a2, a3, 9b /* PC is out of the expected range */
+
+ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+ movi a2, 0x40000006
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+ * and jump to the new mapping.
+ */
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, -1
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -6
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the old V2 mappings. */
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, 0xd0000005
+ movi a4, CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xd8000005
+ movi a4, CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + 6
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KIO_BYPASS_VADDR + 6
+ movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using MMU v2 mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ movi a2, VECBASE_RESET_VADDR
+ wsr a2, vecbase
+
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index d04cd3a625f..74944207167 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -14,194 +14,75 @@
#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <asm/vectors.h>
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#define XCHAL_KIO_CACHED_VADDR 0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
-#define XCHAL_KIO_PADDR 0xf0000000
-#define XCHAL_KIO_SIZE 0x10000000
-
#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
+#define IO_SPACE_LIMIT ~0
-/*
- * swap functions to change byte order from little-endian to big-endian and
- * vice versa.
- */
-
-static inline unsigned short _swapw (unsigned short v)
-{
- return (v << 8) | (v >> 8);
-}
-
-static inline unsigned int _swapl (unsigned int v)
-{
- return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
-}
+#ifdef CONFIG_MMU
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are trivial on the 1:1 Linux/Xtensa mapping
- */
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+extern unsigned long xtensa_kio_paddr;
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long xtensa_get_kio_paddr(void)
{
- return __pa(address);
+ return xtensa_kio_paddr;
}
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * virt_to_bus and bus_to_virt are deprecated.
- */
-
-#define virt_to_bus(x) virt_to_phys(x)
-#define bus_to_virt(x) phys_to_virt(x)
+#endif
/*
- * Return the virtual (cached) address for the specified bus memory.
+ * Return the virtual address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
*/
-
-static inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long offset,
+ unsigned long size)
{
-#ifdef CONFIG_MMU
if (offset >= XCHAL_KIO_PADDR
- && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
BUG();
-#else
- return (void *)offset;
-#endif
}
-static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap_cache(unsigned long offset,
+ unsigned long size)
{
-#ifdef CONFIG_MMU
if (offset >= XCHAL_KIO_PADDR
- && offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
+ && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
BUG();
-#else
- return (void *)offset;
-#endif
}
-static inline void iounmap(void *addr)
-{
-}
+#define ioremap_wc ioremap_nocache
-/*
- * Generic I/O
- */
-
-#define readb(addr) \
- ({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
-#define readw(addr) \
- ({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
-#define readl(addr) \
- ({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
-#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
-
-static inline __u8 __raw_readb(const volatile void __iomem *addr)
-{
- return *(__force volatile __u8 *)(addr);
-}
-static inline __u16 __raw_readw(const volatile void __iomem *addr)
-{
- return *(__force volatile __u16 *)(addr);
-}
-static inline __u32 __raw_readl(const volatile void __iomem *addr)
-{
- return *(__force volatile __u32 *)(addr);
-}
-static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
-{
- *(__force volatile __u8 *)(addr) = b;
-}
-static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
- *(__force volatile __u16 *)(addr) = b;
+ return ioremap_nocache(offset, size);
}
-static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
+
+static inline void iounmap(volatile void __iomem *addr)
{
- *(__force volatile __u32 *)(addr) = b;
}
-/* These are the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl, the "string" versions
- * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
- * inb_p/inw_p/...
- * The macros don't do byte-swapping.
- */
-
-#define inb(port) readb((u8 *)((port)))
-#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
-#define inw(port) readw((u16 *)((port)))
-#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
-#define inl(port) readl((u32 *)((port)))
-#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
-
-#define inb_p(port) inb((port))
-#define outb_p(val, port) outb((val), (port))
-#define inw_p(port) inw((port))
-#define outw_p(val, port) outw((val), (port))
-#define inl_p(port) inl((port))
-#define outl_p(val, port) outl((val), (port))
-
-extern void insb (unsigned long port, void *dst, unsigned long count);
-extern void insw (unsigned long port, void *dst, unsigned long count);
-extern void insl (unsigned long port, void *dst, unsigned long count);
-extern void outsb (unsigned long port, const void *src, unsigned long count);
-extern void outsw (unsigned long port, const void *src, unsigned long count);
-extern void outsl (unsigned long port, const void *src, unsigned long count);
-
-#define IO_SPACE_LIMIT ~0
-
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-
-/* At this point the Xtensa doesn't provide byte swap instructions */
-
-#ifdef __XTENSA_EB__
-# define in_8(addr) (*(u8*)(addr))
-# define in_le16(addr) _swapw(*(u16*)(addr))
-# define in_le32(addr) _swapl(*(u32*)(addr))
-# define out_8(b, addr) *(u8*)(addr) = (b)
-# define out_le16(b, addr) *(u16*)(addr) = _swapw(b)
-# define out_le32(b, addr) *(u32*)(addr) = _swapl(b)
-#elif defined(__XTENSA_EL__)
-# define in_8(addr) (*(u8*)(addr))
-# define in_le16(addr) (*(u16*)(addr))
-# define in_le32(addr) (*(u32*)(addr))
-# define out_8(b, addr) *(u8*)(addr) = (b)
-# define out_le16(b, addr) *(u16*)(addr) = (b)
-# define out_le32(b, addr) *(u32*)(addr) = (b)
-#else
-# error processor byte order undefined!
-#endif
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#endif /* CONFIG_MMU */
/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
+ * Generic I/O
*/
-#define xlate_dev_kmem_ptr(p) p
-
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
#endif /* __KERNEL__ */
+#include <asm-generic/io.h>
+
#endif /* _XTENSA_IO_H */
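
ioremap_nocache() and ioremap_cache() above don't build page tables at all: inside the KIO segment they simply rebase the physical offset onto the bypass or cached KIO window. A host-side sketch of that translation, using the legacy constants that the removed lines defined (the KIO_* macro names and the sample device address are illustrative; with CONFIG_OF the physical base may instead come from xtensa_kio_paddr):

#include <stdio.h>

#define KIO_PADDR		0xf0000000ul
#define KIO_BYPASS_VADDR	0xf0000000ul
#define KIO_CACHED_VADDR	0xe0000000ul
#define KIO_SIZE		0x10000000ul

int main(void)
{
	unsigned long offset = 0xfd050000ul;	/* illustrative device address */

	if (offset >= KIO_PADDR && offset - KIO_PADDR < KIO_SIZE) {
		printf("bypass -> 0x%08lx\n", offset - KIO_PADDR + KIO_BYPASS_VADDR);
		printf("cached -> 0x%08lx\n", offset - KIO_PADDR + KIO_CACHED_VADDR);
	}
	return 0;
}
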
diff --git a/arch/xtensa/include/asm/ioctl.h b/arch/xtensa/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/xtensa/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 4c0ccc9c4f4..f71f88ea764 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -43,5 +43,14 @@ static __inline__ int irq_canonicalize(int irq)
}
struct irqaction;
+struct irq_domain;
+
+void migrate_irqs(void);
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw);
+unsigned xtensa_map_ext_irq(unsigned ext_irq);
+unsigned xtensa_get_ext_irq_no(unsigned irq);
#endif /* _XTENSA_IRQ_H */
diff --git a/arch/xtensa/include/asm/irq_regs.h b/arch/xtensa/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/xtensa/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index dae9a8bdcb1..ea36674c6ec 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -16,7 +16,7 @@
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
- asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));
+ asm volatile("rsr %0, ps" : "=a" (flags));
return flags;
}
@@ -41,13 +41,16 @@ static inline void arch_local_irq_enable(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile("wsr %0, "__stringify(PS)" ; rsync"
+ asm volatile("wsr %0, ps; rsync"
:: "a" (flags) : "memory");
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return (flags & 0xf) != 0;
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
}
static inline bool arch_irqs_disabled(void)
diff --git a/arch/xtensa/include/asm/kdebug.h b/arch/xtensa/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b03766..00000000000
--- a/arch/xtensa/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/xtensa/include/asm/kmap_types.h b/arch/xtensa/include/asm/kmap_types.h
deleted file mode 100644
index 11c687e527f..00000000000
--- a/arch/xtensa/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _XTENSA_KMAP_TYPES_H
-#define _XTENSA_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* _XTENSA_KMAP_TYPES_H */
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
deleted file mode 100644
index bf2128a99d7..00000000000
--- a/arch/xtensa/include/asm/linkage.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/linkage.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_LINKAGE_H
-#define _XTENSA_LINKAGE_H
-
-/* Nothing to do here ... */
-
-#endif /* _XTENSA_LINKAGE_H */
diff --git a/arch/xtensa/include/asm/local.h b/arch/xtensa/include/asm/local.h
deleted file mode 100644
index 48723e550d1..00000000000
--- a/arch/xtensa/include/asm/local.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/local.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_LOCAL_H
-#define _XTENSA_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* _XTENSA_LOCAL_H */
diff --git a/arch/xtensa/include/asm/local64.h b/arch/xtensa/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc23..00000000000
--- a/arch/xtensa/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
index 04890d6e233..71afe418d0e 100644
--- a/arch/xtensa/include/asm/mmu.h
+++ b/arch/xtensa/include/asm/mmu.h
@@ -1,22 +1,22 @@
/*
- * include/asm-xtensa/mmu.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_H
#define _XTENSA_MMU_H
#ifndef CONFIG_MMU
-#include <asm/nommu.h>
+#include <asm-generic/mmu.h>
#else
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+ unsigned long asid[NR_CPUS];
+ unsigned int cpu;
+} mm_context_t;
#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index dbd8731a876..d33c71a8c9e 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -1,13 +1,11 @@
/*
- * include/asm-xtensa/mmu_context.h
- *
* Switch an MMU context.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_CONTEXT_H
@@ -20,22 +18,25 @@
#include <linux/stringify.h>
#include <linux/sched.h>
-#include <variant/core.h>
+#include <asm/vectors.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
+#include <asm-generic/percpu.h>
#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif
-extern unsigned long asid_cache;
+DECLARE_PER_CPU(unsigned long, asid_cache);
+#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
/*
* NO_CONTEXT is the invalid ASID value that we don't ever assign to
- * any user or kernel context.
+ * any user or kernel context. We use the reserved values in the
+ * ASID_INSERT macro below.
*
* 0 invalid
* 1 kernel
@@ -49,77 +50,96 @@ extern unsigned long asid_cache;
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
+#ifdef CONFIG_MMU
+void init_mmu(void);
+#else
+static inline void init_mmu(void) { }
+#endif
+
static inline void set_rasid_register (unsigned long val)
{
- __asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
+ __asm__ __volatile__ (" wsr %0, rasid\n\t"
" isync\n" : : "a" (val));
}
static inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
- __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
+ __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
return tmp;
}
-static inline void
-__get_new_mmu_context(struct mm_struct *mm)
+static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
- extern void flush_tlb_all(void);
- if (! (++asid_cache & ASID_MASK) ) {
- flush_tlb_all(); /* start new asid cycle */
- asid_cache += ASID_USER_FIRST;
+ unsigned long asid = cpu_asid_cache(cpu);
+ if ((++asid & ASID_MASK) == 0) {
+ /*
+ * Start new asid cycle; continue counting with next
+ * incarnation bits; skipping over 0, 1, 2, 3.
+ */
+ local_flush_tlb_all();
+ asid += ASID_USER_FIRST;
}
- mm->context = asid_cache;
+ cpu_asid_cache(cpu) = asid;
+ mm->context.asid[cpu] = asid;
+ mm->context.cpu = cpu;
}
-static inline void
-__load_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
- set_rasid_register(ASID_INSERT(mm->context));
+ /*
+ * Check if our ASID is of an older version and thus invalid.
+ */
+
+ if (mm) {
+ unsigned long asid = mm->context.asid[cpu];
+
+ if (asid == NO_CONTEXT ||
+ ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
+ get_new_mmu_context(mm, cpu);
+ }
+}
+
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
+{
+ get_mmu_context(mm, cpu);
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
invalidate_page_directory();
}
/*
* Initialize the context related info for a new mm_struct
- * instance.
+ * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
+ * to -1 says the process has never run on any core.
*/
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
{
- mm->context = NO_CONTEXT;
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ }
+ mm->context.cpu = -1;
return 0;
}
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
- /* Unconditionally get a new ASID. */
-
- __get_new_mmu_context(next);
- __load_mmu_context(next);
-}
-
-
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+ struct task_struct *tsk)
{
- unsigned long asid = asid_cache;
-
- /* Check if our ASID is of an older version and thus invalid */
-
- if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
- __get_new_mmu_context(next);
-
- __load_mmu_context(next);
+ unsigned int cpu = smp_processor_id();
+ int migrated = next->context.cpu != cpu;
+ /* Flush the icache if we migrated to a new core. */
+ if (migrated) {
+ __invalidate_icache_all();
+ next->context.cpu = cpu;
+ }
+ if (migrated || prev != next)
+ activate_context(next, cpu);
}
-#define deactivate_mm(tsk, mm) do { } while(0)
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+#define deactivate_mm(tsk, mm) do { } while (0)
/*
* Destroy context related info for an mm_struct that is about
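
The per-cpu asid_cache packs a generation number above the hardware ASID bits: a context is reused only while its upper bits match the current generation, and when the low bits wrap the TLB is flushed and the reserved ASIDs 0..3 are skipped by adding ASID_USER_FIRST. A host-side simulation of that counter, just to show the wrap behaviour; the constants (8 ASID bits) and the new_context name are illustrative assumptions:

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1u << ASID_BITS) - 1)
#define ASID_USER_FIRST	4u		/* 0..3 are reserved */

static unsigned long asid_cache = ASID_USER_FIRST;

static unsigned long new_context(void)
{
	unsigned long asid = asid_cache;

	if (((++asid) & ASID_MASK) == 0) {
		/* here the kernel would local_flush_tlb_all() */
		asid += ASID_USER_FIRST;	/* skip the reserved ASIDs */
	}
	asid_cache = asid;
	return asid;
}

int main(void)
{
	for (int i = 0; i < 300; i++) {
		unsigned long a = new_context();

		if (i < 2 || (i >= 249 && i <= 253))
			printf("mm %3d -> asid 0x%02lx, generation %lu\n",
			       i, a & ASID_MASK, a >> ASID_BITS);
	}
	return 0;
}
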
diff --git a/arch/xtensa/include/asm/module.h b/arch/xtensa/include/asm/module.h
index d9b34bee4d4..488b40c6f9b 100644
--- a/arch/xtensa/include/asm/module.h
+++ b/arch/xtensa/include/asm/module.h
@@ -13,15 +13,8 @@
#ifndef _XTENSA_MODULE_H
#define _XTENSA_MODULE_H
-struct mod_arch_specific
-{
- /* No special elements, yet. */
-};
-
#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
+#include <asm-generic/module.h>
#endif /* _XTENSA_MODULE_H */
diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h
new file mode 100644
index 00000000000..73dcc5456f6
--- /dev/null
+++ b/arch/xtensa/include/asm/mxregs.h
@@ -0,0 +1,46 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MXREGS_H
+#define _XTENSA_MXREGS_H
+
+/*
+ * RER/WER at, as Read/write external register
+ * at: value
+ * as: address
+ *
+ * Address Value
+ * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p
+ * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p
+ * 0180 0...0m..m Clear enable specified by mask (m)
+ * 0184 0...0m..m Set enable specified by mask (m)
+ * 0190 0...0x..x 8-bit IPI partition register
+ * VVVVVVVVVVPPPPUUUUUUUUUUUUUUUUUU
+ * V (10-bit) Release/Version
+ * P ( 4-bit) Number of cores - 1
+ * U (18-bit) ID
+ * 01a0 i.......i 32-bit ConfigID
+ * 0200 0...0m..m RunStall core 'n'
+ * 0220 c Cache coherency enabled
+ */
+
+#define MIROUT(irq) (0x000 + (irq))
+#define MIPICAUSE(cpu) (0x100 + (cpu))
+#define MIPISET(cause) (0x140 + (cause))
+#define MIENG 0x180
+#define MIENGSET 0x184
+#define MIASG 0x188 /* Read Global Assert Register */
+#define MIASGSET 0x18c /* Set Global Assert Register */
+#define MIPIPART 0x190
+#define SYSCFGID 0x1a0
+#define MPSCORE 0x200
+#define CCON 0x220
+
+#endif /* _XTENSA_MXREGS_H */
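The register map above is accessed through the RER/WER instructions, which this same patch wraps as get_er()/set_er() in asm/processor.h. The following is a hedged sketch of how a few of these offsets might be driven; the one-hot routing mask, the IPI CPU mask, and the SYSCFGID field positions are assumptions taken from the comment block, not definitions from the patch.

#include <asm/mxregs.h>
#include <asm/processor.h>

/* Route external IRQ 'irq' to processor 'cpu' (one-hot mask assumed). */
static inline void mx_route_irq(unsigned int irq, unsigned int cpu)
{
	set_er(1u << cpu, MIROUT(irq));
}

/* Assert IPI 'cause' on every core whose bit is set in cpu_mask. */
static inline void mx_send_ipi(unsigned int cause, unsigned long cpu_mask)
{
	set_er(cpu_mask, MIPISET(cause));
}

/* Number of cores from SYSCFGID; the P field position (bits 18..21 per the
 * V/P/U diagram above) is an assumption. */
static inline unsigned int mx_core_count(void)
{
	return ((get_er(SYSCFGID) >> 18) & 0xf) + 1;
}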
diff --git a/arch/xtensa/include/asm/nommu.h b/arch/xtensa/include/asm/nommu.h
deleted file mode 100644
index dce2c438c5b..00000000000
--- a/arch/xtensa/include/asm/nommu.h
+++ /dev/null
@@ -1,3 +0,0 @@
-typedef struct {
- unsigned long end_brk;
-} mm_context_t;
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 599e7a2e729..3407cf7989b 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,
+				    struct mm_struct *mm)
{
return 0;
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 7a5591a71f8..47f582333f6 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -29,19 +29,19 @@
* PAGE_SHIFT determines the page size
*/
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef CONFIG_MMU
-#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN XCHAL_KSEG_SIZE
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else
-#define PAGE_OFFSET 0
-#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET 0
+#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
-#define PGTABLE_START 0x80000000
+#define PGTABLE_START 0x80000000
/*
* Cache aliasing:
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) \
+ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif
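__pa() and __va() above are plain offset arithmetic against the cached KSEG base. Here is a standalone illustration; the PAGE_OFFSET value (0xd0000000, a common XCHAL_KSEG_CACHED_VADDR) is an assumption for the example.

#include <stdio.h>

/* Assumed for the example: the usual XCHAL_KSEG_CACHED_VADDR. */
#define PAGE_OFFSET 0xd0000000ul

#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))

int main(void)
{
	unsigned long phys = 0x00200000ul;	/* 2 MiB into RAM */
	void *virt = __va(phys);		/* 0xd0200000     */

	printf("phys %#lx <-> virt %p <-> back %#lx\n", phys, virt, __pa(virt));
	return 0;
}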
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h
index ba03d5aeab6..0a70e780ef2 100644
--- a/arch/xtensa/include/asm/param.h
+++ b/arch/xtensa/include/asm/param.h
@@ -7,28 +7,12 @@
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
-#ifdef __KERNEL__
+#include <uapi/asm/param.h>
+
# define HZ CONFIG_HZ /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
-#else
-# define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NGROUPS
-#define NGROUPS 32
-#endif
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
#endif /* _XTENSA_PARAM_H */
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 00fcbd7c534..0b68c76ec1e 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -35,7 +35,7 @@ struct pci_space {
struct pci_controller {
int index; /* used for pci_controller_num */
struct pci_controller *next;
- struct pci_bus *bus;
+ struct pci_bus *bus;
void *arch_data;
int first_busno;
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 05244f07dd3..5d52dc43dfe 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -22,11 +22,6 @@
extern struct pci_controller* pcibios_alloc_controller(void);
-static inline void pcibios_penalize_isa_irq(int irq)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
/* Assume some values. (We should revise them, if necessary) */
#define PCIBIOS_MIN_IO 0x2000
@@ -53,7 +48,7 @@ struct pci_dev;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+ enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
diff --git a/arch/xtensa/include/asm/percpu.h b/arch/xtensa/include/asm/percpu.h
deleted file mode 100644
index 6d2bc2ada9d..00000000000
--- a/arch/xtensa/include/asm/percpu.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * linux/include/asm-xtensa/percpu.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_PERCPU__
-#define _XTENSA_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* _XTENSA_PERCPU__ */
diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h
new file mode 100644
index 00000000000..5aa4590acaa
--- /dev/null
+++ b/arch/xtensa/include/asm/perf_event.h
@@ -0,0 +1,4 @@
+#ifndef __ASM_XTENSA_PERF_EVENT_H
+#define __ASM_XTENSA_PERF_EVENT_H
+
+#endif /* __ASM_XTENSA_PERF_EVENT_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 40cf9bceda2..d38eb9237e6 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -38,35 +38,46 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
-/* Use a slab cache for the pte pages (see also sparc64 implementation) */
-
-extern struct kmem_cache *pgtable_cache;
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
+ pte_t *ptep;
+ int i;
+
+ ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ if (!ptep)
+ return NULL;
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, ptep + i);
+ return ptep;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
{
+ pte_t *pte;
struct page *page;
- page = virt_to_page(pte_alloc_one_kernel(mm, addr));
- pgtable_page_ctor(page);
+ pte = pte_alloc_one_kernel(mm, addr);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- kmem_cache_free(pgtable_cache, pte);
+ free_page((unsigned long)pte);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
- kmem_cache_free(pgtable_cache, page_address(pte));
+ __free_page(pte);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b03c043ce75..4b0ca35a93b 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Copyright (C) 2001 - 2007 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_PGTABLE_H
@@ -64,41 +64,82 @@
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
*/
-
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 0xC7FF8000
/*
- * Xtensa Linux config PTE layout (when present):
- * 31-12: PPN
- * 11-6: Software
- * 5-4: RING
- * 3-0: CA
+ * For the Xtensa architecture, the PTE layout is as follows:
+ *
+ * 31------12 11 10-9 8-6 5-4 3-2 1-0
+ * +-----------------------------------------+
+ * | | Software | HARDWARE |
+ * | PPN | ADW | RI |Attribute|
+ * +-----------------------------------------+
+ * pte_none | MBZ | 01 | 11 | 00 |
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | wx |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | 00 |
+ * +- - - - - - - - - - - - - - - - - - - - -+
+ * file | file offset | 01 | 11 | 10 |
+ * +-----------------------------------------+
*
- * Similar to the Alpha and MIPS ports, we need to keep track of the ref
- * and mod bits in software. We have a software "you can read
- * from this page" bit, and a hardware one which actually lets the
- * process read from the page. On the same token we have a software
- * writable bit and the real hardware one which actually lets the
- * process write to the page.
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+ * +-----------------------------------------+
+ * present | PPN | 0 | 00 | ADW | RI | CA | w1 |
+ * +-----------------------------------------+
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
+ * +-----------------------------------------+
*
- * See further below for PTE layout for swapped-out pages.
+ * Legend:
+ * PPN Physical Page Number
+ * ADW software: accessed (young) / dirty / writable
+ * RI ring (0=privileged, 1=user, 2 and 3 are unused)
+ * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
+ * (11 is invalid and used to mark pages that are not present)
+ * w page is writable (hw)
+ * x page is executable (hw)
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+ * file offset 26-bit offset into the file, in increments of PAGE_SIZE
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+ * any access (read, write, and execute).
+ * - 'multihit-exception' has the highest priority of all MMU exceptions,
+ * so the ring must be set to 'RING_USER' even for 'non-present' pages.
+ * - on older hardware, the executable flag was not supported and
+ * used as a 'valid' flag, so it needs to be always set.
+ * - we need to keep track of certain flags in software (dirty and young);
+ *   to do this, we use write exceptions and have a separate software w-flag.
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
*/
+#define _PAGE_ATTRIB_MASK 0xf
+
#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
-#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
-#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */
-
-/* None of these cache modes include MP coherency: */
#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#define _PAGE_CA_WB (1<<2) /* write-back */
#define _PAGE_CA_WT (2<<2) /* write-through */
#define _PAGE_CA_MASK (3<<2)
-#define _PAGE_INVALID (3<<2)
+#define _PAGE_CA_INVALID (3<<2)
+
+/* We use invalid attribute values to distinguish special pte entries */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
+#define _PAGE_NONE 0x04
+#else
+#define _PAGE_HW_VALID 0x00
+#define _PAGE_NONE 0x0f
+#endif
+#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */
#define _PAGE_USER (1<<4) /* user access (ring=1) */
@@ -108,19 +149,12 @@
#define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
-/* On older HW revisions, we always have to set bit 0 */
-#if XCHAL_HW_VERSION_MAJOR < 2000
-# define _PAGE_VALID (1<<0)
-#else
-# define _PAGE_VALID 0
-#endif
-
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
-
#ifdef CONFIG_MMU
-#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
@@ -132,9 +166,9 @@
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
-# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif
#else /* no mmu */
@@ -186,12 +220,11 @@ extern unsigned long empty_zero_page[1024];
#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
-extern void pgtable_cache_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
-static inline void pgtable_cache_init(void) { }
#endif
+static inline void pgtable_cache_init(void) { }
/*
* The pmd contains the kernel virtual address of the pte page.
@@ -202,12 +235,16 @@ static inline void pgtable_cache_init(void) { }
/*
* pte status.
*/
-#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
-#define pte_present(pte) \
- (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
- || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
+# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
+#else
+# define pte_present(pte) \
+ (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
+ || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
+#endif
#define pte_clear(mm,addr,ptep) \
- do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
+ do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
@@ -273,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
update_pte(ptep, pteval);
}
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
@@ -284,7 +325,7 @@ struct vm_area_struct;
static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep)
{
pte_t pte = *ptep;
if (!pte_young(pte))
@@ -304,8 +345,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_t pte = *ptep;
- update_pte(ptep, pte_wrprotect(pte));
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
}
/* to find an entry in a kernel page-table-directory */
@@ -328,35 +369,23 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/*
- * Encode and decode a swap entry.
- *
- * Format of swap pte:
- * bit 0 MBZ
- * bit 1 page-file (must be zero)
- * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
- * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
- * bits 6 - 10 swap type (5 bits -> 32 types)
- * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB)
-
- * Format of file pte:
- * bit 0 MBZ
- * bit 1 page-file (must be one: _PAGE_FILE)
- * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
- * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
- * bits 6 - 31 file offset / PAGE_SIZE
+ * Encode and decode a swap and file entry.
*/
+#define SWP_TYPE_BITS 5
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
- ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
+ ((swp_entry_t){((type) << 6) | ((offs) << 11) | \
+ _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define PTE_FILE_MAX_BITS 28
-#define pte_to_pgoff(pte) (pte_val(pte) >> 4)
+#define PTE_FILE_MAX_BITS 26
+#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
#define pgoff_to_pte(off) \
- ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
+ ((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
#endif /* !defined (__ASSEMBLY__) */
@@ -393,14 +422,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t *ptep);
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
-
typedef pte_t *pte_addr_t;
#endif /* !defined (__ASSEMBLY__) */
@@ -410,6 +431,10 @@ typedef pte_t *pte_addr_t;
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
#include <asm-generic/pgtable.h>
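The swap-entry macros above pack the swap type into bits 6..10 and the offset into bits 11..31, while the invalid cache attribute plus the user ring mark the pte as not present. A minimal sketch of that encoding, with the two attribute constants restated locally so it compiles on its own:

#include <stdio.h>

#define _PAGE_CA_INVALID (3u << 2)
#define _PAGE_USER       (1u << 4)

static unsigned long swp_entry(unsigned long type, unsigned long offs)
{
	return (type << 6) | (offs << 11) | _PAGE_CA_INVALID | _PAGE_USER;
}

static unsigned long swp_type(unsigned long val)   { return (val >> 6) & 0x1f; }
static unsigned long swp_offset(unsigned long val) { return val >> 11; }

int main(void)
{
	/* 5 type bits -> 32 swap areas; 21 offset bits -> 8 GB per area
	 * with 4 KiB pages, matching the layout comment above. */
	unsigned long pte = swp_entry(3, 0x12345);

	printf("pte=%#lx type=%lu offset=%#lx\n",
	       pte, swp_type(pte), swp_offset(pte));
	return 0;
}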
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 7d936e58e9b..32e98f27ce9 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -30,11 +30,6 @@ extern void platform_init(bp_tag_t*);
extern void platform_setup (char **);
/*
- * platform_init_irq is called from init_IRQ.
- */
-extern void platform_init_irq (void);
-
-/*
* platform_restart is called to restart the system.
*/
extern void platform_restart (void);
@@ -75,4 +70,3 @@ extern int platform_pcibios_fixup (void);
extern void platform_calibrate_ccount (void);
#endif /* _XTENSA_PLATFORM_H */
-
diff --git a/arch/xtensa/include/asm/posix_types.h b/arch/xtensa/include/asm/posix_types.h
deleted file mode 100644
index 6b2190c3588..00000000000
--- a/arch/xtensa/include/asm/posix_types.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * include/asm-xtensa/posix_types.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Largely copied from include/asm-ppc/posix_types.h
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_POSIX_TYPES_H
-#define _XTENSA_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc. Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
-typedef unsigned long __kernel_nlink_t;
-typedef long __kernel_off_t;
-typedef int __kernel_pid_t;
-typedef unsigned short __kernel_ipc_pid_t;
-typedef unsigned int __kernel_uid_t;
-typedef unsigned int __kernel_gid_t;
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-typedef long __kernel_time_t;
-typedef long __kernel_suseconds_t;
-typedef long __kernel_clock_t;
-typedef int __kernel_timer_t;
-typedef int __kernel_clockid_t;
-typedef int __kernel_daddr_t;
-typedef char * __kernel_caddr_t;
-typedef unsigned short __kernel_uid16_t;
-typedef unsigned short __kernel_gid16_t;
-typedef unsigned int __kernel_uid32_t;
-typedef unsigned int __kernel_gid32_t;
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-typedef unsigned short __kernel_old_dev_t;
-
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-
-#ifndef __GNUC__
-
-#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
-#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
-#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
-#define __FD_ZERO(set) \
- ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
-
-#else /* __GNUC__ */
-
-#if defined(__KERNEL__)
-/* With GNU C, use inline functions instead so args are evaluated only once: */
-
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(__kernel_fd_set *p)
-{
- unsigned int *tmp = (unsigned int *)p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_LONGS)) {
- switch (__FDSET_LONGS) {
- case 8:
- tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
- tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
- return;
- }
- }
- i = __FDSET_LONGS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
-#endif /* defined(__KERNEL__) */
-#endif /* __GNUC__ */
-#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 3acb26e8dea..abb59708a3b 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*/
#ifndef _XTENSA_PROCESSOR_H
@@ -68,7 +68,7 @@
/* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts.
*/
-#define LOCKLEVEL 1
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers
@@ -89,7 +89,7 @@
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
typedef struct {
- unsigned long seg;
+ unsigned long seg;
} mm_segment_t;
struct thread_struct {
@@ -145,13 +145,14 @@ struct thread_struct {
* set_thread_state in signal.c depends on it.
*/
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
- (1 << PS_CALLINC_SHIFT) | \
- (USER_RING << PS_RING_SHIFT) | \
- (1 << PS_UM_BIT) | \
- (1 << PS_EXCM_BIT))
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
+ memset(regs, 0, sizeof(*regs)); \
regs->pc = new_pc; \
regs->ps = USER_PS_VALUE; \
regs->areg[1] = new_sp; \
@@ -168,12 +169,6 @@ struct mm_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct*);
-
-/* Create a kernel thread without removing it from tasklists */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
@@ -196,5 +191,25 @@ extern unsigned long get_wchan(struct task_struct *p);
#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#if XCHAL_HAVE_EXTERN_REGS
+
+static inline void set_er(unsigned long value, unsigned long addr)
+{
+ asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
+}
+
+static inline unsigned long get_er(unsigned long addr)
+{
+ register unsigned long value;
+ asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
+ return value;
+}
+
+#endif /* XCHAL_HAVE_EXTERN_REGS */
+
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_PROCESSOR_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index d85d38da8ee..598e752dcbc 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -7,73 +7,11 @@
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
-/*
- * Kernel stack
- *
- * +-----------------------+ -------- STACK_SIZE
- * | register file | |
- * +-----------------------+ |
- * | struct pt_regs | |
- * +-----------------------+ | ------ PT_REGS_OFFSET
- * double : 16 bytes spill area : | ^
- * excetion :- - - - - - - - - - - -: | |
- * frame : struct pt_regs : | |
- * :- - - - - - - - - - - -: | |
- * | | | |
- * | memory stack | | |
- * | | | |
- * ~ ~ ~ ~
- * ~ ~ ~ ~
- * | | | |
- * | | | |
- * +-----------------------+ | | --- STACK_BIAS
- * | struct task_struct | | | ^
- * current --> +-----------------------+ | | |
- * | struct thread_info | | | |
- * +-----------------------+ --------
- */
-
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
-#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
-#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
-#define EXC_TABLE_SIZE 0x400
+#include <uapi/asm/ptrace.h>
-/* Registers used by strace */
-
-#define REG_A_BASE 0x0000
-#define REG_AR_BASE 0x0100
-#define REG_PC 0x0020
-#define REG_PS 0x02e6
-#define REG_WB 0x0248
-#define REG_WS 0x0249
-#define REG_LBEG 0x0200
-#define REG_LEND 0x0201
-#define REG_LCOUNT 0x0202
-#define REG_SAR 0x0203
-
-#define SYSCALL_NR 0x00ff
-
-/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
-
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-#define PTRACE_GETXTREGS 18
-#define PTRACE_SETXTREGS 19
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -99,7 +37,8 @@ struct pt_regs {
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */
- int reserved[1]; /* 64 */
+ unsigned long scompare1; /* 64 */
+ unsigned long threadptr; /* 68 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
@@ -110,21 +49,31 @@ struct pt_regs {
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
- unsigned long areg[16]; /* 128 (64) */
+ unsigned long areg[16];
};
#include <variant/core.h>
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
- (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
+# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+ (regs)->areg[1]))
# ifndef CONFIG_SMP
# define profile_pc(regs) instruction_pointer(regs)
+# else
+# define profile_pc(regs) \
+ ({ \
+ in_lock_functions(instruction_pointer(regs)) ? \
+ return_pointer(regs) : instruction_pointer(regs); \
+ })
# endif
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
#else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h>
@@ -132,6 +81,4 @@ struct pt_regs {
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _XTENSA_PTRACE_H */
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index d4baed24692..4ba9f516b0e 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -27,52 +27,15 @@
/* Special registers. */
-#define LBEG 0
-#define LEND 1
-#define LCOUNT 2
-#define SAR 3
-#define BR 4
-#define SCOMPARE1 12
-#define ACCHI 16
-#define ACCLO 17
-#define MR 32
-#define WINDOWBASE 72
-#define WINDOWSTART 73
-#define PTEVADDR 83
-#define RASID 90
-#define ITLBCFG 91
-#define DTLBCFG 92
-#define IBREAKENABLE 96
-#define DDR 104
-#define IBREAKA 128
-#define DBREAKA 144
-#define DBREAKC 160
-#define EPC 176
-#define EPC_1 177
-#define DEPC 192
-#define EPS 192
-#define EPS_1 193
-#define EXCSAVE 208
-#define EXCSAVE_1 209
-#define INTERRUPT 226
-#define INTENABLE 228
-#define PS 230
-#define THREADPTR 231
-#define EXCCAUSE 232
-#define DEBUGCAUSE 233
-#define CCOUNT 234
-#define PRID 235
-#define ICOUNT 236
-#define ICOUNTLEVEL 237
-#define EXCVADDR 238
-#define CCOMPARE 240
-#define MISC 244
-
-/* Special names for read-only and write-only interrupt registers. */
-
-#define INTREAD 226
-#define INTSET 226
-#define INTCLEAR 227
+#define SREG_MR 32
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244
/* EXCCAUSE register fields */
@@ -89,6 +52,10 @@
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
@@ -115,12 +82,14 @@
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
+#define PS_OWB_WIDTH 4
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_WIDTH 4
#define PS_INTLEVEL_MASK 0x0000000F
/* DBREAKCn register fields. */
@@ -142,4 +111,3 @@
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_SPECREG_H */
-
diff --git a/arch/xtensa/include/asm/resource.h b/arch/xtensa/include/asm/resource.h
deleted file mode 100644
index 17b5ab31177..00000000000
--- a/arch/xtensa/include/asm/resource.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/resource.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_RESOURCE_H
-#define _XTENSA_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _XTENSA_RESOURCE_H */
diff --git a/arch/xtensa/include/asm/rmap.h b/arch/xtensa/include/asm/rmap.h
deleted file mode 100644
index 649588b7e9a..00000000000
--- a/arch/xtensa/include/asm/rmap.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/rmap.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_RMAP_H
-#define _XTENSA_RMAP_H
-
-#include <asm-generic/rmap.h>
-
-#endif
diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
deleted file mode 100644
index a0421a61d9e..00000000000
--- a/arch/xtensa/include/asm/scatterlist.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/scatterlist.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SCATTERLIST_H
-#define _XTENSA_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* _XTENSA_SCATTERLIST_H */
diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h
deleted file mode 100644
index 40b5191b55a..00000000000
--- a/arch/xtensa/include/asm/sections.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/sections.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SECTIONS_H
-#define _XTENSA_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* _XTENSA_SECTIONS_H */
diff --git a/arch/xtensa/include/asm/siginfo.h b/arch/xtensa/include/asm/siginfo.h
deleted file mode 100644
index 6916248295d..00000000000
--- a/arch/xtensa/include/asm/siginfo.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/siginfo.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SIGINFO_H
-#define _XTENSA_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _XTENSA_SIGINFO_H */
diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
index 633ba73bc4d..de169b4eaee 100644
--- a/arch/xtensa/include/asm/signal.h
+++ b/arch/xtensa/include/asm/signal.h
@@ -9,164 +9,15 @@
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-
#ifndef _XTENSA_SIGNAL_H
#define _XTENSA_SIGNAL_H
-
-#define _NSIG 64
-#define _NSIG_BPW 32
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#endif
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/* #define SIGLOST 29 */
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX (_NSIG-1)
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
+#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
+#define __ARCH_HAS_SA_RESTORER
-#define SIG_BLOCK 0 /* for blocking signals */
-#define SIG_UNBLOCK 1 /* for unblocking signals */
-#define SIG_SETMASK 2 /* for setting the signal mask */
-
-/* Type of a signal handler. */
-typedef void (*__sighandler_t)(int);
-
-#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
-#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
-
-#ifdef __KERNEL__
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-#else
-
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
index 83c569e3bdb..4e43f564389 100644
--- a/arch/xtensa/include/asm/smp.h
+++ b/arch/xtensa/include/asm/smp.h
@@ -1,27 +1,43 @@
/*
- * include/asm-xtensa/smp.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_SMP_H
#define _XTENSA_SMP_H
-extern struct xtensa_cpuinfo boot_cpu_data;
+#ifdef CONFIG_SMP
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define cpu_logical_map(cpu) (cpu)
-struct xtensa_cpuinfo {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned long pgtable_cache_sz;
+struct start_info {
+ unsigned long stack;
};
+extern struct start_info start_info;
-#define cpu_logical_map(cpu) (cpu)
+struct cpumask;
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+
+void smp_init_cpus(void);
+void secondary_init_irq(void);
+void ipi_init(void);
+struct seq_file;
+void show_ipi_list(struct seq_file *p, int prec);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 8ff23649581..1d95fa5dcd1 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,195 @@
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H
-#include <linux/spinlock.h>
+/*
+ * spinlock
+ *
+ * There is at most one owner of a spinlock. There are not different
+ * types of spinlock owners like there are for rwlocks (see below).
+ *
+ * When trying to obtain a spinlock, the function "spins" forever, or busy-
+ * waits, until the lock is obtained. When spinning, presumably some other
+ * owner will soon give up the spinlock making it available to others. Use
+ * the trylock functions to avoid spinning forever.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the spinlock
+ * 1 somebody owns the spinlock
+ */
+
+#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/*
+ * rwlock
+ *
+ * Read-write locks are really a more flexible spinlock. They allow
+ * multiple readers but only one writer. Write ownership is exclusive
+ * (i.e., all other readers and writers are blocked from ownership while
+ * there is a write owner). These rwlocks are unfair to writers. Writers
+ * can be starved for an indefinite time by readers.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the rwlock
+ * >0 one or more readers own the rwlock
+ * (the positive value is the actual number of readers)
+ * 0x80000000 one writer owns the rwlock, no other writers, no readers
+ */
+
+#define arch_write_can_lock(x) ((x)->lock == 0)
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " bltz %1, 1b\n"
+ " wsr %1, scompare1\n"
+ " addi %0, %1, 1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long result;
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " l32i %1, %2, 0\n"
+ " addi %0, %1, 1\n"
+ " bltz %0, 1f\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ "1:\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return result == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " addi %0, %1, -1\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp1), "=&a" (tmp2)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#endif /* _XTENSA_SPINLOCK_H */
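The lock paths above rely on SCOMPARE1 plus the S32C1I conditional store: the expected value (0, unlocked) is loaded into SCOMPARE1 and s32c1i stores 1 only if the lock word still matches. Below is a rough C model of that semantics using GCC __atomic builtins, for illustration only; the memory-ordering choices are assumptions, and the kernel uses the inline assembly, not this.

#include <stdio.h>

typedef struct { unsigned int slock; } model_spinlock_t;

static inline void model_spin_lock(model_spinlock_t *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* movi 0 / wsr scompare1 */
	} while (!__atomic_compare_exchange_n(&lock->slock, &expected, 1, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED)); /* s32c1i; bnez 1b */
}

static inline int model_spin_trylock(model_spinlock_t *lock)
{
	unsigned int expected = 0;

	return __atomic_compare_exchange_n(&lock->slock, &expected, 1, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline void model_spin_unlock(model_spinlock_t *lock)
{
	__atomic_store_n(&lock->slock, 0, __ATOMIC_RELEASE);	/* s32ri */
}

int main(void)
{
	model_spinlock_t lock = { 0 };

	model_spin_lock(&lock);
	printf("trylock while held:   %d\n", model_spin_trylock(&lock)); /* 0 */
	model_spin_unlock(&lock);
	printf("trylock after unlock: %d\n", model_spin_trylock(&lock)); /* 1 */
	return 0;
}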
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
new file mode 100644
index 00000000000..7ec5ce10c9e
--- /dev/null
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 00000000000..6a05fcb0a20
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,36 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task || task == current)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
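walk_stackframe() above takes a starting stack pointer and a per-frame callback. A hedged usage sketch follows; the assumption that a non-zero callback return terminates the walk, and the printing helper itself, are illustrative rather than taken from the patch.

#include <linux/printk.h>
#include <asm/stacktrace.h>

/* Hypothetical per-frame callback: print up to 16 frames. */
static int print_frame(struct stackframe *frame, void *data)
{
	int *depth = data;

	pr_info("  #%d: pc=%pS sp=%08lx\n",
		(*depth)++, (void *)frame->pc, frame->sp);
	return *depth >= 16;	/* assumed: non-zero return stops the walk */
}

static void dump_current_stack(void)
{
	int depth = 0;

	walk_stackframe(stack_pointer(NULL), print_frame, &depth);
}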
diff --git a/arch/xtensa/include/asm/statfs.h b/arch/xtensa/include/asm/statfs.h
deleted file mode 100644
index 9c3d1a21313..00000000000
--- a/arch/xtensa/include/asm/statfs.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * include/asm-xtensa/statfs.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_STATFS_H
-#define _XTENSA_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _XTENSA_STATFS_H */
-
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 405a8c49ff2..8d5d9dfadb0 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -74,7 +74,7 @@ static inline int strcmp(const char *__cs, const char *__ct)
"beqz %2, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
- "sub %2, %3, %2"
+ "sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct));
@@ -99,7 +99,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"beqz %3, 2f\n\t"
"beq %2, %3, 1b\n"
"2:\n\t"
- "sub %2, %3, %2"
+ "sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct), "r" (__cs+__n));
diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h
new file mode 100644
index 00000000000..6b73bf0eb1f
--- /dev/null
+++ b/arch/xtensa/include/asm/switch_to.h
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWITCH_TO_H
+#define _XTENSA_SWITCH_TO_H
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#define switch_to(prev, next, last) \
+do { \
+	(last) = _switch_to(prev, next); \
+} while (0)
+
+#endif /* _XTENSA_SWITCH_TO_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index efcf33b92e4..3673ff1f1bc 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -9,21 +9,9 @@
*/
struct pt_regs;
-struct sigaction;
-asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
-asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
-asmlinkage long xtensa_sigsuspend(struct pt_regs*);
-asmlinkage long xtensa_rt_sigsuspend(struct pt_regs*);
-asmlinkage long xtensa_sigaction(int, const struct old_sigaction*,
- struct old_sigaction*);
-asmlinkage long xtensa_sigaltstack(struct pt_regs *regs);
-asmlinkage long sys_rt_sigaction(int,
- const struct sigaction __user *,
- struct sigaction __user *,
- size_t);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
@@ -31,9 +19,9 @@ asmlinkage long xtensa_fadvise64_64(int, int,
/* Should probably move to linux/syscalls.h */
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
+ fd_set __user *exp, struct timespec __user *tsp,
+ void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
- struct timespec __user *tsp, const sigset_t __user *sigmask,
- size_t sigsetsize);
-
-
+ struct timespec __user *tsp,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644
index 00000000000..c015c5c8e3f
--- /dev/null
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -0,0 +1,38 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#define SYSMEM_BANKS_MAX 31
+
+struct meminfo {
+ unsigned long start;
+ unsigned long end;
+};
+
+/*
+ * Bank array is sorted by .start.
+ * Banks don't overlap and there's at least one page gap
+ * between adjacent bank entries.
+ */
+struct sysmem_info {
+ int nr_banks;
+ struct meminfo bank[SYSMEM_BANKS_MAX];
+};
+
+extern struct sysmem_info sysmem;
+
+int add_sysmem_bank(unsigned long start, unsigned long end);
+int mem_reserve(unsigned long, unsigned long, int);
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
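sysmem above is a small sorted array of physical memory banks. A hedged sketch of a consumer that relies only on the documented invariants (sorted by .start, non-overlapping); the helper is hypothetical, not part of the patch.

#include <asm/sysmem.h>

/* Hypothetical consumer; relies only on the invariants documented above. */
static unsigned long total_sysmem_bytes(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < sysmem.nr_banks; i++)
		total += sysmem.bank[i].end - sysmem.bank[i].start;
	return total;
}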
diff --git a/arch/xtensa/include/asm/termios.h b/arch/xtensa/include/asm/termios.h
deleted file mode 100644
index 4673f42f88a..00000000000
--- a/arch/xtensa/include/asm/termios.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * include/asm-xtensa/termios.h
- *
- * Copied from SH.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_TERMIOS_H
-#define _XTENSA_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* Modem lines */
-
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- *(unsigned short *) &(termios)->x = __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __KERNEL__ */
-
-#endif /* _XTENSA_TERMIOS_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 6abbedd09d8..470153e8547 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -76,8 +76,6 @@ struct thread_info {
#endif
-#define PREEMPT_ACTIVE 0x10000000
-
/*
* macros/functions for gaining access to the thread information structure
*/
@@ -128,18 +126,14 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
-#define TIF_IRET 4 /* return with iret */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
-#define _TIF_IRET (1<<TIF_IRET)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 053bc427210..ca929e6a38b 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -1,72 +1,52 @@
/*
- * include/asm-xtensa/timex.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_TIMEX_H
#define _XTENSA_TIMEX_H
-#ifdef __KERNEL__
-
#include <asm/processor.h>
#include <linux/stringify.h>
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
+#if XCHAL_NUM_TIMERS > 0 && \
+ INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
+#elif XCHAL_NUM_TIMERS > 1 && \
+ INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
+#elif XCHAL_NUM_TIMERS > 2 && \
+ INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
# error "Bad timer number for Linux configurations!"
#endif
-#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
-
-#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
-#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */
-
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-extern unsigned long ccount_per_jiffy;
-extern unsigned long nsec_per_ccount;
-#define CCOUNT_PER_JIFFY ccount_per_jiffy
-#define NSEC_PER_CCOUNT nsec_per_ccount
-#else
-#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
-#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
-#endif
-
+extern unsigned long ccount_freq;
typedef unsigned long long cycles_t;
-/*
- * Only used for SMP.
- */
-
-extern cycles_t cacheflush_time;
-
#define get_cycles() (0)
+void local_timer_setup(unsigned cpu);
/*
* Register access.
*/
-#define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
-#define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
-#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
-#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
+#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
+#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
@@ -92,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare)
WSR_CCOMPARE(LINUX_TIMER, ccompare);
}
-#endif /* __KERNEL__ */
#endif /* _XTENSA_TIMEX_H */
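Editor's note: with CCOUNT_PER_JIFFY/NSEC_PER_CCOUNT removed, timing code is expected to work from the exported ccount_freq instead. A minimal sketch, not taken from the patch, of converting a CCOUNT delta to nanoseconds, assuming the frequency argument holds the core clock in Hz:

/* Sketch only: turn a CCOUNT delta into nanoseconds given a frequency in Hz. */
static inline unsigned long long ccount_delta_to_ns(unsigned long start,
						    unsigned long end,
						    unsigned long freq_hz)
{
	unsigned long delta = end - start;	/* unsigned wrap handles rollover */

	return (unsigned long long)delta * 1000000000ULL / freq_hz;
}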
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 46d240074f7..06875feb27c 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -1,18 +1,14 @@
/*
- * include/asm-xtensa/tlbflush.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/stringify.h>
#include <asm/processor.h>
@@ -34,12 +30,34 @@
* - flush_tlb_range(mm, start, end) flushes a range of pages
*/
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct*);
-extern void flush_tlb_page(struct vm_area_struct*,unsigned long);
-extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *);
+void flush_tlb_page(struct vm_area_struct *, unsigned long);
+void flush_tlb_range(struct vm_area_struct *, unsigned long,
+ unsigned long);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else /* !CONFIG_SMP */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
+ end)
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+ end)
-#define flush_tlb_kernel_range(start,end) flush_tlb_all()
+#endif /* CONFIG_SMP */
/* TLB operations. */
@@ -86,26 +104,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
static inline void set_itlbcfg_register (unsigned long val)
{
- __asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
+ __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
: : "a" (val));
}
static inline void set_dtlbcfg_register (unsigned long val)
{
- __asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
+ __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
: : "a" (val));
}
static inline void set_ptevaddr_register (unsigned long val)
{
- __asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
+ __asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
: : "a" (val));
}
static inline unsigned long read_ptevaddr_register (void)
{
unsigned long tmp;
- __asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
+ __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
return tmp;
}
@@ -187,5 +205,4 @@ static inline unsigned long read_itlb_translation (int way)
}
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/arch/xtensa/include/asm/topology.h b/arch/xtensa/include/asm/topology.h
deleted file mode 100644
index 7309e38a0cc..00000000000
--- a/arch/xtensa/include/asm/topology.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/topology.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_TOPOLOGY_H
-#define _XTENSA_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _XTENSA_TOPOLOGY_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 00000000000..677bfcf4ee5
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,59 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * handler must be either of the following:
+ * void (*)(struct pt_regs *regs);
+ * void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void secondary_trap_init(void);
+
+static inline void spill_registers(void)
+{
+#if XCHAL_NUM_AREGS > 16
+ __asm__ __volatile__ (
+ " call12 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " _entry a1, 48\n"
+ " addi a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+#endif
+ " _entry a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a4, a4\n"
+#endif
+ " retw\n"
+ "2:\n"
+ : : : "a12", "a13", "memory");
+#else
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
+#endif
+}
+
+#endif /* _XTENSA_TRAPS_H */
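Editor's note: trap_set_handler() registers a C-level handler for an exception cause and returns the previously installed one. A hedged sketch of a caller; the handler name is hypothetical and cause 0 (illegal instruction) is used only as an example value:

/* Illustrative sketch; do_my_illegal() is hypothetical, not part of the patch. */
static void do_my_illegal(struct pt_regs *regs, unsigned long exccause)
{
	pr_info("illegal instruction at %#lx (cause %lu)\n", regs->pc, exccause);
}

static void __init install_example_handler(void)
{
	void *old = trap_set_handler(0 /* illegal instruction */, do_my_illegal);

	(void)old;	/* previous handler could be restored later */
}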
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
index 6d4db7e8ffa..2b410b8c7f7 100644
--- a/arch/xtensa/include/asm/types.h
+++ b/arch/xtensa/include/asm/types.h
@@ -7,30 +7,17 @@
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
-
#ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H
-#include <asm-generic/int-ll64.h>
-
-#ifdef __ASSEMBLY__
-# define __XTENSA_UL(x) (x)
-# define __XTENSA_UL_CONST(x) x
-#else
-# define __XTENSA_UL(x) ((unsigned long)(x))
-# define __XTENSA_UL_CONST(x) x##UL
-#endif
+#include <uapi/asm/types.h>
#ifndef __ASSEMBLY__
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
-#ifdef __KERNEL__
#define BITS_PER_LONG 32
-#endif /* __KERNEL__ */
#endif
-
#endif /* _XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 3fa526fd3c9..fd686dc45d1 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -17,7 +17,9 @@
#define _XTENSA_UACCESS_H
#include <linux/errno.h>
+#ifndef __ASSEMBLY__
#include <linux/prefetch.h>
+#endif
#include <asm/types.h>
#define VERIFY_READ 0
@@ -178,7 +180,8 @@
#define segment_eq(a,b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __user_ok(addr,size) \
+ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
@@ -232,10 +235,10 @@ do { \
int __cb; \
retval = 0; \
switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
- case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
- case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
- case 8: { \
+ case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
+ case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
+ case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
+ case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
break; \
@@ -289,7 +292,7 @@ do { \
* __check_align_* macros still work.
*/
#define __put_user_asm(x, addr, err, align, insn, cb) \
- __asm__ __volatile__( \
+__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
@@ -299,8 +302,8 @@ do { \
" .long 2b \n" \
"5: \n" \
" l32r %1, 4b \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
@@ -332,13 +335,13 @@ extern long __get_user_bad(void);
do { \
int __cb; \
retval = 0; \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
- case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
- case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
- case 8: retval = __copy_from_user(&x,ptr,8); break; \
- default: (x) = __get_user_bad(); \
- } \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
+ case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
+ case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
+ case 8: retval = __copy_from_user(&x,ptr,8); break; \
+ default: (x) = __get_user_bad(); \
+ } \
} while (0)
@@ -347,7 +350,7 @@ do { \
* __check_align_* macros still work.
*/
#define __get_user_asm(x, addr, err, align, insn, cb) \
- __asm__ __volatile__( \
+__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
@@ -358,8 +361,8 @@ do { \
"5: \n" \
" l32r %1, 4b \n" \
" movi %2, 0 \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
@@ -419,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+ __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) \
+ __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 798ee6d285a..cb4c2ce8d44 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -1,726 +1,12 @@
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
#ifndef _XTENSA_UNISTD_H
#define _XTENSA_UNISTD_H
-#ifndef __SYSCALL
-# define __SYSCALL(nr,func,nargs)
-#endif
-
-#define __NR_spill 0
-__SYSCALL( 0, sys_ni_syscall, 0)
-#define __NR_xtensa 1
-__SYSCALL( 1, sys_ni_syscall, 0)
-#define __NR_available4 2
-__SYSCALL( 2, sys_ni_syscall, 0)
-#define __NR_available5 3
-__SYSCALL( 3, sys_ni_syscall, 0)
-#define __NR_available6 4
-__SYSCALL( 4, sys_ni_syscall, 0)
-#define __NR_available7 5
-__SYSCALL( 5, sys_ni_syscall, 0)
-#define __NR_available8 6
-__SYSCALL( 6, sys_ni_syscall, 0)
-#define __NR_available9 7
-__SYSCALL( 7, sys_ni_syscall, 0)
-
-/* File Operations */
-
-#define __NR_open 8
-__SYSCALL( 8, sys_open, 3)
-#define __NR_close 9
-__SYSCALL( 9, sys_close, 1)
-#define __NR_dup 10
-__SYSCALL( 10, sys_dup, 1)
-#define __NR_dup2 11
-__SYSCALL( 11, sys_dup2, 2)
-#define __NR_read 12
-__SYSCALL( 12, sys_read, 3)
-#define __NR_write 13
-__SYSCALL( 13, sys_write, 3)
-#define __NR_select 14
-__SYSCALL( 14, sys_select, 5)
-#define __NR_lseek 15
-__SYSCALL( 15, sys_lseek, 3)
-#define __NR_poll 16
-__SYSCALL( 16, sys_poll, 3)
-#define __NR__llseek 17
-__SYSCALL( 17, sys_llseek, 5)
-#define __NR_epoll_wait 18
-__SYSCALL( 18, sys_epoll_wait, 4)
-#define __NR_epoll_ctl 19
-__SYSCALL( 19, sys_epoll_ctl, 4)
-#define __NR_epoll_create 20
-__SYSCALL( 20, sys_epoll_create, 1)
-#define __NR_creat 21
-__SYSCALL( 21, sys_creat, 2)
-#define __NR_truncate 22
-__SYSCALL( 22, sys_truncate, 2)
-#define __NR_ftruncate 23
-__SYSCALL( 23, sys_ftruncate, 2)
-#define __NR_readv 24
-__SYSCALL( 24, sys_readv, 3)
-#define __NR_writev 25
-__SYSCALL( 25, sys_writev, 3)
-#define __NR_fsync 26
-__SYSCALL( 26, sys_fsync, 1)
-#define __NR_fdatasync 27
-__SYSCALL( 27, sys_fdatasync, 1)
-#define __NR_truncate64 28
-__SYSCALL( 28, sys_truncate64, 2)
-#define __NR_ftruncate64 29
-__SYSCALL( 29, sys_ftruncate64, 2)
-#define __NR_pread64 30
-__SYSCALL( 30, sys_pread64, 6)
-#define __NR_pwrite64 31
-__SYSCALL( 31, sys_pwrite64, 6)
-
-#define __NR_link 32
-__SYSCALL( 32, sys_link, 2)
-#define __NR_rename 33
-__SYSCALL( 33, sys_rename, 2)
-#define __NR_symlink 34
-__SYSCALL( 34, sys_symlink, 2)
-#define __NR_readlink 35
-__SYSCALL( 35, sys_readlink, 3)
-#define __NR_mknod 36
-__SYSCALL( 36, sys_mknod, 3)
-#define __NR_pipe 37
-__SYSCALL( 37, sys_pipe, 1)
-#define __NR_unlink 38
-__SYSCALL( 38, sys_unlink, 1)
-#define __NR_rmdir 39
-__SYSCALL( 39, sys_rmdir, 1)
-
-#define __NR_mkdir 40
-__SYSCALL( 40, sys_mkdir, 2)
-#define __NR_chdir 41
-__SYSCALL( 41, sys_chdir, 1)
-#define __NR_fchdir 42
-__SYSCALL( 42, sys_fchdir, 1)
-#define __NR_getcwd 43
-__SYSCALL( 43, sys_getcwd, 2)
-
-#define __NR_chmod 44
-__SYSCALL( 44, sys_chmod, 2)
-#define __NR_chown 45
-__SYSCALL( 45, sys_chown, 3)
-#define __NR_stat 46
-__SYSCALL( 46, sys_newstat, 2)
-#define __NR_stat64 47
-__SYSCALL( 47, sys_stat64, 2)
-
-#define __NR_lchown 48
-__SYSCALL( 48, sys_lchown, 3)
-#define __NR_lstat 49
-__SYSCALL( 49, sys_newlstat, 2)
-#define __NR_lstat64 50
-__SYSCALL( 50, sys_lstat64, 2)
-#define __NR_available51 51
-__SYSCALL( 51, sys_ni_syscall, 0)
-
-#define __NR_fchmod 52
-__SYSCALL( 52, sys_fchmod, 2)
-#define __NR_fchown 53
-__SYSCALL( 53, sys_fchown, 3)
-#define __NR_fstat 54
-__SYSCALL( 54, sys_newfstat, 2)
-#define __NR_fstat64 55
-__SYSCALL( 55, sys_fstat64, 2)
-
-#define __NR_flock 56
-__SYSCALL( 56, sys_flock, 2)
-#define __NR_access 57
-__SYSCALL( 57, sys_access, 2)
-#define __NR_umask 58
-__SYSCALL( 58, sys_umask, 1)
-#define __NR_getdents 59
-__SYSCALL( 59, sys_getdents, 3)
-#define __NR_getdents64 60
-__SYSCALL( 60, sys_getdents64, 3)
-#define __NR_fcntl64 61
-__SYSCALL( 61, sys_fcntl64, 3)
-#define __NR_available62 62
-__SYSCALL( 62, sys_ni_syscall, 0)
-#define __NR_fadvise64_64 63
-__SYSCALL( 63, xtensa_fadvise64_64, 6)
-#define __NR_utime 64 /* glibc 2.3.3 ?? */
-__SYSCALL( 64, sys_utime, 2)
-#define __NR_utimes 65
-__SYSCALL( 65, sys_utimes, 2)
-#define __NR_ioctl 66
-__SYSCALL( 66, sys_ioctl, 3)
-#define __NR_fcntl 67
-__SYSCALL( 67, sys_fcntl, 3)
-
-#define __NR_setxattr 68
-__SYSCALL( 68, sys_setxattr, 5)
-#define __NR_getxattr 69
-__SYSCALL( 69, sys_getxattr, 4)
-#define __NR_listxattr 70
-__SYSCALL( 70, sys_listxattr, 3)
-#define __NR_removexattr 71
-__SYSCALL( 71, sys_removexattr, 2)
-#define __NR_lsetxattr 72
-__SYSCALL( 72, sys_lsetxattr, 5)
-#define __NR_lgetxattr 73
-__SYSCALL( 73, sys_lgetxattr, 4)
-#define __NR_llistxattr 74
-__SYSCALL( 74, sys_llistxattr, 3)
-#define __NR_lremovexattr 75
-__SYSCALL( 75, sys_lremovexattr, 2)
-#define __NR_fsetxattr 76
-__SYSCALL( 76, sys_fsetxattr, 5)
-#define __NR_fgetxattr 77
-__SYSCALL( 77, sys_fgetxattr, 4)
-#define __NR_flistxattr 78
-__SYSCALL( 78, sys_flistxattr, 3)
-#define __NR_fremovexattr 79
-__SYSCALL( 79, sys_fremovexattr, 2)
-
-/* File Map / Shared Memory Operations */
-
-#define __NR_mmap2 80
-__SYSCALL( 80, sys_mmap_pgoff, 6)
-#define __NR_munmap 81
-__SYSCALL( 81, sys_munmap, 2)
-#define __NR_mprotect 82
-__SYSCALL( 82, sys_mprotect, 3)
-#define __NR_brk 83
-__SYSCALL( 83, sys_brk, 1)
-#define __NR_mlock 84
-__SYSCALL( 84, sys_mlock, 2)
-#define __NR_munlock 85
-__SYSCALL( 85, sys_munlock, 2)
-#define __NR_mlockall 86
-__SYSCALL( 86, sys_mlockall, 1)
-#define __NR_munlockall 87
-__SYSCALL( 87, sys_munlockall, 0)
-#define __NR_mremap 88
-__SYSCALL( 88, sys_mremap, 4)
-#define __NR_msync 89
-__SYSCALL( 89, sys_msync, 3)
-#define __NR_mincore 90
-__SYSCALL( 90, sys_mincore, 3)
-#define __NR_madvise 91
-__SYSCALL( 91, sys_madvise, 3)
-#define __NR_shmget 92
-__SYSCALL( 92, sys_shmget, 4)
-#define __NR_shmat 93
-__SYSCALL( 93, xtensa_shmat, 4)
-#define __NR_shmctl 94
-__SYSCALL( 94, sys_shmctl, 4)
-#define __NR_shmdt 95
-__SYSCALL( 95, sys_shmdt, 4)
-
-/* Socket Operations */
-
-#define __NR_socket 96
-__SYSCALL( 96, sys_socket, 3)
-#define __NR_setsockopt 97
-__SYSCALL( 97, sys_setsockopt, 5)
-#define __NR_getsockopt 98
-__SYSCALL( 98, sys_getsockopt, 5)
-#define __NR_shutdown 99
-__SYSCALL( 99, sys_shutdown, 2)
-
-#define __NR_bind 100
-__SYSCALL(100, sys_bind, 3)
-#define __NR_connect 101
-__SYSCALL(101, sys_connect, 3)
-#define __NR_listen 102
-__SYSCALL(102, sys_listen, 2)
-#define __NR_accept 103
-__SYSCALL(103, sys_accept, 3)
-
-#define __NR_getsockname 104
-__SYSCALL(104, sys_getsockname, 3)
-#define __NR_getpeername 105
-__SYSCALL(105, sys_getpeername, 3)
-#define __NR_sendmsg 106
-__SYSCALL(106, sys_sendmsg, 3)
-#define __NR_recvmsg 107
-__SYSCALL(107, sys_recvmsg, 3)
-#define __NR_send 108
-__SYSCALL(108, sys_send, 4)
-#define __NR_recv 109
-__SYSCALL(109, sys_recv, 4)
-#define __NR_sendto 110
-__SYSCALL(110, sys_sendto, 6)
-#define __NR_recvfrom 111
-__SYSCALL(111, sys_recvfrom, 6)
-
-#define __NR_socketpair 112
-__SYSCALL(112, sys_socketpair, 4)
-#define __NR_sendfile 113
-__SYSCALL(113, sys_sendfile, 4)
-#define __NR_sendfile64 114
-__SYSCALL(114, sys_sendfile64, 4)
-#define __NR_available115 115
-__SYSCALL(115, sys_ni_syscall, 0)
-
-/* Process Operations */
-
-#define __NR_clone 116
-__SYSCALL(116, xtensa_clone, 5)
-#define __NR_execve 117
-__SYSCALL(117, xtensa_execve, 3)
-#define __NR_exit 118
-__SYSCALL(118, sys_exit, 1)
-#define __NR_exit_group 119
-__SYSCALL(119, sys_exit_group, 1)
-#define __NR_getpid 120
-__SYSCALL(120, sys_getpid, 0)
-#define __NR_wait4 121
-__SYSCALL(121, sys_wait4, 4)
-#define __NR_waitid 122
-__SYSCALL(122, sys_waitid, 5)
-#define __NR_kill 123
-__SYSCALL(123, sys_kill, 2)
-#define __NR_tkill 124
-__SYSCALL(124, sys_tkill, 2)
-#define __NR_tgkill 125
-__SYSCALL(125, sys_tgkill, 3)
-#define __NR_set_tid_address 126
-__SYSCALL(126, sys_set_tid_address, 1)
-#define __NR_gettid 127
-__SYSCALL(127, sys_gettid, 0)
-#define __NR_setsid 128
-__SYSCALL(128, sys_setsid, 0)
-#define __NR_getsid 129
-__SYSCALL(129, sys_getsid, 1)
-#define __NR_prctl 130
-__SYSCALL(130, sys_prctl, 5)
-#define __NR_personality 131
-__SYSCALL(131, sys_personality, 1)
-#define __NR_getpriority 132
-__SYSCALL(132, sys_getpriority, 2)
-#define __NR_setpriority 133
-__SYSCALL(133, sys_setpriority, 3)
-#define __NR_setitimer 134
-__SYSCALL(134, sys_setitimer, 3)
-#define __NR_getitimer 135
-__SYSCALL(135, sys_getitimer, 2)
-#define __NR_setuid 136
-__SYSCALL(136, sys_setuid, 1)
-#define __NR_getuid 137
-__SYSCALL(137, sys_getuid, 0)
-#define __NR_setgid 138
-__SYSCALL(138, sys_setgid, 1)
-#define __NR_getgid 139
-__SYSCALL(139, sys_getgid, 0)
-#define __NR_geteuid 140
-__SYSCALL(140, sys_geteuid, 0)
-#define __NR_getegid 141
-__SYSCALL(141, sys_getegid, 0)
-#define __NR_setreuid 142
-__SYSCALL(142, sys_setreuid, 2)
-#define __NR_setregid 143
-__SYSCALL(143, sys_setregid, 2)
-#define __NR_setresuid 144
-__SYSCALL(144, sys_setresuid, 3)
-#define __NR_getresuid 145
-__SYSCALL(145, sys_getresuid, 3)
-#define __NR_setresgid 146
-__SYSCALL(146, sys_setresgid, 3)
-#define __NR_getresgid 147
-__SYSCALL(147, sys_getresgid, 3)
-#define __NR_setpgid 148
-__SYSCALL(148, sys_setpgid, 2)
-#define __NR_getpgid 149
-__SYSCALL(149, sys_getpgid, 1)
-#define __NR_getppid 150
-__SYSCALL(150, sys_getppid, 0)
-#define __NR_getpgrp 151
-__SYSCALL(151, sys_getpgrp, 0)
-
-#define __NR_reserved152 152 /* set_thread_area */
-__SYSCALL(152, sys_ni_syscall, 0)
-#define __NR_reserved153 153 /* get_thread_area */
-__SYSCALL(153, sys_ni_syscall, 0)
-#define __NR_times 154
-__SYSCALL(154, sys_times, 1)
-#define __NR_acct 155
-__SYSCALL(155, sys_acct, 1)
-#define __NR_sched_setaffinity 156
-__SYSCALL(156, sys_sched_setaffinity, 3)
-#define __NR_sched_getaffinity 157
-__SYSCALL(157, sys_sched_getaffinity, 3)
-#define __NR_capget 158
-__SYSCALL(158, sys_capget, 2)
-#define __NR_capset 159
-__SYSCALL(159, sys_capset, 2)
-#define __NR_ptrace 160
-__SYSCALL(160, sys_ptrace, 4)
-#define __NR_semtimedop 161
-__SYSCALL(161, sys_semtimedop, 5)
-#define __NR_semget 162
-__SYSCALL(162, sys_semget, 4)
-#define __NR_semop 163
-__SYSCALL(163, sys_semop, 4)
-#define __NR_semctl 164
-__SYSCALL(164, sys_semctl, 4)
-#define __NR_available165 165
-__SYSCALL(165, sys_ni_syscall, 0)
-#define __NR_msgget 166
-__SYSCALL(166, sys_msgget, 4)
-#define __NR_msgsnd 167
-__SYSCALL(167, sys_msgsnd, 4)
-#define __NR_msgrcv 168
-__SYSCALL(168, sys_msgrcv, 4)
-#define __NR_msgctl 169
-__SYSCALL(169, sys_msgctl, 4)
-#define __NR_available170 170
-__SYSCALL(170, sys_ni_syscall, 0)
-#define __NR_available171 171
-__SYSCALL(171, sys_ni_syscall, 0)
-
-/* File System */
-
-#define __NR_mount 172
-__SYSCALL(172, sys_mount, 5)
-#define __NR_swapon 173
-__SYSCALL(173, sys_swapon, 2)
-#define __NR_chroot 174
-__SYSCALL(174, sys_chroot, 1)
-#define __NR_pivot_root 175
-__SYSCALL(175, sys_pivot_root, 2)
-#define __NR_umount 176
-__SYSCALL(176, sys_umount, 2)
-#define __NR_swapoff 177
-__SYSCALL(177, sys_swapoff, 1)
-#define __NR_sync 178
-__SYSCALL(178, sys_sync, 0)
-#define __NR_available179 179
-__SYSCALL(179, sys_ni_syscall, 0)
-#define __NR_setfsuid 180
-__SYSCALL(180, sys_setfsuid, 1)
-#define __NR_setfsgid 181
-__SYSCALL(181, sys_setfsgid, 1)
-#define __NR_sysfs 182
-__SYSCALL(182, sys_sysfs, 3)
-#define __NR_ustat 183
-__SYSCALL(183, sys_ustat, 2)
-#define __NR_statfs 184
-__SYSCALL(184, sys_statfs, 2)
-#define __NR_fstatfs 185
-__SYSCALL(185, sys_fstatfs, 2)
-#define __NR_statfs64 186
-__SYSCALL(186, sys_statfs64, 3)
-#define __NR_fstatfs64 187
-__SYSCALL(187, sys_fstatfs64, 3)
-
-/* System */
-
-#define __NR_setrlimit 188
-__SYSCALL(188, sys_setrlimit, 2)
-#define __NR_getrlimit 189
-__SYSCALL(189, sys_getrlimit, 2)
-#define __NR_getrusage 190
-__SYSCALL(190, sys_getrusage, 2)
-#define __NR_futex 191
-__SYSCALL(191, sys_futex, 5)
-#define __NR_gettimeofday 192
-__SYSCALL(192, sys_gettimeofday, 2)
-#define __NR_settimeofday 193
-__SYSCALL(193, sys_settimeofday, 2)
-#define __NR_adjtimex 194
-__SYSCALL(194, sys_adjtimex, 1)
-#define __NR_nanosleep 195
-__SYSCALL(195, sys_nanosleep, 2)
-#define __NR_getgroups 196
-__SYSCALL(196, sys_getgroups, 2)
-#define __NR_setgroups 197
-__SYSCALL(197, sys_setgroups, 2)
-#define __NR_sethostname 198
-__SYSCALL(198, sys_sethostname, 2)
-#define __NR_setdomainname 199
-__SYSCALL(199, sys_setdomainname, 2)
-#define __NR_syslog 200
-__SYSCALL(200, sys_syslog, 3)
-#define __NR_vhangup 201
-__SYSCALL(201, sys_vhangup, 0)
-#define __NR_uselib 202
-__SYSCALL(202, sys_uselib, 1)
-#define __NR_reboot 203
-__SYSCALL(203, sys_reboot, 3)
-#define __NR_quotactl 204
-__SYSCALL(204, sys_quotactl, 4)
-#define __NR_nfsservctl 205
-__SYSCALL(205, sys_ni_syscall, 0)
-#define __NR__sysctl 206
-__SYSCALL(206, sys_sysctl, 1)
-#define __NR_bdflush 207
-__SYSCALL(207, sys_bdflush, 2)
-#define __NR_uname 208
-__SYSCALL(208, sys_newuname, 1)
-#define __NR_sysinfo 209
-__SYSCALL(209, sys_sysinfo, 1)
-#define __NR_init_module 210
-__SYSCALL(210, sys_init_module, 2)
-#define __NR_delete_module 211
-__SYSCALL(211, sys_delete_module, 1)
-
-#define __NR_sched_setparam 212
-__SYSCALL(212, sys_sched_setparam, 2)
-#define __NR_sched_getparam 213
-__SYSCALL(213, sys_sched_getparam, 2)
-#define __NR_sched_setscheduler 214
-__SYSCALL(214, sys_sched_setscheduler, 3)
-#define __NR_sched_getscheduler 215
-__SYSCALL(215, sys_sched_getscheduler, 1)
-#define __NR_sched_get_priority_max 216
-__SYSCALL(216, sys_sched_get_priority_max, 1)
-#define __NR_sched_get_priority_min 217
-__SYSCALL(217, sys_sched_get_priority_min, 1)
-#define __NR_sched_rr_get_interval 218
-__SYSCALL(218, sys_sched_rr_get_interval, 2)
-#define __NR_sched_yield 219
-__SYSCALL(219, sys_sched_yield, 0)
-#define __NR_available222 222
-__SYSCALL(222, sys_ni_syscall, 0)
-
-/* Signal Handling */
-
-#define __NR_restart_syscall 223
-__SYSCALL(223, sys_restart_syscall, 0)
-#define __NR_sigaltstack 224
-__SYSCALL(224, xtensa_sigaltstack, 2)
-#define __NR_rt_sigreturn 225
-__SYSCALL(225, xtensa_rt_sigreturn, 1)
-#define __NR_rt_sigaction 226
-__SYSCALL(226, sys_rt_sigaction, 4)
-#define __NR_rt_sigprocmask 227
-__SYSCALL(227, sys_rt_sigprocmask, 4)
-#define __NR_rt_sigpending 228
-__SYSCALL(228, sys_rt_sigpending, 2)
-#define __NR_rt_sigtimedwait 229
-__SYSCALL(229, sys_rt_sigtimedwait, 4)
-#define __NR_rt_sigqueueinfo 230
-__SYSCALL(230, sys_rt_sigqueueinfo, 3)
-#define __NR_rt_sigsuspend 231
-__SYSCALL(231, xtensa_rt_sigsuspend, 2)
-
-/* Message */
-
-#define __NR_mq_open 232
-__SYSCALL(232, sys_mq_open, 4)
-#define __NR_mq_unlink 233
-__SYSCALL(233, sys_mq_unlink, 1)
-#define __NR_mq_timedsend 234
-__SYSCALL(234, sys_mq_timedsend, 5)
-#define __NR_mq_timedreceive 235
-__SYSCALL(235, sys_mq_timedreceive, 5)
-#define __NR_mq_notify 236
-__SYSCALL(236, sys_mq_notify, 2)
-#define __NR_mq_getsetattr 237
-__SYSCALL(237, sys_mq_getsetattr, 3)
-#define __NR_available238 238
-__SYSCALL(238, sys_ni_syscall, 0)
-
-/* IO */
-
-#define __NR_io_setup 239
-__SYSCALL(239, sys_io_setup, 2)
-#define __NR_io_destroy 240
-__SYSCALL(240, sys_io_destroy, 1)
-#define __NR_io_submit 241
-__SYSCALL(241, sys_io_submit, 3)
-#define __NR_io_getevents 242
-__SYSCALL(242, sys_io_getevents, 5)
-#define __NR_io_cancel 243
-__SYSCALL(243, sys_io_cancel, 3)
-#define __NR_clock_settime 244
-__SYSCALL(244, sys_clock_settime, 2)
-#define __NR_clock_gettime 245
-__SYSCALL(245, sys_clock_gettime, 2)
-#define __NR_clock_getres 246
-__SYSCALL(246, sys_clock_getres, 2)
-#define __NR_clock_nanosleep 247
-__SYSCALL(247, sys_clock_nanosleep, 4)
-
-/* Timer */
-
-#define __NR_timer_create 248
-__SYSCALL(248, sys_timer_create, 3)
-#define __NR_timer_delete 249
-__SYSCALL(249, sys_timer_delete, 1)
-#define __NR_timer_settime 250
-__SYSCALL(250, sys_timer_settime, 4)
-#define __NR_timer_gettime 251
-__SYSCALL(251, sys_timer_gettime, 2)
-#define __NR_timer_getoverrun 252
-__SYSCALL(252, sys_timer_getoverrun, 1)
-
-/* System */
-
-#define __NR_reserved244 253
-__SYSCALL(253, sys_ni_syscall, 0)
-#define __NR_lookup_dcookie 254
-__SYSCALL(254, sys_lookup_dcookie, 4)
-#define __NR_available255 255
-__SYSCALL(255, sys_ni_syscall, 0)
-#define __NR_add_key 256
-__SYSCALL(256, sys_add_key, 5)
-#define __NR_request_key 257
-__SYSCALL(257, sys_request_key, 5)
-#define __NR_keyctl 258
-__SYSCALL(258, sys_keyctl, 5)
-#define __NR_available259 259
-__SYSCALL(259, sys_ni_syscall, 0)
-
-
-#define __NR_readahead 260
-__SYSCALL(260, sys_readahead, 5)
-#define __NR_remap_file_pages 261
-__SYSCALL(261, sys_remap_file_pages, 5)
-#define __NR_migrate_pages 262
-__SYSCALL(262, sys_migrate_pages, 0)
-#define __NR_mbind 263
-__SYSCALL(263, sys_mbind, 6)
-#define __NR_get_mempolicy 264
-__SYSCALL(264, sys_get_mempolicy, 5)
-#define __NR_set_mempolicy 265
-__SYSCALL(265, sys_set_mempolicy, 3)
-#define __NR_unshare 266
-__SYSCALL(266, sys_unshare, 1)
-#define __NR_move_pages 267
-__SYSCALL(267, sys_move_pages, 0)
-#define __NR_splice 268
-__SYSCALL(268, sys_splice, 0)
-#define __NR_tee 269
-__SYSCALL(269, sys_tee, 0)
-#define __NR_vmsplice 270
-__SYSCALL(270, sys_vmsplice, 0)
-#define __NR_available271 271
-__SYSCALL(271, sys_ni_syscall, 0)
-
-#define __NR_pselect6 272
-__SYSCALL(272, sys_pselect6, 0)
-#define __NR_ppoll 273
-__SYSCALL(273, sys_ppoll, 0)
-#define __NR_epoll_pwait 274
-__SYSCALL(274, sys_epoll_pwait, 0)
-#define __NR_available275 275
-__SYSCALL(275, sys_ni_syscall, 0)
-
-#define __NR_inotify_init 276
-__SYSCALL(276, sys_inotify_init, 0)
-#define __NR_inotify_add_watch 277
-__SYSCALL(277, sys_inotify_add_watch, 3)
-#define __NR_inotify_rm_watch 278
-__SYSCALL(278, sys_inotify_rm_watch, 2)
-#define __NR_available279 279
-__SYSCALL(279, sys_ni_syscall, 0)
-
-#define __NR_getcpu 280
-__SYSCALL(280, sys_getcpu, 0)
-#define __NR_kexec_load 281
-__SYSCALL(281, sys_ni_syscall, 0)
-
-#define __NR_ioprio_set 282
-__SYSCALL(282, sys_ioprio_set, 2)
-#define __NR_ioprio_get 283
-__SYSCALL(283, sys_ioprio_get, 3)
-
-#define __NR_set_robust_list 284
-__SYSCALL(284, sys_set_robust_list, 3)
-#define __NR_get_robust_list 285
-__SYSCALL(285, sys_get_robust_list, 3)
-#define __NR_reserved286 286 /* sync_file_rangeX */
-__SYSCALL(286, sys_ni_syscall, 3)
-#define __NR_available287 287
-__SYSCALL(287, sys_faccessat, 0)
-
-/* Relative File Operations */
-
-#define __NR_openat 288
-__SYSCALL(288, sys_openat, 4)
-#define __NR_mkdirat 289
-__SYSCALL(289, sys_mkdirat, 3)
-#define __NR_mknodat 290
-__SYSCALL(290, sys_mknodat, 4)
-#define __NR_unlinkat 291
-__SYSCALL(291, sys_unlinkat, 3)
-#define __NR_renameat 292
-__SYSCALL(292, sys_renameat, 4)
-#define __NR_linkat 293
-__SYSCALL(293, sys_linkat, 5)
-#define __NR_symlinkat 294
-__SYSCALL(294, sys_symlinkat, 3)
-#define __NR_readlinkat 295
-__SYSCALL(295, sys_readlinkat, 4)
-#define __NR_utimensat 296
-__SYSCALL(296, sys_utimensat, 0)
-#define __NR_fchownat 297
-__SYSCALL(297, sys_fchownat, 5)
-#define __NR_futimesat 298
-__SYSCALL(298, sys_futimesat, 4)
-#define __NR_fstatat64 299
-__SYSCALL(299, sys_fstatat64, 0)
-#define __NR_fchmodat 300
-__SYSCALL(300, sys_fchmodat, 4)
-#define __NR_faccessat 301
-__SYSCALL(301, sys_faccessat, 4)
-#define __NR_available302 302
-__SYSCALL(302, sys_ni_syscall, 0)
-#define __NR_available303 303
-__SYSCALL(303, sys_ni_syscall, 0)
-
-#define __NR_signalfd 304
-__SYSCALL(304, sys_signalfd, 3)
-/* 305 was __NR_timerfd */
-__SYSCALL(305, sys_ni_syscall, 0)
-#define __NR_eventfd 306
-__SYSCALL(306, sys_eventfd, 1)
-#define __NR_recvmmsg 307
-__SYSCALL(307, sys_recvmmsg, 5)
-#define __NR_setns 308
-__SYSCALL(308, sys_setns, 2)
-
-#define __NR_syscall_count 309
-
-/*
- * sysxtensa syscall handler
- *
- * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
- * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
- * a2 a6 a3 a4 a5
- */
-
-#define SYS_XTENSA_RESERVED 0 /* don't use this */
-#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
-#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
-#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
-#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
-
-#define SYS_XTENSA_COUNT 5 /* count */
-
-#ifdef __KERNEL__
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+#define __ARCH_WANT_SYS_CLONE
+#include <uapi/asm/unistd.h>
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_GETPGRP
/*
@@ -735,5 +21,4 @@ __SYSCALL(308, sys_setns, 2)
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
-#endif /* __KERNEL__ */
-#endif /* _XTENSA_UNISTD_H */
+#endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 00000000000..f74ddfbb92e
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,130 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+#if defined(CONFIG_MMU)
+
+/* Will Become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET 0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+#else
+ /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
+ #define LOAD_MEMORY_ADDRESS 0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+ /* MMU Not being used - Virtual == Physical */
+
+ /* VECBASE */
+ #define VIRTUAL_MEMORY_ADDRESS 0x00002000
+
+ /* Location of the start of the kernel text, _start */
+ #define KERNELOFFSET 0x00003000
+
+ /* Loaded just above possibly live vectors */
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
+
+#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_RESET_VECTOR0_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+#undef XCHAL_NMI_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/include/asm/xor.h b/arch/xtensa/include/asm/xor.h
deleted file mode 100644
index e7b1f083991..00000000000
--- a/arch/xtensa/include/asm/xor.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/xor.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_XOR_H
-#define _XTENSA_XOR_H
-
-#include <asm-generic/xor.h>
-
-#endif
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
new file mode 100644
index 00000000000..56aad54e7fb
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -0,0 +1,25 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+header-y += auxvec.h
+header-y += byteorder.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += swab.h
+header-y += termbits.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/xtensa/include/asm/auxvec.h b/arch/xtensa/include/uapi/asm/auxvec.h
index 257dec75c5a..257dec75c5a 100644
--- a/arch/xtensa/include/asm/auxvec.h
+++ b/arch/xtensa/include/uapi/asm/auxvec.h
diff --git a/arch/xtensa/include/asm/byteorder.h b/arch/xtensa/include/uapi/asm/byteorder.h
index 54eb6315349..54eb6315349 100644
--- a/arch/xtensa/include/asm/byteorder.h
+++ b/arch/xtensa/include/uapi/asm/byteorder.h
diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
index fd1d1369a40..b4cb1100c0f 100644
--- a/arch/xtensa/include/asm/ioctls.h
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
@@ -71,8 +71,8 @@
#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
#define TIOCLINUX _IOW('T', 28, char)
#define TIOCCONS _IO('T', 29)
-#define TIOCGSERIAL _IOR('T', 30, struct serial_struct)
-#define TIOCSSERIAL _IOW('T', 31, struct serial_struct)
+#define TIOCGSERIAL 0x803C541E /*_IOR('T', 30, struct serial_struct)*/
+#define TIOCSSERIAL 0x403C541F /*_IOW('T', 31, struct serial_struct)*/
#define TIOCPKT _IOW('T', 32, int)
# define TIOCPKT_DATA 0
# define TIOCPKT_FLUSHREAD 1
@@ -101,6 +101,9 @@
#define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
#define TIOCVHANGUP _IO('T', 0x37)
+#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
#define TIOCSERCONFIG _IO('T', 83)
#define TIOCSERGWILD _IOR('T', 84, int)
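Editor's note: the hard-coded TIOCGSERIAL/TIOCSSERIAL values in the hunk above are just the pre-expanded _IOR/_IOW encodings, presumably spelled out so the exported header need not pull in struct serial_struct. A quick sketch of the generic _IOC arithmetic that reproduces 0x803C541E (it assumes sizeof(struct serial_struct) == 60, which is what the 0x3C size field implies):

/* Reproduce _IOR('T', 30, struct serial_struct) with the generic _IOC layout. */
#include <stdio.h>

#define IOC_NRSHIFT	0
#define IOC_TYPESHIFT	8	/* 8 nr bits */
#define IOC_SIZESHIFT	16	/* + 8 type bits */
#define IOC_DIRSHIFT	30	/* + 14 size bits */
#define IOC_READ	2u

int main(void)
{
	unsigned int nr = 30, type = 'T', size = 60 /* assumed */, dir = IOC_READ;
	unsigned int cmd = (dir << IOC_DIRSHIFT) | (size << IOC_SIZESHIFT) |
			   (type << IOC_TYPESHIFT) | (nr << IOC_NRSHIFT);

	printf("0x%08X\n", cmd);	/* prints 0x803C541E */
	return 0;
}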
diff --git a/arch/xtensa/include/asm/ipcbuf.h b/arch/xtensa/include/uapi/asm/ipcbuf.h
index c33aa6a4214..c33aa6a4214 100644
--- a/arch/xtensa/include/asm/ipcbuf.h
+++ b/arch/xtensa/include/uapi/asm/ipcbuf.h
diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 30789010733..00eed6786d7 100644
--- a/arch/xtensa/include/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -86,7 +86,22 @@
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+ overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
/* compatibility flags */
#define MAP_FILE 0
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT 26
+#define MAP_HUGE_MASK 0x3f
+
#endif /* _XTENSA_MMAN_H */
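Editor's note: MAP_HUGE_SHIFT/MAP_HUGE_MASK let a MAP_HUGETLB caller pick the huge page size by storing log2(size) in bits 26-31 of the mmap flags. A hedged userspace sketch (assumes a Linux/glibc build that exposes MAP_HUGETLB and MAP_ANONYMOUS, and a configured hugetlb pool for the call to succeed):

/* Request 2 MiB huge pages by encoding log2(2 MiB) = 21 above MAP_HUGE_SHIFT. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26		/* value introduced by this header */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    (21 << MAP_HUGE_SHIFT);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap");		/* likely: no huge pages reserved */
	else
		munmap(p, len);
	return 0;
}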
diff --git a/arch/xtensa/include/asm/msgbuf.h b/arch/xtensa/include/uapi/asm/msgbuf.h
index 693c9675528..693c9675528 100644
--- a/arch/xtensa/include/asm/msgbuf.h
+++ b/arch/xtensa/include/uapi/asm/msgbuf.h
diff --git a/arch/xtensa/include/uapi/asm/param.h b/arch/xtensa/include/uapi/asm/param.h
new file mode 100644
index 00000000000..87bc2eae630
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/param.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-xtensa/param.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PARAM_H
+#define _UAPI_XTENSA_PARAM_H
+
+#ifndef __KERNEL__
+# define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _UAPI_XTENSA_PARAM_H */
diff --git a/arch/xtensa/include/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
index 9d2d5993f06..9d2d5993f06 100644
--- a/arch/xtensa/include/asm/poll.h
+++ b/arch/xtensa/include/uapi/asm/poll.h
diff --git a/arch/xtensa/include/uapi/asm/posix_types.h b/arch/xtensa/include/uapi/asm/posix_types.h
new file mode 100644
index 00000000000..6e96be0d02d
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/posix_types.h
@@ -0,0 +1,39 @@
+/*
+ * include/asm-xtensa/posix_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Largely copied from include/asm-ppc/posix_types.h
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POSIX_TYPES_H
+#define _XTENSA_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_ipc_pid_t;
+#define __kernel_ipc_pid_t __kernel_ipc_pid_t
+
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+#define __kernel_size_t __kernel_size_t
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+typedef unsigned short __kernel_old_dev_t;
+#define __kernel_old_dev_t __kernel_old_dev_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..ee17aa842fd
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -0,0 +1,77 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_PTRACE_H
+#define _UAPI_XTENSA_PTRACE_H
+
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * exception :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
+
+/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
+
+#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
+#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
+#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
+#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
+#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
+#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
+#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
+#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
+#define EXC_TABLE_SIZE 0x400
+
+/* Registers used by strace */
+
+#define REG_A_BASE 0x0000
+#define REG_AR_BASE 0x0100
+#define REG_PC 0x0020
+#define REG_PS 0x02e6
+#define REG_WB 0x0248
+#define REG_WS 0x0249
+#define REG_LBEG 0x0200
+#define REG_LEND 0x0201
+#define REG_LCOUNT 0x0202
+#define REG_SAR 0x0203
+
+#define SYSCALL_NR 0x00ff
+
+/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETXTREGS 18
+#define PTRACE_SETXTREGS 19
+
+
+#endif /* _UAPI_XTENSA_PTRACE_H */
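Editor's note: PTRACE_GETREGS/SETREGS and GETXTREGS/SETXTREGS are arch-specific request numbers passed straight to ptrace(2). A heavily hedged sketch; the register block is treated as an opaque buffer whose size is an assumption, since the real layout (the xtensa gregset) is not part of this header:

/* Sketch: fetch the xtensa register block for a traced, stopped child. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define XTENSA_PTRACE_GETREGS 12	/* from the header above */

int dump_regs(pid_t pid)
{
	unsigned char regs[1024];	/* buffer size is an assumption */

	if (ptrace(XTENSA_PTRACE_GETREGS, pid, 0, regs) == -1) {
		perror("ptrace(PTRACE_GETREGS)");
		return -1;
	}
	printf("fetched register block for pid %d\n", pid);
	return 0;
}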
diff --git a/arch/xtensa/include/asm/sembuf.h b/arch/xtensa/include/uapi/asm/sembuf.h
index c15870493b3..c15870493b3 100644
--- a/arch/xtensa/include/asm/sembuf.h
+++ b/arch/xtensa/include/uapi/asm/sembuf.h
diff --git a/arch/xtensa/include/asm/setup.h b/arch/xtensa/include/uapi/asm/setup.h
index e3636520d8c..9fa8ad97936 100644
--- a/arch/xtensa/include/asm/setup.h
+++ b/arch/xtensa/include/uapi/asm/setup.h
@@ -13,4 +13,6 @@
#define COMMAND_LINE_SIZE 256
+extern void set_except_vector(int n, void *addr);
+
#endif
diff --git a/arch/xtensa/include/asm/shmbuf.h b/arch/xtensa/include/uapi/asm/shmbuf.h
index ad4b0121782..ad4b0121782 100644
--- a/arch/xtensa/include/asm/shmbuf.h
+++ b/arch/xtensa/include/uapi/asm/shmbuf.h
diff --git a/arch/xtensa/include/asm/sigcontext.h b/arch/xtensa/include/uapi/asm/sigcontext.h
index 03383af8c3b..03383af8c3b 100644
--- a/arch/xtensa/include/asm/sigcontext.h
+++ b/arch/xtensa/include/uapi/asm/sigcontext.h
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..586756ee267
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -0,0 +1,133 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_SIGNAL_H
+#define _UAPI_XTENSA_SIGNAL_H
+
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#endif
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/* #define SIGLOST 29 */
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_XTENSA_SIGNAL_H */
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index bb06968be22..39acec0cf0b 100644
--- a/arch/xtensa/include/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -32,7 +32,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -52,6 +52,7 @@
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
+#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
@@ -75,5 +76,19 @@
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS 43
+
+#define SO_LOCK_FILTER 44
+
+#define SO_SELECT_ERR_QUEUE 45
+
+#define SO_BUSY_POLL 46
+
+#define SO_MAX_PACING_RATE 47
+
+#define SO_BPF_EXTENSIONS 48
#endif /* _XTENSA_SOCKET_H */
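Editor's note: SO_REUSEPORT gets a real value (15) here in place of the old placeholder comment. A minimal sketch of enabling it with the standard setsockopt(2) call (assumes a libc and kernel new enough to expose and honour the option):

/* Enable SO_REUSEPORT on a freshly created TCP socket. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int make_reuseport_socket(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_REUSEPORT)");
		close(fd);
		return -1;
	}
	return fd;
}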
diff --git a/arch/xtensa/include/asm/sockios.h b/arch/xtensa/include/uapi/asm/sockios.h
index efe0af379f0..efe0af379f0 100644
--- a/arch/xtensa/include/asm/sockios.h
+++ b/arch/xtensa/include/uapi/asm/sockios.h
diff --git a/arch/xtensa/include/asm/stat.h b/arch/xtensa/include/uapi/asm/stat.h
index c4992038cee..c4992038cee 100644
--- a/arch/xtensa/include/asm/stat.h
+++ b/arch/xtensa/include/uapi/asm/stat.h
diff --git a/arch/xtensa/include/asm/swab.h b/arch/xtensa/include/uapi/asm/swab.h
index 226a3916231..226a3916231 100644
--- a/arch/xtensa/include/asm/swab.h
+++ b/arch/xtensa/include/uapi/asm/swab.h
diff --git a/arch/xtensa/include/asm/termbits.h b/arch/xtensa/include/uapi/asm/termbits.h
index 0d6c8715b24..0d6c8715b24 100644
--- a/arch/xtensa/include/asm/termbits.h
+++ b/arch/xtensa/include/uapi/asm/termbits.h
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
new file mode 100644
index 00000000000..87ec7ae73cb
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -0,0 +1,28 @@
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _UAPI_XTENSA_TYPES_H
+#define _UAPI_XTENSA_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+#ifdef __ASSEMBLY__
+# define __XTENSA_UL(x) (x)
+# define __XTENSA_UL_CONST(x) x
+#else
+# define __XTENSA_UL(x) ((unsigned long)(x))
+# define __XTENSA_UL_CONST(x) x##UL
+#endif
+
+#ifndef __ASSEMBLY__
+
+#endif
+
+#endif /* _UAPI_XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..b9395529f02
--- /dev/null
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -0,0 +1,764 @@
+#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_XTENSA_UNISTD_H
+
+#ifndef __SYSCALL
+# define __SYSCALL(nr,func,nargs)
+#endif
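+/*
+ * Editor's aside, not part of the patch: the __SYSCALL() entries below are
+ * no-ops by default (see the fallback just above), so userspace only sees
+ * the __NR_* numbers.  The "|| defined(__SYSCALL)" part of the guard exists
+ * so the body can be expanded a second time: the kernel generates its
+ * dispatch table by redefining the macro and re-including this header,
+ * roughly as arch/xtensa/kernel/syscall.c does.  Sketch of that pattern,
+ * assuming sys_ni_syscall and the sys_* entry points are declared elsewhere:
+ *
+ *	typedef void (*syscall_t)(void);
+ *
+ *	#undef __SYSCALL
+ *	#define __SYSCALL(nr, entry, nargs)	[nr] = (syscall_t)&entry,
+ *
+ *	syscall_t sys_call_table[__NR_syscall_count] = {
+ *		[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
+ *	#include <uapi/asm/unistd.h>
+ *	};
+ */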
+
+#define __NR_spill 0
+__SYSCALL( 0, sys_ni_syscall, 0)
+#define __NR_xtensa 1
+__SYSCALL( 1, sys_ni_syscall, 0)
+#define __NR_available4 2
+__SYSCALL( 2, sys_ni_syscall, 0)
+#define __NR_available5 3
+__SYSCALL( 3, sys_ni_syscall, 0)
+#define __NR_available6 4
+__SYSCALL( 4, sys_ni_syscall, 0)
+#define __NR_available7 5
+__SYSCALL( 5, sys_ni_syscall, 0)
+#define __NR_available8 6
+__SYSCALL( 6, sys_ni_syscall, 0)
+#define __NR_available9 7
+__SYSCALL( 7, sys_ni_syscall, 0)
+
+/* File Operations */
+
+#define __NR_open 8
+__SYSCALL( 8, sys_open, 3)
+#define __NR_close 9
+__SYSCALL( 9, sys_close, 1)
+#define __NR_dup 10
+__SYSCALL( 10, sys_dup, 1)
+#define __NR_dup2 11
+__SYSCALL( 11, sys_dup2, 2)
+#define __NR_read 12
+__SYSCALL( 12, sys_read, 3)
+#define __NR_write 13
+__SYSCALL( 13, sys_write, 3)
+#define __NR_select 14
+__SYSCALL( 14, sys_select, 5)
+#define __NR_lseek 15
+__SYSCALL( 15, sys_lseek, 3)
+#define __NR_poll 16
+__SYSCALL( 16, sys_poll, 3)
+#define __NR__llseek 17
+__SYSCALL( 17, sys_llseek, 5)
+#define __NR_epoll_wait 18
+__SYSCALL( 18, sys_epoll_wait, 4)
+#define __NR_epoll_ctl 19
+__SYSCALL( 19, sys_epoll_ctl, 4)
+#define __NR_epoll_create 20
+__SYSCALL( 20, sys_epoll_create, 1)
+#define __NR_creat 21
+__SYSCALL( 21, sys_creat, 2)
+#define __NR_truncate 22
+__SYSCALL( 22, sys_truncate, 2)
+#define __NR_ftruncate 23
+__SYSCALL( 23, sys_ftruncate, 2)
+#define __NR_readv 24
+__SYSCALL( 24, sys_readv, 3)
+#define __NR_writev 25
+__SYSCALL( 25, sys_writev, 3)
+#define __NR_fsync 26
+__SYSCALL( 26, sys_fsync, 1)
+#define __NR_fdatasync 27
+__SYSCALL( 27, sys_fdatasync, 1)
+#define __NR_truncate64 28
+__SYSCALL( 28, sys_truncate64, 2)
+#define __NR_ftruncate64 29
+__SYSCALL( 29, sys_ftruncate64, 2)
+#define __NR_pread64 30
+__SYSCALL( 30, sys_pread64, 6)
+#define __NR_pwrite64 31
+__SYSCALL( 31, sys_pwrite64, 6)
+
+#define __NR_link 32
+__SYSCALL( 32, sys_link, 2)
+#define __NR_rename 33
+__SYSCALL( 33, sys_rename, 2)
+#define __NR_symlink 34
+__SYSCALL( 34, sys_symlink, 2)
+#define __NR_readlink 35
+__SYSCALL( 35, sys_readlink, 3)
+#define __NR_mknod 36
+__SYSCALL( 36, sys_mknod, 3)
+#define __NR_pipe 37
+__SYSCALL( 37, sys_pipe, 1)
+#define __NR_unlink 38
+__SYSCALL( 38, sys_unlink, 1)
+#define __NR_rmdir 39
+__SYSCALL( 39, sys_rmdir, 1)
+
+#define __NR_mkdir 40
+__SYSCALL( 40, sys_mkdir, 2)
+#define __NR_chdir 41
+__SYSCALL( 41, sys_chdir, 1)
+#define __NR_fchdir 42
+__SYSCALL( 42, sys_fchdir, 1)
+#define __NR_getcwd 43
+__SYSCALL( 43, sys_getcwd, 2)
+
+#define __NR_chmod 44
+__SYSCALL( 44, sys_chmod, 2)
+#define __NR_chown 45
+__SYSCALL( 45, sys_chown, 3)
+#define __NR_stat 46
+__SYSCALL( 46, sys_newstat, 2)
+#define __NR_stat64 47
+__SYSCALL( 47, sys_stat64, 2)
+
+#define __NR_lchown 48
+__SYSCALL( 48, sys_lchown, 3)
+#define __NR_lstat 49
+__SYSCALL( 49, sys_newlstat, 2)
+#define __NR_lstat64 50
+__SYSCALL( 50, sys_lstat64, 2)
+#define __NR_available51 51
+__SYSCALL( 51, sys_ni_syscall, 0)
+
+#define __NR_fchmod 52
+__SYSCALL( 52, sys_fchmod, 2)
+#define __NR_fchown 53
+__SYSCALL( 53, sys_fchown, 3)
+#define __NR_fstat 54
+__SYSCALL( 54, sys_newfstat, 2)
+#define __NR_fstat64 55
+__SYSCALL( 55, sys_fstat64, 2)
+
+#define __NR_flock 56
+__SYSCALL( 56, sys_flock, 2)
+#define __NR_access 57
+__SYSCALL( 57, sys_access, 2)
+#define __NR_umask 58
+__SYSCALL( 58, sys_umask, 1)
+#define __NR_getdents 59
+__SYSCALL( 59, sys_getdents, 3)
+#define __NR_getdents64 60
+__SYSCALL( 60, sys_getdents64, 3)
+#define __NR_fcntl64 61
+__SYSCALL( 61, sys_fcntl64, 3)
+#define __NR_fallocate 62
+__SYSCALL( 62, sys_fallocate, 6)
+#define __NR_fadvise64_64 63
+__SYSCALL( 63, xtensa_fadvise64_64, 6)
+#define __NR_utime 64 /* glibc 2.3.3 ?? */
+__SYSCALL( 64, sys_utime, 2)
+#define __NR_utimes 65
+__SYSCALL( 65, sys_utimes, 2)
+#define __NR_ioctl 66
+__SYSCALL( 66, sys_ioctl, 3)
+#define __NR_fcntl 67
+__SYSCALL( 67, sys_fcntl, 3)
+
+#define __NR_setxattr 68
+__SYSCALL( 68, sys_setxattr, 5)
+#define __NR_getxattr 69
+__SYSCALL( 69, sys_getxattr, 4)
+#define __NR_listxattr 70
+__SYSCALL( 70, sys_listxattr, 3)
+#define __NR_removexattr 71
+__SYSCALL( 71, sys_removexattr, 2)
+#define __NR_lsetxattr 72
+__SYSCALL( 72, sys_lsetxattr, 5)
+#define __NR_lgetxattr 73
+__SYSCALL( 73, sys_lgetxattr, 4)
+#define __NR_llistxattr 74
+__SYSCALL( 74, sys_llistxattr, 3)
+#define __NR_lremovexattr 75
+__SYSCALL( 75, sys_lremovexattr, 2)
+#define __NR_fsetxattr 76
+__SYSCALL( 76, sys_fsetxattr, 5)
+#define __NR_fgetxattr 77
+__SYSCALL( 77, sys_fgetxattr, 4)
+#define __NR_flistxattr 78
+__SYSCALL( 78, sys_flistxattr, 3)
+#define __NR_fremovexattr 79
+__SYSCALL( 79, sys_fremovexattr, 2)
+
+/* File Map / Shared Memory Operations */
+
+#define __NR_mmap2 80
+__SYSCALL( 80, sys_mmap_pgoff, 6)
+#define __NR_munmap 81
+__SYSCALL( 81, sys_munmap, 2)
+#define __NR_mprotect 82
+__SYSCALL( 82, sys_mprotect, 3)
+#define __NR_brk 83
+__SYSCALL( 83, sys_brk, 1)
+#define __NR_mlock 84
+__SYSCALL( 84, sys_mlock, 2)
+#define __NR_munlock 85
+__SYSCALL( 85, sys_munlock, 2)
+#define __NR_mlockall 86
+__SYSCALL( 86, sys_mlockall, 1)
+#define __NR_munlockall 87
+__SYSCALL( 87, sys_munlockall, 0)
+#define __NR_mremap 88
+__SYSCALL( 88, sys_mremap, 4)
+#define __NR_msync 89
+__SYSCALL( 89, sys_msync, 3)
+#define __NR_mincore 90
+__SYSCALL( 90, sys_mincore, 3)
+#define __NR_madvise 91
+__SYSCALL( 91, sys_madvise, 3)
+#define __NR_shmget 92
+__SYSCALL( 92, sys_shmget, 4)
+#define __NR_shmat 93
+__SYSCALL( 93, xtensa_shmat, 4)
+#define __NR_shmctl 94
+__SYSCALL( 94, sys_shmctl, 4)
+#define __NR_shmdt 95
+__SYSCALL( 95, sys_shmdt, 4)
+
+/* Socket Operations */
+
+#define __NR_socket 96
+__SYSCALL( 96, sys_socket, 3)
+#define __NR_setsockopt 97
+__SYSCALL( 97, sys_setsockopt, 5)
+#define __NR_getsockopt 98
+__SYSCALL( 98, sys_getsockopt, 5)
+#define __NR_shutdown 99
+__SYSCALL( 99, sys_shutdown, 2)
+
+#define __NR_bind 100
+__SYSCALL(100, sys_bind, 3)
+#define __NR_connect 101
+__SYSCALL(101, sys_connect, 3)
+#define __NR_listen 102
+__SYSCALL(102, sys_listen, 2)
+#define __NR_accept 103
+__SYSCALL(103, sys_accept, 3)
+
+#define __NR_getsockname 104
+__SYSCALL(104, sys_getsockname, 3)
+#define __NR_getpeername 105
+__SYSCALL(105, sys_getpeername, 3)
+#define __NR_sendmsg 106
+__SYSCALL(106, sys_sendmsg, 3)
+#define __NR_recvmsg 107
+__SYSCALL(107, sys_recvmsg, 3)
+#define __NR_send 108
+__SYSCALL(108, sys_send, 4)
+#define __NR_recv 109
+__SYSCALL(109, sys_recv, 4)
+#define __NR_sendto 110
+__SYSCALL(110, sys_sendto, 6)
+#define __NR_recvfrom 111
+__SYSCALL(111, sys_recvfrom, 6)
+
+#define __NR_socketpair 112
+__SYSCALL(112, sys_socketpair, 4)
+#define __NR_sendfile 113
+__SYSCALL(113, sys_sendfile, 4)
+#define __NR_sendfile64 114
+__SYSCALL(114, sys_sendfile64, 4)
+#define __NR_sendmmsg 115
+__SYSCALL(115, sys_sendmmsg, 4)
+
+/* Process Operations */
+
+#define __NR_clone 116
+__SYSCALL(116, sys_clone, 5)
+#define __NR_execve 117
+__SYSCALL(117, sys_execve, 3)
+#define __NR_exit 118
+__SYSCALL(118, sys_exit, 1)
+#define __NR_exit_group 119
+__SYSCALL(119, sys_exit_group, 1)
+#define __NR_getpid 120
+__SYSCALL(120, sys_getpid, 0)
+#define __NR_wait4 121
+__SYSCALL(121, sys_wait4, 4)
+#define __NR_waitid 122
+__SYSCALL(122, sys_waitid, 5)
+#define __NR_kill 123
+__SYSCALL(123, sys_kill, 2)
+#define __NR_tkill 124
+__SYSCALL(124, sys_tkill, 2)
+#define __NR_tgkill 125
+__SYSCALL(125, sys_tgkill, 3)
+#define __NR_set_tid_address 126
+__SYSCALL(126, sys_set_tid_address, 1)
+#define __NR_gettid 127
+__SYSCALL(127, sys_gettid, 0)
+#define __NR_setsid 128
+__SYSCALL(128, sys_setsid, 0)
+#define __NR_getsid 129
+__SYSCALL(129, sys_getsid, 1)
+#define __NR_prctl 130
+__SYSCALL(130, sys_prctl, 5)
+#define __NR_personality 131
+__SYSCALL(131, sys_personality, 1)
+#define __NR_getpriority 132
+__SYSCALL(132, sys_getpriority, 2)
+#define __NR_setpriority 133
+__SYSCALL(133, sys_setpriority, 3)
+#define __NR_setitimer 134
+__SYSCALL(134, sys_setitimer, 3)
+#define __NR_getitimer 135
+__SYSCALL(135, sys_getitimer, 2)
+#define __NR_setuid 136
+__SYSCALL(136, sys_setuid, 1)
+#define __NR_getuid 137
+__SYSCALL(137, sys_getuid, 0)
+#define __NR_setgid 138
+__SYSCALL(138, sys_setgid, 1)
+#define __NR_getgid 139
+__SYSCALL(139, sys_getgid, 0)
+#define __NR_geteuid 140
+__SYSCALL(140, sys_geteuid, 0)
+#define __NR_getegid 141
+__SYSCALL(141, sys_getegid, 0)
+#define __NR_setreuid 142
+__SYSCALL(142, sys_setreuid, 2)
+#define __NR_setregid 143
+__SYSCALL(143, sys_setregid, 2)
+#define __NR_setresuid 144
+__SYSCALL(144, sys_setresuid, 3)
+#define __NR_getresuid 145
+__SYSCALL(145, sys_getresuid, 3)
+#define __NR_setresgid 146
+__SYSCALL(146, sys_setresgid, 3)
+#define __NR_getresgid 147
+__SYSCALL(147, sys_getresgid, 3)
+#define __NR_setpgid 148
+__SYSCALL(148, sys_setpgid, 2)
+#define __NR_getpgid 149
+__SYSCALL(149, sys_getpgid, 1)
+#define __NR_getppid 150
+__SYSCALL(150, sys_getppid, 0)
+#define __NR_getpgrp 151
+__SYSCALL(151, sys_getpgrp, 0)
+
+#define __NR_reserved152 152 /* set_thread_area */
+__SYSCALL(152, sys_ni_syscall, 0)
+#define __NR_reserved153 153 /* get_thread_area */
+__SYSCALL(153, sys_ni_syscall, 0)
+#define __NR_times 154
+__SYSCALL(154, sys_times, 1)
+#define __NR_acct 155
+__SYSCALL(155, sys_acct, 1)
+#define __NR_sched_setaffinity 156
+__SYSCALL(156, sys_sched_setaffinity, 3)
+#define __NR_sched_getaffinity 157
+__SYSCALL(157, sys_sched_getaffinity, 3)
+#define __NR_capget 158
+__SYSCALL(158, sys_capget, 2)
+#define __NR_capset 159
+__SYSCALL(159, sys_capset, 2)
+#define __NR_ptrace 160
+__SYSCALL(160, sys_ptrace, 4)
+#define __NR_semtimedop 161
+__SYSCALL(161, sys_semtimedop, 5)
+#define __NR_semget 162
+__SYSCALL(162, sys_semget, 4)
+#define __NR_semop 163
+__SYSCALL(163, sys_semop, 4)
+#define __NR_semctl 164
+__SYSCALL(164, sys_semctl, 4)
+#define __NR_available165 165
+__SYSCALL(165, sys_ni_syscall, 0)
+#define __NR_msgget 166
+__SYSCALL(166, sys_msgget, 4)
+#define __NR_msgsnd 167
+__SYSCALL(167, sys_msgsnd, 4)
+#define __NR_msgrcv 168
+__SYSCALL(168, sys_msgrcv, 4)
+#define __NR_msgctl 169
+__SYSCALL(169, sys_msgctl, 4)
+#define __NR_available170 170
+__SYSCALL(170, sys_ni_syscall, 0)
+
+/* File System */
+
+#define __NR_umount2 171
+__SYSCALL(171, sys_umount, 2)
+#define __NR_mount 172
+__SYSCALL(172, sys_mount, 5)
+#define __NR_swapon 173
+__SYSCALL(173, sys_swapon, 2)
+#define __NR_chroot 174
+__SYSCALL(174, sys_chroot, 1)
+#define __NR_pivot_root 175
+__SYSCALL(175, sys_pivot_root, 2)
+#define __NR_umount 176
+__SYSCALL(176, sys_umount, 2)
+#define __NR_swapoff 177
+__SYSCALL(177, sys_swapoff, 1)
+#define __NR_sync 178
+__SYSCALL(178, sys_sync, 0)
+#define __NR_syncfs 179
+__SYSCALL(179, sys_syncfs, 1)
+#define __NR_setfsuid 180
+__SYSCALL(180, sys_setfsuid, 1)
+#define __NR_setfsgid 181
+__SYSCALL(181, sys_setfsgid, 1)
+#define __NR_sysfs 182
+__SYSCALL(182, sys_sysfs, 3)
+#define __NR_ustat 183
+__SYSCALL(183, sys_ustat, 2)
+#define __NR_statfs 184
+__SYSCALL(184, sys_statfs, 2)
+#define __NR_fstatfs 185
+__SYSCALL(185, sys_fstatfs, 2)
+#define __NR_statfs64 186
+__SYSCALL(186, sys_statfs64, 3)
+#define __NR_fstatfs64 187
+__SYSCALL(187, sys_fstatfs64, 3)
+
+/* System */
+
+#define __NR_setrlimit 188
+__SYSCALL(188, sys_setrlimit, 2)
+#define __NR_getrlimit 189
+__SYSCALL(189, sys_getrlimit, 2)
+#define __NR_getrusage 190
+__SYSCALL(190, sys_getrusage, 2)
+#define __NR_futex 191
+__SYSCALL(191, sys_futex, 5)
+#define __NR_gettimeofday 192
+__SYSCALL(192, sys_gettimeofday, 2)
+#define __NR_settimeofday 193
+__SYSCALL(193, sys_settimeofday, 2)
+#define __NR_adjtimex 194
+__SYSCALL(194, sys_adjtimex, 1)
+#define __NR_nanosleep 195
+__SYSCALL(195, sys_nanosleep, 2)
+#define __NR_getgroups 196
+__SYSCALL(196, sys_getgroups, 2)
+#define __NR_setgroups 197
+__SYSCALL(197, sys_setgroups, 2)
+#define __NR_sethostname 198
+__SYSCALL(198, sys_sethostname, 2)
+#define __NR_setdomainname 199
+__SYSCALL(199, sys_setdomainname, 2)
+#define __NR_syslog 200
+__SYSCALL(200, sys_syslog, 3)
+#define __NR_vhangup 201
+__SYSCALL(201, sys_vhangup, 0)
+#define __NR_uselib 202
+__SYSCALL(202, sys_uselib, 1)
+#define __NR_reboot 203
+__SYSCALL(203, sys_reboot, 3)
+#define __NR_quotactl 204
+__SYSCALL(204, sys_quotactl, 4)
+#define __NR_nfsservctl 205
+__SYSCALL(205, sys_ni_syscall, 0) /* old nfsservctl */
+#define __NR__sysctl 206
+__SYSCALL(206, sys_sysctl, 1)
+#define __NR_bdflush 207
+__SYSCALL(207, sys_bdflush, 2)
+#define __NR_uname 208
+__SYSCALL(208, sys_newuname, 1)
+#define __NR_sysinfo 209
+__SYSCALL(209, sys_sysinfo, 1)
+#define __NR_init_module 210
+__SYSCALL(210, sys_init_module, 2)
+#define __NR_delete_module 211
+__SYSCALL(211, sys_delete_module, 1)
+
+#define __NR_sched_setparam 212
+__SYSCALL(212, sys_sched_setparam, 2)
+#define __NR_sched_getparam 213
+__SYSCALL(213, sys_sched_getparam, 2)
+#define __NR_sched_setscheduler 214
+__SYSCALL(214, sys_sched_setscheduler, 3)
+#define __NR_sched_getscheduler 215
+__SYSCALL(215, sys_sched_getscheduler, 1)
+#define __NR_sched_get_priority_max 216
+__SYSCALL(216, sys_sched_get_priority_max, 1)
+#define __NR_sched_get_priority_min 217
+__SYSCALL(217, sys_sched_get_priority_min, 1)
+#define __NR_sched_rr_get_interval 218
+__SYSCALL(218, sys_sched_rr_get_interval, 2)
+#define __NR_sched_yield 219
+__SYSCALL(219, sys_sched_yield, 0)
+#define __NR_available222 222
+__SYSCALL(222, sys_ni_syscall, 0)
+
+/* Signal Handling */
+
+#define __NR_restart_syscall 223
+__SYSCALL(223, sys_restart_syscall, 0)
+#define __NR_sigaltstack 224
+__SYSCALL(224, sys_sigaltstack, 2)
+#define __NR_rt_sigreturn 225
+__SYSCALL(225, xtensa_rt_sigreturn, 1)
+#define __NR_rt_sigaction 226
+__SYSCALL(226, sys_rt_sigaction, 4)
+#define __NR_rt_sigprocmask 227
+__SYSCALL(227, sys_rt_sigprocmask, 4)
+#define __NR_rt_sigpending 228
+__SYSCALL(228, sys_rt_sigpending, 2)
+#define __NR_rt_sigtimedwait 229
+__SYSCALL(229, sys_rt_sigtimedwait, 4)
+#define __NR_rt_sigqueueinfo 230
+__SYSCALL(230, sys_rt_sigqueueinfo, 3)
+#define __NR_rt_sigsuspend 231
+__SYSCALL(231, sys_rt_sigsuspend, 2)
+
+/* Message */
+
+#define __NR_mq_open 232
+__SYSCALL(232, sys_mq_open, 4)
+#define __NR_mq_unlink 233
+__SYSCALL(233, sys_mq_unlink, 1)
+#define __NR_mq_timedsend 234
+__SYSCALL(234, sys_mq_timedsend, 5)
+#define __NR_mq_timedreceive 235
+__SYSCALL(235, sys_mq_timedreceive, 5)
+#define __NR_mq_notify 236
+__SYSCALL(236, sys_mq_notify, 2)
+#define __NR_mq_getsetattr 237
+__SYSCALL(237, sys_mq_getsetattr, 3)
+#define __NR_available238 238
+__SYSCALL(238, sys_ni_syscall, 0)
+
+/* IO */
+
+#define __NR_io_setup 239
+__SYSCALL(239, sys_io_setup, 2)
+#define __NR_io_destroy 240
+__SYSCALL(240, sys_io_destroy, 1)
+#define __NR_io_submit 241
+__SYSCALL(241, sys_io_submit, 3)
+#define __NR_io_getevents 242
+__SYSCALL(242, sys_io_getevents, 5)
+#define __NR_io_cancel 243
+__SYSCALL(243, sys_io_cancel, 3)
+#define __NR_clock_settime 244
+__SYSCALL(244, sys_clock_settime, 2)
+#define __NR_clock_gettime 245
+__SYSCALL(245, sys_clock_gettime, 2)
+#define __NR_clock_getres 246
+__SYSCALL(246, sys_clock_getres, 2)
+#define __NR_clock_nanosleep 247
+__SYSCALL(247, sys_clock_nanosleep, 4)
+
+/* Timer */
+
+#define __NR_timer_create 248
+__SYSCALL(248, sys_timer_create, 3)
+#define __NR_timer_delete 249
+__SYSCALL(249, sys_timer_delete, 1)
+#define __NR_timer_settime 250
+__SYSCALL(250, sys_timer_settime, 4)
+#define __NR_timer_gettime 251
+__SYSCALL(251, sys_timer_gettime, 2)
+#define __NR_timer_getoverrun 252
+__SYSCALL(252, sys_timer_getoverrun, 1)
+
+/* System */
+
+#define __NR_reserved253 253
+__SYSCALL(253, sys_ni_syscall, 0)
+#define __NR_lookup_dcookie 254
+__SYSCALL(254, sys_lookup_dcookie, 4)
+#define __NR_available255 255
+__SYSCALL(255, sys_ni_syscall, 0)
+#define __NR_add_key 256
+__SYSCALL(256, sys_add_key, 5)
+#define __NR_request_key 257
+__SYSCALL(257, sys_request_key, 5)
+#define __NR_keyctl 258
+__SYSCALL(258, sys_keyctl, 5)
+#define __NR_available259 259
+__SYSCALL(259, sys_ni_syscall, 0)
+
+
+#define __NR_readahead 260
+__SYSCALL(260, sys_readahead, 5)
+#define __NR_remap_file_pages 261
+__SYSCALL(261, sys_remap_file_pages, 5)
+#define __NR_migrate_pages 262
+__SYSCALL(262, sys_migrate_pages, 0)
+#define __NR_mbind 263
+__SYSCALL(263, sys_mbind, 6)
+#define __NR_get_mempolicy 264
+__SYSCALL(264, sys_get_mempolicy, 5)
+#define __NR_set_mempolicy 265
+__SYSCALL(265, sys_set_mempolicy, 3)
+#define __NR_unshare 266
+__SYSCALL(266, sys_unshare, 1)
+#define __NR_move_pages 267
+__SYSCALL(267, sys_move_pages, 0)
+#define __NR_splice 268
+__SYSCALL(268, sys_splice, 0)
+#define __NR_tee 269
+__SYSCALL(269, sys_tee, 0)
+#define __NR_vmsplice 270
+__SYSCALL(270, sys_vmsplice, 0)
+#define __NR_available271 271
+__SYSCALL(271, sys_ni_syscall, 0)
+
+#define __NR_pselect6 272
+__SYSCALL(272, sys_pselect6, 0)
+#define __NR_ppoll 273
+__SYSCALL(273, sys_ppoll, 0)
+#define __NR_epoll_pwait 274
+__SYSCALL(274, sys_epoll_pwait, 0)
+#define __NR_epoll_create1 275
+__SYSCALL(275, sys_epoll_create1, 1)
+
+#define __NR_inotify_init 276
+__SYSCALL(276, sys_inotify_init, 0)
+#define __NR_inotify_add_watch 277
+__SYSCALL(277, sys_inotify_add_watch, 3)
+#define __NR_inotify_rm_watch 278
+__SYSCALL(278, sys_inotify_rm_watch, 2)
+#define __NR_inotify_init1 279
+__SYSCALL(279, sys_inotify_init1, 1)
+
+#define __NR_getcpu 280
+__SYSCALL(280, sys_getcpu, 0)
+#define __NR_kexec_load 281
+__SYSCALL(281, sys_ni_syscall, 0)
+
+#define __NR_ioprio_set 282
+__SYSCALL(282, sys_ioprio_set, 2)
+#define __NR_ioprio_get 283
+__SYSCALL(283, sys_ioprio_get, 3)
+
+#define __NR_set_robust_list 284
+__SYSCALL(284, sys_set_robust_list, 3)
+#define __NR_get_robust_list 285
+__SYSCALL(285, sys_get_robust_list, 3)
+#define __NR_available286 286
+__SYSCALL(286, sys_ni_syscall, 0)
+#define __NR_available287 287
+__SYSCALL(287, sys_ni_syscall, 0)
+
+/* Relative File Operations */
+
+#define __NR_openat 288
+__SYSCALL(288, sys_openat, 4)
+#define __NR_mkdirat 289
+__SYSCALL(289, sys_mkdirat, 3)
+#define __NR_mknodat 290
+__SYSCALL(290, sys_mknodat, 4)
+#define __NR_unlinkat 291
+__SYSCALL(291, sys_unlinkat, 3)
+#define __NR_renameat 292
+__SYSCALL(292, sys_renameat, 4)
+#define __NR_linkat 293
+__SYSCALL(293, sys_linkat, 5)
+#define __NR_symlinkat 294
+__SYSCALL(294, sys_symlinkat, 3)
+#define __NR_readlinkat 295
+__SYSCALL(295, sys_readlinkat, 4)
+#define __NR_utimensat 296
+__SYSCALL(296, sys_utimensat, 0)
+#define __NR_fchownat 297
+__SYSCALL(297, sys_fchownat, 5)
+#define __NR_futimesat 298
+__SYSCALL(298, sys_futimesat, 4)
+#define __NR_fstatat64 299
+__SYSCALL(299, sys_fstatat64, 0)
+#define __NR_fchmodat 300
+__SYSCALL(300, sys_fchmodat, 4)
+#define __NR_faccessat 301
+__SYSCALL(301, sys_faccessat, 4)
+#define __NR_available302 302
+__SYSCALL(302, sys_ni_syscall, 0)
+#define __NR_available303 303
+__SYSCALL(303, sys_ni_syscall, 0)
+
+#define __NR_signalfd 304
+__SYSCALL(304, sys_signalfd, 3)
+/* 305 was __NR_timerfd */
+__SYSCALL(305, sys_ni_syscall, 0)
+#define __NR_eventfd 306
+__SYSCALL(306, sys_eventfd, 1)
+#define __NR_recvmmsg 307
+__SYSCALL(307, sys_recvmmsg, 5)
+
+#define __NR_setns 308
+__SYSCALL(308, sys_setns, 2)
+#define __NR_signalfd4 309
+__SYSCALL(309, sys_signalfd4, 4)
+#define __NR_dup3 310
+__SYSCALL(310, sys_dup3, 3)
+#define __NR_pipe2 311
+__SYSCALL(311, sys_pipe2, 2)
+
+#define __NR_timerfd_create 312
+__SYSCALL(312, sys_timerfd_create, 2)
+#define __NR_timerfd_settime 313
+__SYSCALL(313, sys_timerfd_settime, 4)
+#define __NR_timerfd_gettime 314
+__SYSCALL(314, sys_timerfd_gettime, 2)
+#define __NR_available315 315
+__SYSCALL(315, sys_ni_syscall, 0)
+
+#define __NR_eventfd2 316
+__SYSCALL(316, sys_eventfd2, 2)
+#define __NR_preadv 317
+__SYSCALL(317, sys_preadv, 5)
+#define __NR_pwritev 318
+__SYSCALL(318, sys_pwritev, 5)
+#define __NR_available319 319
+__SYSCALL(319, sys_ni_syscall, 0)
+
+#define __NR_fanotify_init 320
+__SYSCALL(320, sys_fanotify_init, 2)
+#define __NR_fanotify_mark 321
+__SYSCALL(321, sys_fanotify_mark, 6)
+#define __NR_process_vm_readv 322
+__SYSCALL(322, sys_process_vm_readv, 6)
+#define __NR_process_vm_writev 323
+__SYSCALL(323, sys_process_vm_writev, 6)
+
+#define __NR_name_to_handle_at 324
+__SYSCALL(324, sys_name_to_handle_at, 5)
+#define __NR_open_by_handle_at 325
+__SYSCALL(325, sys_open_by_handle_at, 3)
+#define __NR_sync_file_range 326
+__SYSCALL(326, sys_sync_file_range2, 6)
+#define __NR_perf_event_open 327
+__SYSCALL(327, sys_perf_event_open, 5)
+
+#define __NR_rt_tgsigqueueinfo 328
+__SYSCALL(328, sys_rt_tgsigqueueinfo, 4)
+#define __NR_clock_adjtime 329
+__SYSCALL(329, sys_clock_adjtime, 2)
+#define __NR_prlimit64 330
+__SYSCALL(330, sys_prlimit64, 4)
+#define __NR_kcmp 331
+__SYSCALL(331, sys_kcmp, 5)
+
+#define __NR_finit_module 332
+__SYSCALL(332, sys_finit_module, 3)
+
+#define __NR_accept4 333
+__SYSCALL(333, sys_accept4, 4)
+
+#define __NR_sched_setattr 334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr 335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_syscall_count 336
+
+/*
+ * sysxtensa syscall handler
+ *
+ * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
+ * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
+ * a2 a6 a3 a4 a5
+ */
+
+#define SYS_XTENSA_RESERVED 0 /* don't use this */
+#define SYS_XTENSA_ATOMIC_SET 1 /* set variable */
+#define SYS_XTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
+#define SYS_XTENSA_ATOMIC_ADD 3 /* add to memory */
+#define SYS_XTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
+
+#define SYS_XTENSA_COUNT 5 /* count */
+
+#undef __SYSCALL
+
+#endif /* _UAPI_XTENSA_UNISTD_H */
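The comment above documents the sysxtensa helper reached through __NR_xtensa, whose first argument selects one of the SYS_XTENSA_ATOMIC_* operations. A minimal user-space sketch of issuing such a call is given below; it is not part of the patch, duplicates the two constants instead of including the UAPI header, and assumes the glibc syscall(2) wrapper. The exact return convention belongs to the fast_syscall_xtensa handler in entry.S and is deliberately left unstated here.

    /* Hypothetical user-space sketch, not part of the patch. */
    #include <unistd.h>

    #define NR_XTENSA                 1   /* __NR_xtensa from this header */
    #define SYS_XTENSA_ATOMIC_CMP_SWP 4   /* compare and swap */

    /*
     * Ask the kernel to atomically replace *ptr with newval if it still
     * holds oldval.  The precise return-value convention is whatever the
     * fast_syscall_xtensa handler implements; it is not re-stated here.
     */
    static long xtensa_atomic_cmp_swp(int *ptr, int oldval, int newval)
    {
            return syscall(NR_XTENSA, SYS_XTENSA_ATOMIC_CMP_SWP,
                           ptr, oldval, newval);
    }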
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/xtensa/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 2d2728b3e86..18d962a8c0c 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -4,13 +4,17 @@
extra-y := head.o vmlinux.lds
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
- setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
- pci-dma.o init_task.o io.o
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+ ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+ vectors.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_SMP) += smp.o mxhead.o
+
+AFLAGS_head.o += -mtext-section-literals
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
@@ -23,12 +27,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
#
# Replicate rules in scripts/Makefile.build
-sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
+sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
+ -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@
- cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
- | sed $(sed-y) >$@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+ | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 33d6e9d2e83..d4cef6039a5 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -146,9 +146,9 @@
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -170,15 +170,14 @@ ENTRY(fast_unaligned)
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
- rsr a0, DEPC
- xsr a3, EXCSAVE_1
+ rsr a0, depc
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
/* Keep value of SAR in a0 */
- rsr a0, SAR
- rsr a8, EXCVADDR # load unaligned memory address
+ rsr a0, sar
+ rsr a8, excvaddr # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
@@ -197,7 +196,7 @@ ENTRY(fast_unaligned)
/* Extract the instruction that caused the unaligned access. */
- rsr a7, EPC_1 # load exception address
+ rsr a7, epc1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
@@ -275,16 +274,16 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOPS
- rsr a5, LEND # check if we reached LEND
+ rsr a5, lend # check if we reached LEND
bne a7, a5, 1f
- rsr a5, LCOUNT # and LCOUNT != 0
+ rsr a5, lcount # and LCOUNT != 0
beqz a5, 1f
addi a5, a5, -1 # decrement LCOUNT and set
- rsr a7, LBEG # set PC to LBEGIN
- wsr a5, LCOUNT
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a5, lcount
#endif
-1: wsr a7, EPC_1 # skip load instruction
+1: wsr a7, epc1 # skip load instruction
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
@@ -355,16 +354,16 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOPS
- rsr a4, LEND # check if we reached LEND
+ rsr a4, lend # check if we reached LEND
bne a7, a4, 1f
- rsr a4, LCOUNT # and LCOUNT != 0
+ rsr a4, lcount # and LCOUNT != 0
beqz a4, 1f
addi a4, a4, -1 # decrement LCOUNT and set
- rsr a7, LBEG # set PC to LBEGIN
- wsr a4, LCOUNT
+ rsr a7, lbeg # set PC to LBEGIN
+ wsr a4, lcount
#endif
-1: wsr a7, EPC_1 # skip store instruction
+1: wsr a7, epc1 # skip store instruction
movi a4, ~3
and a4, a4, a8 # align memory address
@@ -406,7 +405,7 @@ ENTRY(fast_unaligned)
.Lexit:
movi a4, 0
- rsr a3, EXCSAVE_1
+ rsr a3, excsave1
s32i a4, a3, EXC_TABLE_FIXUP
/* Restore working register */
@@ -420,7 +419,7 @@ ENTRY(fast_unaligned)
/* restore SAR and return */
- wsr a0, SAR
+ wsr a0, sar
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_AREG2
rfe
@@ -438,11 +437,11 @@ ENTRY(fast_unaligned)
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
- wsr a0, SAR
+ wsr a0, sar
mov a1, a2
- rsr a0, PS
- bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
+ rsr a0, ps
+ bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
@@ -450,6 +449,6 @@ ENTRY(fast_unaligned)
1: movi a0, _user_exception
jx a0
+ENDPROC(fast_unaligned)
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
-
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7dc3f915718..1915c7c889b 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -41,6 +41,8 @@ int main(void)
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+ DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
@@ -91,7 +93,8 @@ int main(void)
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
- DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+ DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+ thread.current_ds));
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -108,4 +111,3 @@ int main(void)
return 0;
}
-
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 2bc1e145c0a..a482df5df2b 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -32,9 +32,9 @@
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -43,10 +43,13 @@
/* IO protection is currently unsupported. */
ENTRY(fast_io_protect)
- wsr a0, EXCSAVE_1
+
+ wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
+ENDPROC(fast_io_protect)
+
#if XTENSA_HAVE_COPROCESSORS
/*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
*/
ENTRY(coprocessor_save)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_save)
+
ENTRY(coprocessor_load)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lload_cp_regs_jump_table
@@ -162,8 +169,10 @@ ENTRY(coprocessor_load)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_load)
+
/*
- * coprocessor_flush(struct task_info*, index)
+ * coprocessor_flush(struct task_info*, index)
* a2 a3
* coprocessor_restore(struct task_info*, index)
* a2 a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
ENTRY(coprocessor_flush)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_flush)
+
ENTRY(coprocessor_restore)
entry a1, 32
s32i a0, a1, 0
@@ -205,37 +217,40 @@ ENTRY(coprocessor_restore)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_restore)
+
/*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
ENTRY(fast_coprocessor_double)
- wsr a0, EXCSAVE_1
+
+ wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
+ENDPROC(fast_coprocessor_double)
ENTRY(fast_coprocessor)
/* Save remaining registers a1-a3 and SAR */
- xsr a3, EXCSAVE_1
s32i a3, a2, PT_AREG3
- rsr a3, SAR
+ rsr a3, sar
s32i a1, a2, PT_AREG1
s32i a3, a2, PT_SAR
mov a1, a2
- rsr a2, DEPC
+ rsr a2, depc
s32i a2, a1, PT_AREG2
/*
@@ -248,17 +263,17 @@ ENTRY(fast_coprocessor)
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
- rsr a3, EXCCAUSE
+ rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
ssl a3 # SAR: 32 - coprocessor_number
movi a2, 1
- rsr a0, CPENABLE
+ rsr a0, cpenable
sll a2, a2
or a0, a0, a2
- wsr a0, CPENABLE
+ wsr a0, cpenable
rsync
/* Retrieve previous owner. (a3 still holds CP number) */
@@ -291,7 +306,7 @@ ENTRY(fast_coprocessor)
/* Note that only a0 and a1 were preserved. */
-2: rsr a3, EXCCAUSE
+2: rsr a3, exccause
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
movi a0, coprocessor_owner
addx4 a0, a3, a0
@@ -321,15 +336,20 @@ ENTRY(fast_coprocessor)
l32i a0, a1, PT_SAR
l32i a3, a1, PT_AREG3
l32i a2, a1, PT_AREG2
- wsr a0, SAR
+ wsr a0, sar
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfe
+ENDPROC(fast_coprocessor)
+
.data
+
ENTRY(coprocessor_owner)
+
.fill XCHAL_CP_MAX, 4, 0
-#endif /* XTENSA_HAVE_COPROCESSORS */
+END(coprocessor_owner)
+#endif /* XTENSA_HAVE_COPROCESSORS */
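The coprocessor save/load/flush entry points above are plain-call assembly routines; the commented register interface (a2 = thread pointer, a3 = coprocessor index) is what a C caller relies on. A speculative sketch of such a caller follows; the struct thread_info type (the comment in the file literally says task_info), the extern prototype, and the availability of XCHAL_CP_MAX are assumptions drawn from the surrounding code, not something this diff defines.

    /* Hypothetical C-side sketch, not part of the patch.
     * XCHAL_CP_MAX comes from the Xtensa variant headers (assumed
     * available); "struct thread_info" is assumed -- the comment above
     * says "task_info", which is not an actual type.
     */
    struct thread_info;

    /* Assumed prototype matching the documented register interface:
     * a2 carries the thread pointer, a3 the coprocessor index.
     */
    extern void coprocessor_flush(struct thread_info *ti, int cp_index);

    /* Flush the state of every configured coprocessor for one thread,
     * walking the same range of slots that coprocessor_owner covers.
     */
    static void flush_all_coprocessors(struct thread_info *ti)
    {
            int i;

            for (i = 0; i < XCHAL_CP_MAX; i++)
                    coprocessor_flush(ti, i);
    }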
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 6223f3346b5..ef7f4990722 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2007 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -31,8 +31,6 @@
/* Unimplemented features. */
#undef KERNEL_STACK_OVERFLOW_CHECK
-#undef PREEMPTIBLE_KERNEL
-#undef ALLOCA_EXCEPTION_IN_IRAM
/* Not well tested.
*
@@ -92,9 +90,9 @@
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original value in depc
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave1: a3
+ * excsave1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -110,10 +108,9 @@
ENTRY(user_exception)
- /* Save a2, a3, and depc, restore excsave_1 and set SP. */
+ /* Save a1, a2, a3, and set SP. */
- xsr a3, EXCSAVE_1
- rsr a0, DEPC
+ rsr a0, depc
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
@@ -125,16 +122,21 @@ _user_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
- rsr a3, SAR
- xsr a2, ICOUNTLEVEL
+ rsr a3, sar
+ xsr a2, icountlevel
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
+#if XCHAL_HAVE_THREADPTR
+ rur a2, threadptr
+ s32i a2, a1, PT_THREADPTR
+#endif
+
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- rsr a2, WINDOWBASE
- rsr a3, WINDOWSTART
+ rsr a2, windowbase
+ rsr a3, windowstart
ssr a2
s32i a2, a1, PT_WINDOWBASE
s32i a3, a1, PT_WINDOWSTART
@@ -205,12 +207,12 @@ _user_exception:
/* WINDOWBASE still in SAR! */
- rsr a2, SAR # original WINDOWBASE
+ rsr a2, sar # original WINDOWBASE
movi a3, 1
ssl a2
sll a3, a3
- wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
- wsr a2, WINDOWBASE # and WINDOWSTART
+ wsr a3, windowstart # set corresponding WINDOWSTART bit
+ wsr a2, windowbase # and WINDOWSTART
rsync
/* We are back to the original stack pointer (a1) */
@@ -219,6 +221,7 @@ _user_exception:
j common_exception
+ENDPROC(user_exception)
/*
* First-level exit handler for kernel exceptions
@@ -232,9 +235,9 @@ _user_exception:
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -250,10 +253,9 @@ _user_exception:
ENTRY(kernel_exception)
- /* Save a0, a2, a3, DEPC and set SP. */
+ /* Save a1, a2, a3, and set SP. */
- xsr a3, EXCSAVE_1 # restore a3, excsave_1
- rsr a0, DEPC # get a2
+ rsr a0, depc # get a2
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
@@ -265,16 +267,16 @@ _kernel_exception:
/* Save SAR and turn off single stepping */
movi a2, 0
- rsr a3, SAR
- xsr a2, ICOUNTLEVEL
+ rsr a3, sar
+ xsr a2, icountlevel
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
- rsr a2, WINDOWBASE # don't need to save these, we only
- rsr a3, WINDOWSTART # need shifted windowstart: windowmask
+ rsr a2, windowbase # don't need to save these, we only
+ rsr a3, windowstart # need shifted windowstart: windowmask
ssr a2
slli a2, a3, 32-WSBITS
src a2, a3, a2
@@ -323,24 +325,24 @@ common_exception:
/* Save some registers, disable loops and clear the syscall flag. */
- rsr a2, DEBUGCAUSE
- rsr a3, EPC_1
+ rsr a2, debugcause
+ rsr a3, epc1
s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC
movi a2, -1
- rsr a3, EXCVADDR
+ rsr a3, excvaddr
s32i a2, a1, PT_SYSCALL
movi a2, 0
s32i a3, a1, PT_EXCVADDR
- xsr a2, LCOUNT
+ xsr a2, lcount
s32i a2, a1, PT_LCOUNT
/* It is now save to restore the EXC_TABLE_FIXUP variable. */
- rsr a0, EXCCAUSE
+ rsr a0, exccause
movi a3, 0
- rsr a2, EXCSAVE_1
+ rsr a2, excsave1
s32i a0, a1, PT_EXCCAUSE
s32i a3, a2, EXC_TABLE_FIXUP
@@ -348,38 +350,62 @@ common_exception:
* so we can allow exceptions and interrupts (*) again.
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
*
- * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
- * (interrupts disabled) and if this exception is not an interrupt.
+ * (*) We only allow interrupts if they were previously enabled and
+ * we're not handling an IRQ
*/
- rsr a3, PS
- addi a0, a0, -4
- movi a2, 1
- extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
- moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
+ rsr a3, ps
+ addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a2, LOCKLEVEL
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ # a3 = PS.INTLEVEL
+ moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
- rsr a0, EXCCAUSE
- xsr a3, PS
+ rsr a0, exccause
+ xsr a3, ps
s32i a3, a1, PT_PS # save ps
- /* Save LBEG, LEND */
+ /* Save lbeg, lend */
- rsr a2, LBEG
- rsr a3, LEND
+ rsr a2, lbeg
+ rsr a3, lend
s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND
+ /* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ rsr a2, scompare1
+ s32i a2, a1, PT_SCOMPARE1
+#endif
+
/* Save optional registers. */
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* If we came here because of an interrupt, interrupts were enabled
+	 * and we've just disabled them.
+ */
+ movi a4, trace_hardirqs_off
+ callx4 a4
+1:
+#endif
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
- movi a4, exc_table
+ rsr a4, excsave1
mov a6, a1 # pass stack frame
mov a7, a0 # pass EXCCAUSE
addx4 a4, a0, a4
@@ -390,13 +416,18 @@ common_exception:
callx4 a4
/* Jump here for exception exit */
-
+ .global common_exception_return
common_exception_return:
+1:
+ rsil a2, LOCKLEVEL
+
/* Jump if we are returning from kernel exceptions. */
-1: l32i a3, a1, PT_PS
- _bbci.l a3, PS_UM_BIT, 4f
+ l32i a3, a1, PT_PS
+ GET_THREAD_INFO(a2, a1)
+ l32i a4, a2, TI_FLAGS
+ _bbci.l a3, PS_UM_BIT, 6f
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
@@ -405,34 +436,76 @@ common_exception_return:
* Note that we don't disable interrupts here.
*/
- GET_THREAD_INFO(a2,a1)
- l32i a4, a2, TI_FLAGS
-
_bbsi.l a4, TIF_NEED_RESCHED, 3f
- _bbci.l a4, TIF_SIGPENDING, 4f
+ _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
+ _bbci.l a4, TIF_SIGPENDING, 5f
- l32i a4, a1, PT_DEPC
+2: l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
/* Call do_signal() */
- movi a4, do_signal # int do_signal(struct pt_regs*, sigset_t*)
+ rsil a2, 0
+ movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1
- movi a7, 0
callx4 a4
j 1b
3: /* Reschedule */
+ rsil a2, 0
movi a4, schedule # void schedule (void)
callx4 a4
j 1b
-4: /* Restore optional registers. */
+#ifdef CONFIG_PREEMPT
+6:
+ _bbci.l a4, TIF_NEED_RESCHED, 4f
+
+ /* Check current_thread_info->preempt_count */
+
+ l32i a4, a2, TI_PRE_COUNT
+ bnez a4, 4f
+ movi a4, preempt_schedule_irq
+ callx4 a4
+ j 1b
+#endif
+
+5:
+#ifdef CONFIG_DEBUG_TLB_SANITY
+ l32i a4, a1, PT_DEPC
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+ movi a4, check_tlb_sanity
+ callx4 a4
+#endif
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* If we came here because of an interrupt, interrupts were enabled
+	 * and we'll re-enable them on return.
+ */
+ movi a4, trace_hardirqs_on
+ callx4 a4
+1:
+#endif
+ /* Restore optional registers. */
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
- wsr a3, PS /* disable interrupts */
+ /* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ l32i a2, a1, PT_SCOMPARE1
+ wsr a2, scompare1
+#endif
+ wsr a3, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit
@@ -444,12 +517,12 @@ user_exception_exit:
l32i a2, a1, PT_WINDOWBASE
l32i a3, a1, PT_WINDOWSTART
- wsr a1, DEPC # use DEPC as temp storage
- wsr a3, WINDOWSTART # restore WINDOWSTART
+ wsr a1, depc # use DEPC as temp storage
+ wsr a3, windowstart # restore WINDOWSTART
ssr a2 # preserve user's WB in the SAR
- wsr a2, WINDOWBASE # switch to user's saved WB
+ wsr a2, windowbase # switch to user's saved WB
rsync
- rsr a1, DEPC # restore stack pointer
+ rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7
_bltui a6, 16, 1f # only have to restore current window?
@@ -475,8 +548,8 @@ user_exception_exit:
/* Clear unrestored registers (don't leak anything to user-land */
-1: rsr a0, WINDOWBASE
- rsr a3, SAR
+1: rsr a0, windowbase
+ rsr a3, sar
sub a3, a0, a3
beqz a3, 2f
extui a3, a3, 0, WBBITS
@@ -495,6 +568,11 @@ user_exception_exit:
* (if we have restored WSBITS-1 frames).
*/
+#if XCHAL_HAVE_THREADPTR
+ l32i a3, a1, PT_THREADPTR
+ wur a3, threadptr
+#endif
+
2: j common_exception_exit
/* This is the kernel exception exit.
@@ -504,29 +582,6 @@ user_exception_exit:
kernel_exception_exit:
-#ifdef PREEMPTIBLE_KERNEL
-
-#ifdef CONFIG_PREEMPT
-
- /*
- * Note: We've just returned from a call4, so we have
- * at least 4 addt'l regs.
- */
-
- /* Check current_thread_info->preempt_count */
-
- GET_THREAD_INFO(a2)
- l32i a3, a2, TI_PREEMPT
- bnez a3, 1f
-
- l32i a2, a2, TI_FLAGS
-
-1:
-
-#endif
-
-#endif
-
/* Check if we have to do a movsp.
*
* We only have to do a movsp if the previous window-frame has
@@ -556,7 +611,7 @@ kernel_exception_exit:
/* Test WINDOWSTART now. If spilled, do the movsp */
- rsr a3, WINDOWSTART
+ rsr a3, windowstart
addi a0, a3, -1
and a3, a3, a0
_bnez a3, common_exception_exit
@@ -604,24 +659,24 @@ common_exception_exit:
1: l32i a2, a1, PT_PC
l32i a3, a1, PT_SAR
- wsr a2, EPC_1
- wsr a3, SAR
+ wsr a2, epc1
+ wsr a3, sar
/* Restore LBEG, LEND, LCOUNT */
l32i a2, a1, PT_LBEG
l32i a3, a1, PT_LEND
- wsr a2, LBEG
+ wsr a2, lbeg
l32i a2, a1, PT_LCOUNT
- wsr a3, LEND
- wsr a2, LCOUNT
+ wsr a3, lend
+ wsr a2, lcount
/* We control single stepping through the ICOUNTLEVEL register. */
l32i a2, a1, PT_ICOUNTLEVEL
movi a3, -2
- wsr a2, ICOUNTLEVEL
- wsr a3, ICOUNT
+ wsr a2, icountlevel
+ wsr a3, icount
/* Check if it was double exception. */
@@ -636,11 +691,13 @@ common_exception_exit:
l32i a1, a1, PT_AREG1
rfe
-1: wsr a0, DEPC
+1: wsr a0, depc
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
rfde
+ENDPROC(kernel_exception)
+
/*
* Debug exception handler.
*
@@ -651,25 +708,25 @@ common_exception_exit:
ENTRY(debug_exception)
- rsr a0, EPS + XCHAL_DEBUGLEVEL
+ rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
- /* Set EPC_1 and EXCCAUSE */
+ /* Set EPC1 and EXCCAUSE */
- wsr a2, DEPC # save a2 temporarily
- rsr a2, EPC + XCHAL_DEBUGLEVEL
- wsr a2, EPC_1
+ wsr a2, depc # save a2 temporarily
+ rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
+ wsr a2, epc1
movi a2, EXCCAUSE_MAPPED_DEBUG
- wsr a2, EXCCAUSE
+ wsr a2, exccause
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector
- wsr a2, PS
- xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+ wsr a2, ps
+ xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
/* Switch to kernel/user stack, restore jump vector, and save a0 */
@@ -680,19 +737,19 @@ ENTRY(debug_exception)
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC # mark it as a regular exception
- xsr a0, DEPC
+ xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
j _kernel_exception
-2: rsr a2, EXCSAVE_1
+2: rsr a2, excsave1
l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
s32i a0, a2, PT_AREG0
movi a0, 0
s32i a1, a2, PT_AREG1
s32i a0, a2, PT_DEPC
- xsr a0, DEPC
+ xsr a0, depc
s32i a3, a2, PT_AREG3
s32i a0, a2, PT_AREG2
mov a1, a2
@@ -701,6 +758,7 @@ ENTRY(debug_exception)
/* Debug exception while in exception mode. */
1: j 1b // FIXME!!
+ENDPROC(debug_exception)
/*
* We get here in case of an unrecoverable exception.
@@ -732,12 +790,12 @@ ENTRY(unrecoverable_exception)
movi a0, 1
movi a1, 0
- wsr a0, WINDOWSTART
- wsr a1, WINDOWBASE
+ wsr a0, windowstart
+ wsr a1, windowbase
rsync
- movi a1, (1 << PS_WOE_BIT) | 1
- wsr a1, PS
+ movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a1, ps
rsync
movi a1, init_task
@@ -751,6 +809,7 @@ ENTRY(unrecoverable_exception)
1: j 1b
+ENDPROC(unrecoverable_exception)
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
@@ -759,176 +818,64 @@ ENTRY(unrecoverable_exception)
*
* The ALLOCA handler is entered when user code executes the MOVSP
* instruction and the caller's frame is not in the register file.
- * In this case, the caller frame's a0..a3 are on the stack just
- * below sp (a1), and this handler moves them.
*
- * For "MOVSP <ar>,<as>" without destination register a1, this routine
- * simply moves the value from <as> to <ar> without moving the save area.
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow exception
+ * of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed, and this time, since the next window frame is in the
+ * active AR registers, it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss, the page will get mapped
+ * and the partial WindowUnderflow will be handled in the double exception
+ * handler.
*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-#if XCHAL_HAVE_BE
-#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
-#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
-#else
-#define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
-#define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
-#endif
-
ENTRY(fast_alloca)
+ rsr a0, windowbase
+ rotw -1
+ rsr a2, ps
+ extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+ xor a3, a3, a4
+ l32i a4, a6, PT_AREG0
+ l32i a1, a6, PT_DEPC
+ rsr a6, depc
+ wsr a1, depc
+ slli a3, a3, PS_OWB_SHIFT
+ xor a2, a2, a3
+ wsr a2, ps
+ rsync
- /* We shouldn't be in a double exception. */
-
- l32i a0, a2, PT_DEPC
- _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
-
- rsr a0, DEPC # get a2
- s32i a4, a2, PT_AREG4 # save a4 and
- s32i a0, a2, PT_AREG2 # a2 to stack
-
- /* Exit critical section. */
-
- movi a0, 0
- s32i a0, a3, EXC_TABLE_FIXUP
-
- /* Restore a3, excsave_1 */
-
- xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl.
- rsr a4, EPC_1 # get exception address
- s32i a3, a2, PT_AREG3 # save a3 to stack
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
- /* Note: l8ui not allowed in IRAM/IROM!! */
- l8ui a0, a4, 1 # read as(src) from MOVSP instruction
-#endif
- movi a3, .Lmovsp_src
- _EXTUI_MOVSP_SRC(a0) # extract source register number
- addx8 a3, a0, a3
- jx a3
-
-.Lunhandled_double:
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
-
- .align 8
-.Lmovsp_src:
- l32i a3, a2, PT_AREG0; _j 1f; .align 8
- mov a3, a1; _j 1f; .align 8
- l32i a3, a2, PT_AREG2; _j 1f; .align 8
- l32i a3, a2, PT_AREG3; _j 1f; .align 8
- l32i a3, a2, PT_AREG4; _j 1f; .align 8
- mov a3, a5; _j 1f; .align 8
- mov a3, a6; _j 1f; .align 8
- mov a3, a7; _j 1f; .align 8
- mov a3, a8; _j 1f; .align 8
- mov a3, a9; _j 1f; .align 8
- mov a3, a10; _j 1f; .align 8
- mov a3, a11; _j 1f; .align 8
- mov a3, a12; _j 1f; .align 8
- mov a3, a13; _j 1f; .align 8
- mov a3, a14; _j 1f; .align 8
- mov a3, a15; _j 1f; .align 8
-
-1:
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error iram not supported
-#else
- l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
-#endif
- addi a4, a4, 3 # step over movsp
- _EXTUI_MOVSP_DST(a0) # extract destination register
- wsr a4, EPC_1 # save new epc_1
-
- _bnei a0, 1, 1f # no 'movsp a1, ax': jump
-
- /* Move the save area. This implies the use of the L32E
- * and S32E instructions, because this move must be done with
- * the user's PS.RING privilege levels, not with ring 0
- * (kernel's) privileges currently active with PS.EXCM
- * set. Note that we have stil registered a fixup routine with the
- * double exception vector in case a double exception occurs.
- */
-
- /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
-
- l32e a0, a1, -16
- l32e a4, a1, -12
- s32e a0, a3, -16
- s32e a4, a3, -12
- l32e a0, a1, -8
- l32e a4, a1, -4
- s32e a0, a3, -8
- s32e a4, a3, -4
-
- /* Restore stack-pointer and all the other saved registers. */
-
- mov a1, a3
-
- l32i a4, a2, PT_AREG4
- l32i a3, a2, PT_AREG3
- l32i a0, a2, PT_AREG0
- l32i a2, a2, PT_AREG2
- rfe
-
- /* MOVSP <at>,<as> was invoked with <at> != a1.
- * Because the stack pointer is not being modified,
- * we should be able to just modify the pointer
- * without moving any save area.
- * The processor only traps these occurrences if the
- * caller window isn't live, so unfortunately we can't
- * use this as an alternate trap mechanism.
- * So we just do the move. This requires that we
- * resolve the destination register, not just the source,
- * so there's some extra work.
- * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
- */
-
- /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
-
-1: movi a4, .Lmovsp_dst
- addx8 a4, a0, a4
- jx a4
-
- .align 8
-.Lmovsp_dst:
- s32i a3, a2, PT_AREG0; _j 1f; .align 8
- mov a1, a3; _j 1f; .align 8
- s32i a3, a2, PT_AREG2; _j 1f; .align 8
- s32i a3, a2, PT_AREG3; _j 1f; .align 8
- s32i a3, a2, PT_AREG4; _j 1f; .align 8
- mov a5, a3; _j 1f; .align 8
- mov a6, a3; _j 1f; .align 8
- mov a7, a3; _j 1f; .align 8
- mov a8, a3; _j 1f; .align 8
- mov a9, a3; _j 1f; .align 8
- mov a10, a3; _j 1f; .align 8
- mov a11, a3; _j 1f; .align 8
- mov a12, a3; _j 1f; .align 8
- mov a13, a3; _j 1f; .align 8
- mov a14, a3; _j 1f; .align 8
- mov a15, a3; _j 1f; .align 8
-
-1: l32i a4, a2, PT_AREG4
- l32i a3, a2, PT_AREG3
- l32i a0, a2, PT_AREG0
- l32i a2, a2, PT_AREG2
- rfe
-
+ _bbci.l a4, 31, 4f
+ rotw -1
+ _bbci.l a8, 30, 8f
+ rotw -1
+ j _WindowUnderflow12
+8: j _WindowUnderflow8
+4: j _WindowUnderflow4
+ENDPROC(fast_alloca)
/*
* fast system calls.
@@ -944,58 +891,61 @@ ENTRY(fast_alloca)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*/
ENTRY(fast_syscall_kernel)
/* Skip syscall. */
- rsr a0, EPC_1
+ rsr a0, epc1
addi a0, a0, 3
- wsr a0, EPC_1
+ wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
- rsr a0, DEPC # get syscall-nr
+ rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
j kernel_exception
+ENDPROC(fast_syscall_kernel)
+
ENTRY(fast_syscall_user)
/* Skip syscall. */
- rsr a0, EPC_1
+ rsr a0, epc1
addi a0, a0, 3
- wsr a0, EPC_1
+ wsr a0, epc1
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
- rsr a0, DEPC # get syscall-nr
+ rsr a0, depc # get syscall-nr
_beqz a0, fast_syscall_spill_registers
_beqi a0, __NR_xtensa, fast_syscall_xtensa
j user_exception
-ENTRY(fast_syscall_unrecoverable)
+ENDPROC(fast_syscall_user)
- /* Restore all states. */
+ENTRY(fast_syscall_unrecoverable)
- l32i a0, a2, PT_AREG0 # restore a0
- xsr a2, DEPC # restore a2, depc
- rsr a3, EXCSAVE_1
+ /* Restore all states. */
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, depc # restore a2, depc
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+ENDPROC(fast_syscall_unrecoverable)
/*
* sysxtensa syscall handler
@@ -1011,10 +961,10 @@ ENTRY(fast_syscall_unrecoverable)
* a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in a0 and DEPC
- * a3: dispatch table, original in excsave_1
+ * a3: a3
* a4..a15: unchanged
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1047,8 +997,6 @@ ENTRY(fast_syscall_unrecoverable)
ENTRY(fast_syscall_xtensa)
- xsr a3, EXCSAVE_1 # restore a3, excsave1
-
s32i a7, a2, PT_AREG7 # we need an additional register
movi a7, 4 # sizeof(unsigned int)
access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
@@ -1101,7 +1049,7 @@ CATCH
movi a2, -EINVAL
rfe
-
+ENDPROC(fast_syscall_xtensa)
/* fast_syscall_spill_registers.
@@ -1111,9 +1059,9 @@ CATCH
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
*/
@@ -1122,201 +1070,68 @@ ENTRY(fast_syscall_spill_registers)
/* Register a FIXUP handler (pass current wb as a parameter) */
+ xsr a3, excsave1
movi a0, fast_syscall_spill_registers_fixup
s32i a0, a3, EXC_TABLE_FIXUP
- rsr a0, WINDOWBASE
+ rsr a0, windowbase
s32i a0, a3, EXC_TABLE_PARAM
+ xsr a3, excsave1 # restore a3 and excsave_1
- /* Save a3 and SAR on stack. */
+ /* Save a3, a4 and SAR on stack. */
- rsr a0, SAR
- xsr a3, EXCSAVE_1 # restore a3 and excsave_1
+ rsr a0, sar
s32i a3, a2, PT_AREG3
- s32i a4, a2, PT_AREG4
- s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
+ s32i a0, a2, PT_SAR
- /* The spill routine might clobber a7, a11, and a15. */
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+ s32i a4, a2, PT_AREG4
s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
s32i a15, a2, PT_AREG15
- call0 _spill_registers # destroys a3, a4, and SAR
-
- /* Advance PC, restore registers and SAR, and return from exception. */
-
- l32i a3, a2, PT_AREG5
- l32i a4, a2, PT_AREG4
- l32i a0, a2, PT_AREG0
- wsr a3, SAR
- l32i a3, a2, PT_AREG3
-
- /* Restore clobbered registers. */
-
- l32i a7, a2, PT_AREG7
- l32i a11, a2, PT_AREG11
- l32i a15, a2, PT_AREG15
-
- movi a2, 0
- rfe
-
-/* Fixup handler.
- *
- * We get here if the spill routine causes an exception, e.g. tlb miss.
- * We basically restore WINDOWBASE and WINDOWSTART to the condition when
- * we entered the spill routine and jump to the user exception handler.
- *
- * a0: value of depc, original value in depc
- * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
- * a3: exctable, original value in excsave1
- */
-
-fast_syscall_spill_registers_fixup:
-
- rsr a2, WINDOWBASE # get current windowbase (a2 is saved)
- xsr a0, DEPC # restore depc and a0
- ssl a2 # set shift (32 - WB)
-
- /* We need to make sure the current registers (a0-a3) are preserved.
- * To do this, we simply set the bit for the current window frame
- * in WS, so that the exception handlers save them to the task stack.
- */
-
- rsr a3, EXCSAVE_1 # get spill-mask
- slli a2, a3, 1 # shift left by one
-
- slli a3, a2, 32-WSBITS
- src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy......
- wsr a2, WINDOWSTART # set corrected windowstart
-
- movi a3, exc_table
- l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
- l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
-
- /* Return to the original (user task) WINDOWBASE.
- * We leave the following frame behind:
- * a0, a1, a2 same
- * a3: trashed (saved in excsave_1)
- * depc: depc (we have to return to that address)
- * excsave_1: a3
- */
-
- wsr a3, WINDOWBASE
- rsync
-
- /* We are now in the original frame when we entered _spill_registers:
- * a0: return address
- * a1: used, stack pointer
- * a2: kernel stack pointer
- * a3: available, saved in EXCSAVE_1
- * depc: exception address
- * excsave: a3
- * Note: This frame might be the same as above.
- */
-
- /* Setup stack pointer. */
-
- addi a2, a2, -PT_USER_SIZE
- s32i a0, a2, PT_AREG0
-
- /* Make sure we return to this fixup handler. */
-
- movi a3, fast_syscall_spill_registers_fixup_return
- s32i a3, a2, PT_DEPC # setup depc
-
- /* Jump to the exception handler. */
-
- movi a3, exc_table
- rsr a0, EXCCAUSE
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- jx a0
-
-fast_syscall_spill_registers_fixup_return:
-
- /* When we return here, all registers have been restored (a2: DEPC) */
-
- wsr a2, DEPC # exception address
-
- /* Restore fixup handler. */
-
- xsr a3, EXCSAVE_1
- movi a2, fast_syscall_spill_registers_fixup
- s32i a2, a3, EXC_TABLE_FIXUP
- rsr a2, WINDOWBASE
- s32i a2, a3, EXC_TABLE_PARAM
- l32i a2, a3, EXC_TABLE_KSTK
-
- /* Load WB at the time the exception occurred. */
-
- rsr a3, SAR # WB is still in SAR
- neg a3, a3
- wsr a3, WINDOWBASE
- rsync
-
- /* Restore a3 and return. */
-
- movi a3, exc_table
- xsr a3, EXCSAVE_1
-
- rfde
-
-
-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- * - must be called with call0.
- * - uses a3, a4 and SAR.
- * - the last 'valid' register of each frame are clobbered.
- * - the caller must have registered a fixup handler
- * (or be inside a critical section)
- * - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
-
/*
* Rotate ws so that the current windowbase is at bit 0.
* Assume ws = xxxwww1yy (www1 current window frame).
* Rotate ws right so that a4 = yyxxxwww1.
*/
- rsr a4, WINDOWBASE
- rsr a3, WINDOWSTART # a3 = xxxwww1yy
- ssr a4 # holds WB
- slli a4, a3, WSBITS
- or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
+ rsr a0, windowbase
+ rsr a3, windowstart # a3 = xxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
/* We are done if there are no more than the current register frame. */
extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
- movi a4, (1 << (WSBITS-1))
+ movi a0, (1 << (WSBITS-1))
_beqz a3, .Lnospill # only one active frame? jump
/* We want 1 at the top, so that we return to the current windowbase */
- or a3, a3, a4 # 1yyxxxwww
+ or a3, a3, a0 # 1yyxxxwww
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
- wsr a3, WINDOWSTART # save shifted windowstart
- neg a4, a3
- and a3, a4, a3 # first bit set from right: 000010000
+ wsr a3, windowstart # save shifted windowstart
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
- ffs_ws a4, a3 # a4: shifts to skip empty frames
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
movi a3, WSBITS
- sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
- ssr a4 # save in SAR for later.
+ sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
+ ssr a0 # save in SAR for later.
- rsr a3, WINDOWBASE
- add a3, a3, a4
- wsr a3, WINDOWBASE
+ rsr a3, windowbase
+ add a3, a3, a0
+ wsr a3, windowbase
rsync
- rsr a3, WINDOWSTART
+ rsr a3, windowstart
srl a3, a3 # shift windowstart
/* WB is now just one frame below the oldest frame in the register
@@ -1327,22 +1142,6 @@ ENTRY(_spill_registers)
* we have to save 4,8. or 12 registers.
*/
- _bbsi.l a3, 1, .Lc4
- _bbsi.l a3, 2, .Lc8
-
- /* Special case: we have a call12-frame starting at a4. */
-
- _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
-
- s32e a4, a1, -16 # a1 is valid with an empty spill area
- l32e a4, a5, -12
- s32e a8, a4, -48
- mov a8, a4
- l32e a4, a1, -16
- j .Lc12c
-
-.Lnospill:
- ret
.Lloop: _bbsi.l a3, 1, .Lc4
_bbci.l a3, 2, .Lc12
@@ -1356,20 +1155,10 @@ ENTRY(_spill_registers)
s32e a9, a4, -28
s32e a10, a4, -24
s32e a11, a4, -20
-
srli a11, a3, 2 # shift windowbase by 2
rotw 2
_bnei a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
- rotw 1
- rsr a3, WINDOWBASE
- ssl a3
- movi a3, 1
- sll a3, a3
- wsr a3, WINDOWSTART
- ret
+ j .Lexit
.Lc4: s32e a4, a9, -16
s32e a5, a9, -12
@@ -1385,11 +1174,11 @@ ENTRY(_spill_registers)
/* 12-register frame (call12) */
- l32e a2, a5, -12
- s32e a8, a2, -48
- mov a8, a2
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
-.Lc12c: s32e a9, a8, -44
+ s32e a9, a8, -44
s32e a10, a8, -40
s32e a11, a8, -36
s32e a12, a8, -32
@@ -1409,61 +1198,208 @@ ENTRY(_spill_registers)
*/
rotw 1
- mov a5, a13
+ mov a4, a13
rotw -1
- s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
rotw 3
_beqi a3, 1, .Lexit
j .Lloop
-.Linvalid_mask:
+.Lexit:
- /* We get here because of an unrecoverable error in the window
- * registers. If we are in user space, we kill the application,
- * however, this condition is unrecoverable in kernel space.
- */
+ /* Done. Do the final rotation and set WS */
- rsr a0, PS
- _bbci.l a0, PS_UM_BIT, 1f
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
- /* User space: Setup a dummy frame and kill application.
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_SAR
+ l32i a0, a2, PT_AREG0
+ wsr a3, sar
+ l32i a3, a2, PT_AREG3
+
+ /* Restore clobbered registers. */
+
+ l32i a4, a2, PT_AREG4
+ l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
+ l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
+ l32i a15, a2, PT_AREG15
+
+ movi a2, 0
+ rfe
+
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
*/
movi a0, 1
movi a1, 0
- wsr a0, WINDOWSTART
- wsr a1, WINDOWBASE
+ wsr a0, windowstart
+ wsr a1, windowbase
rsync
movi a0, 0
- movi a3, exc_table
+ rsr a3, excsave1
l32i a1, a3, EXC_TABLE_KSTK
- wsr a3, EXCSAVE_1
- movi a4, (1 << PS_WOE_BIT) | 1
- wsr a4, PS
+ movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+ wsr a4, ps
rsync
movi a6, SIGSEGV
movi a4, do_exit
callx4 a4
-1: /* Kernel space: PANIC! */
+ /* shouldn't return, so panic */
- wsr a0, EXCSAVE_1
+ wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0 # should not return
1: j 1b
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+ rsr a2, windowbase # get current windowbase (a2 is saved)
+ xsr a0, depc # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ *
+	 * Note: we use a3 to set the windowbase, so we take special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+ wsr a2, windowstart # set corrected windowstart
+
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+ * depc: depc (we have to return to that address)
+ * excsave_1: exctable
+ */
+
+ wsr a3, windowbase
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available
+ * depc: exception address
+ * excsave: exctable
+ * Note: This frame might be the same as above.
+ */
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ rsr a3, excsave1
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ jx a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, depc # exception address
+
+ /* Restore fixup handler. */
+
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, sar # WB is still in SAR
+ neg a3, a3
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+ rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
#ifdef CONFIG_MMU
/*
* We should never get here. Bail out!
@@ -1475,6 +1411,8 @@ ENTRY(fast_second_level_miss_double_kernel)
callx0 a0 # should not return
1: j 1b
+ENDPROC(fast_second_level_miss_double_kernel)
+
/* First-level entry handler for user, kernel, and double 2nd-level
* TLB miss exceptions. Note that for now, user and kernel miss
* exceptions share the same entry point and are handled identically.
@@ -1487,9 +1425,9 @@ ENTRY(fast_second_level_miss_double_kernel)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1497,9 +1435,10 @@ ENTRY(fast_second_level_miss_double_kernel)
ENTRY(fast_second_level_miss)
- /* Save a1. Note: we don't expect a double exception. */
+ /* Save a1 and a3. Note: we don't expect a double exception. */
s32i a1, a2, PT_AREG1
+ s32i a3, a2, PT_AREG3
/* We need to map the page of PTEs for the user task. Find
* the pointer to that page. Also, it's possible for tsk->mm
@@ -1521,10 +1460,7 @@ ENTRY(fast_second_level_miss)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
-
- /* We deliberately destroy a3 that holds the exception table. */
-
-8: rsr a3, EXCVADDR # fault address
+8: rsr a3, excvaddr # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
beqz a0, 2f
@@ -1542,7 +1478,7 @@ ENTRY(fast_second_level_miss)
* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
*/
- movi a1, -PAGE_OFFSET
+ movi a1, (-PAGE_OFFSET) & 0xffffffff
add a0, a0, a1 # pmdval - PAGE_OFFSET
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1
@@ -1561,7 +1497,7 @@ ENTRY(fast_second_level_miss)
*/
extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
- rsr a1, PTEVADDR
+ rsr a1, ptevaddr
addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT
extui a3, a3, 2, 2 # -> 0,0,1,2
@@ -1574,7 +1510,7 @@ ENTRY(fast_second_level_miss)
/* Exit critical section. */
-4: movi a3, exc_table # restore a3
+4: rsr a3, excsave1
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
@@ -1582,19 +1518,19 @@ ENTRY(fast_second_level_miss)
l32i a0, a2, PT_AREG0
l32i a1, a2, PT_AREG1
+ l32i a3, a2, PT_AREG3
l32i a2, a2, PT_DEPC
- xsr a3, EXCSAVE_1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
/* Restore excsave1 and return. */
- rsr a2, DEPC
+ rsr a2, depc
rfe
/* Return from double exception. */
-1: xsr a2, DEPC
+1: xsr a2, depc
esync
rfde
@@ -1618,7 +1554,7 @@ ENTRY(fast_second_level_miss)
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
- rsr a3, EPC_1
+ rsr a3, epc1
bltu a3, a0, 2f
movi a0, __tlbtemp_mapping_end
bgeu a3, a0, 2f
@@ -1626,7 +1562,7 @@ ENTRY(fast_second_level_miss)
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
- rsr a0, EXCVADDR
+ rsr a0, excvaddr
bltu a0, a3, 2f
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
@@ -1635,7 +1571,7 @@ ENTRY(fast_second_level_miss)
/* Check if we have to restore an ITLB mapping. */
movi a1, __tlbtemp_mapping_itlb
- rsr a3, EPC_1
+ rsr a3, epc1
sub a3, a3, a1
/* Calculate VPN */
@@ -1670,18 +1606,16 @@ ENTRY(fast_second_level_miss)
2: /* Invalid PGD, default exception handling */
- movi a3, exc_table
- rsr a1, DEPC
- xsr a3, EXCSAVE_1
+ rsr a1, depc
s32i a1, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
mov a1, a2
- rsr a2, PS
+ rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
+ENDPROC(fast_second_level_miss)
/*
* StoreProhibitedException
@@ -1693,9 +1627,9 @@ ENTRY(fast_second_level_miss)
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
- * a3: dispatch table
+ * a3: a3
* depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: a3
+ * excsave_1: dispatch table
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1703,61 +1637,64 @@ ENTRY(fast_second_level_miss)
ENTRY(fast_store_prohibited)
- /* Save a1 and a4. */
+ /* Save a1 and a3. */
s32i a1, a2, PT_AREG1
- s32i a4, a2, PT_AREG4
+ s32i a3, a2, PT_AREG3
GET_CURRENT(a1,a2)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
-8: rsr a1, EXCVADDR # fault address
- _PGD_OFFSET(a0, a1, a4)
+8: rsr a1, excvaddr # fault address
+ _PGD_OFFSET(a0, a1, a3)
l32i a0, a0, 0
beqz a0, 2f
- /* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
+ /*
+ * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
+ * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
+ */
- _PTE_OFFSET(a0, a1, a4)
- l32i a4, a0, 0 # read pteval
- bbci.l a4, _PAGE_WRITABLE_BIT, 2f
+ _PTE_OFFSET(a0, a1, a3)
+ l32i a3, a0, 0 # read pteval
+ movi a1, _PAGE_CA_INVALID
+ ball a3, a1, 2f
+ bbci.l a3, _PAGE_WRITABLE_BIT, 2f
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
- or a4, a4, a1
- rsr a1, EXCVADDR
- s32i a4, a0, 0
+ or a3, a3, a1
+ rsr a1, excvaddr
+ s32i a3, a0, 0
/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
dhwb a0, 0
#endif
pdtlb a0, a1
- wdtlb a4, a0
+ wdtlb a3, a0
/* Exit critical section. */
movi a0, 0
+ rsr a3, excsave1
s32i a0, a3, EXC_TABLE_FIXUP
/* Restore the working registers, and return. */
- l32i a4, a2, PT_AREG4
+ l32i a3, a2, PT_AREG3
l32i a1, a2, PT_AREG1
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_DEPC
- /* Restore excsave1 and a3. */
-
- xsr a3, EXCSAVE_1
bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
- rsr a2, DEPC
+ rsr a2, depc
rfe
/* Double exception. Restore FIXUP handler and return. */
-1: xsr a2, DEPC
+1: xsr a2, depc
esync
rfde
@@ -1766,17 +1703,17 @@ ENTRY(fast_store_prohibited)
2: /* If there was a problem, handle fault in C */
- rsr a4, DEPC # still holds a2
- xsr a3, EXCSAVE_1
- s32i a4, a2, PT_AREG2
- s32i a3, a2, PT_AREG3
- l32i a4, a2, PT_AREG4
+ rsr a3, depc # still holds a2
+ s32i a3, a2, PT_AREG2
mov a1, a2
- rsr a2, PS
+ rsr a2, ps
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
+
+ENDPROC(fast_store_prohibited)
+
#endif /* CONFIG_MMU */
/*
@@ -1787,6 +1724,7 @@ ENTRY(fast_store_prohibited)
*/
ENTRY(system_call)
+
entry a1, 32
/* regs->syscall = regs->areg[2] */
@@ -1831,50 +1769,45 @@ ENTRY(system_call)
callx4 a4
retw
+ENDPROC(system_call)
/*
- * Create a kernel thread
+ * Macro to spill live registers on the kernel stack.
*
- * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
- * a2 a2 a3 a4
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
*/
+ .macro spill_registers_kernel
-ENTRY(kernel_thread)
- entry a1, 16
-
- mov a5, a2 # preserve fn over syscall
- mov a7, a3 # preserve args over syscall
-
- movi a3, _CLONE_VM | _CLONE_UNTRACED
- movi a2, __NR_clone
- or a6, a4, a3 # arg0: flags
- mov a3, a1 # arg1: sp
- syscall
-
- beq a3, a1, 1f # branch if parent
- mov a6, a7 # args
- callx4 a5 # fn(args)
-
- movi a2, __NR_exit
- syscall # return value of fn(args) still in a6
-
-1: retw
-
-/*
- * Do a system call from kernel instead of calling sys_execve, so we end up
- * with proper pt_regs.
- *
- * int kernel_execve(const char *fname, char *const argv[], charg *const envp[])
- * a2 a2 a3 a4
- */
-
-ENTRY(kernel_execve)
- entry a1, 16
- mov a6, a2 # arg0 is in a6
- movi a2, __NR_execve
- syscall
-
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
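/*
 * Roughly (an interpretation, not text from the patch): the call12/_entry
 * chain above keeps allocating 12-register frames until the physical
 * register file wraps around, so the writes into a12 raise window-overflow
 * exceptions that push every older live frame out to its owner's stack;
 * the matching retw chain then unwinds, which is why the exit condition
 * can promise a single bit set in WINDOWSTART.
 */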
/*
* Task switch.
@@ -1887,22 +1820,21 @@ ENTRY(_switch_to)
entry a1, 16
- mov a12, a2 # preserve 'prev' (a2)
- mov a13, a3 # and 'next' (a3)
+ mov a10, a2 # preserve 'prev' (a2)
+ mov a11, a3 # and 'next' (a3)
l32i a4, a2, TASK_THREAD_INFO
l32i a5, a3, TASK_THREAD_INFO
- save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- s32i a0, a12, THREAD_RA # save return address
- s32i a1, a12, THREAD_SP # save stack pointer
+ s32i a0, a10, THREAD_RA # save return address
+ s32i a1, a10, THREAD_SP # save stack pointer
/* Disable ints while we manipulate the stack pointer. */
- movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
- xsr a14, PS
- rsr a3, EXCSAVE_1
+ rsil a14, LOCKLEVEL
+ rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
@@ -1910,13 +1842,13 @@ ENTRY(_switch_to)
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
l32i a3, a5, THREAD_CPENABLE
- xsr a3, CPENABLE
+ xsr a3, cpenable
s32i a3, a4, THREAD_CPENABLE
#endif
/* Flush register file. */
- call0 _spill_registers # destroys a3, a4, and SAR
+ spill_registers_kernel
/* Set kernel stack (and leave critical section)
 * Note: It's safe to set it here. The stack will not be overwritten
@@ -1924,25 +1856,26 @@ ENTRY(_switch_to)
* we return from kernel space.
*/
- rsr a3, EXCSAVE_1 # exc_table
+ rsr a3, excsave1 # exc_table
movi a6, 0
addi a7, a5, PT_REGS_OFFSET
s32i a6, a3, EXC_TABLE_FIXUP
s32i a7, a3, EXC_TABLE_KSTK
- /* restore context of the task that 'next' addresses */
+ /* restore context of the task 'next' */
- l32i a0, a13, THREAD_RA # restore return address
- l32i a1, a13, THREAD_SP # restore stack pointer
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
- load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- wsr a14, PS
- mov a2, a12 # return 'prev'
+ wsr a14, ps
+ mov a2, a10 # return 'prev'
rsync
retw
+ENDPROC(_switch_to)
ENTRY(ret_from_fork)
@@ -1958,3 +1891,18 @@ ENTRY(ret_from_fork)
j common_exception_return
+ENDPROC(ret_from_fork)
+
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ * left from _switch_to: a6 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+ call4 schedule_tail
+ mov a6, a3
+ callx4 a2
+ j common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
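/*
 * In C terms ret_from_kernel_thread amounts to (sketch only, using the
 * register roles listed in the comment above):
 *
 *	schedule_tail(prev);		prev arrives in a6 from _switch_to
 *	thread_fn(thread_fn_arg);	a2/a3 were planted by copy_thread
 *
 * followed by a jump to common_exception_return.
 */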
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 3ef91a73652..aeeb3cc8a41 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -18,6 +18,8 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -47,34 +49,65 @@
*/
__HEAD
- .globl _start
-_start: _j 2f
- .align 4
-1: .word _startup
-2: l32r a0, 1b
- jx a0
-
- .section .init.text, "ax"
- .align 4
-_startup:
+ .begin no-absolute-literals
- /* Disable interrupts and exceptions. */
-
- movi a0, LOCKLEVEL
- wsr a0, PS
+ENTRY(_start)
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupOCD
- wsr a2, EXCSAVE_1
-
- /* Start with a fresh windowbase and windowstart. */
+ .align 4
+ .literal_position
+.Lstartup:
+ .word _startup
+ .align 4
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
movi a1, 1
movi a0, 0
- wsr a1, WINDOWSTART
- wsr a0, WINDOWBASE
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
rsync
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ rsr a2, excsave1
+ movi a3, 0x08000000
+ bgeu a2, a3, 1f
+ movi a3, 0xd0000000
+ add a2, a2, a3
+ wsr a2, excsave1
+1:
+#endif
+#endif
+ .end no-absolute-literals
+
+ l32r a0, .Lstartup
+ jx a0
+
+ENDPROC(_start)
+
+ __REF
+ .literal_position
+
+ENTRY(_startup)
+
/* Set a0 to 0 for the remaining initialization. */
movi a0, 0
@@ -82,59 +115,50 @@ _startup:
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
- wsr a0, IBREAKENABLE
- wsr a0, ICOUNT
+#if XCHAL_NUM_IBREAK > 0
+ wsr a0, ibreakenable
+#endif
+ wsr a0, icount
movi a1, 15
- wsr a0, ICOUNTLEVEL
+ wsr a0, icountlevel
.set _index, 0
.rept XCHAL_NUM_DBREAK - 1
- wsr a0, DBREAKC + _index
+ wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1
.endr
#endif
/* Clear CCOUNT (not really necessary, but nice) */
- wsr a0, CCOUNT # not really necessary, but nice
+ wsr a0, ccount # not really necessary, but nice
/* Disable zero-loops. */
#if XCHAL_HAVE_LOOPS
- wsr a0, LCOUNT
+ wsr a0, lcount
#endif
/* Disable all timers. */
.set _index, 0
- .rept XCHAL_NUM_TIMERS - 1
- wsr a0, CCOMPARE + _index
+ .rept XCHAL_NUM_TIMERS
+ wsr a0, SREG_CCOMPARE + _index
.set _index, _index + 1
.endr
/* Interrupt initialization. */
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
- wsr a0, INTENABLE
- wsr a2, INTCLEAR
+ wsr a0, intenable
+ wsr a2, intclear
/* Disable coprocessors. */
-#if XCHAL_CP_NUM > 0
- wsr a0, CPENABLE
+#if XCHAL_HAVE_CP
+ wsr a0, cpenable
#endif
- /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
- *
- * Note: PS.EXCM must be cleared before using any loop
- * instructions; otherwise, they are silently disabled, and
- * at most one iteration of the loop is executed.
- */
-
- movi a1, 1
- wsr a1, PS
- rsync
-
/* Initialize the caches.
* a2, a3 are just working registers (clobbered).
*/
@@ -152,6 +176,37 @@ _startup:
isync
+#ifdef CONFIG_HAVE_SMP
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 1
+ wer a3, a2
+#endif
+
+ /* Setup stack and enable window exceptions (keep irqs disabled) */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+
+ movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+ # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
+ wsr a2, ps # (enable reg-windows; progmode stack)
+ rsync
+
+ /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
+
+ movi a2, debug_exception
+ wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
+
+#ifdef CONFIG_SMP
+ /*
+	 * Notice that with SMP we assume the cores support the
+	 * PRID special register.
+ */
+ rsr a2, prid
+ bnez a2, .Lboot_secondary
+
+#endif /* CONFIG_SMP */
+
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
@@ -199,25 +254,13 @@ _startup:
___flush_dcache_all a2 a3
#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
- /* Setup stack and enable window exceptions (keep irqs disabled) */
-
- movi a1, init_thread_union
- addi a1, a1, KERNEL_STACK_SIZE
-
- movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
- wsr a2, PS # (enable reg-windows; progmode stack)
- rsync
-
- /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
-
- movi a2, debug_exception
- wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
-
- /* Set up EXCSAVE[1] to point to the exc_table. */
-
- movi a6, exc_table
- xsr a6, EXCSAVE_1
+ movi a6, 0
+ xsr a6, excsave1
/* init_arch kick-starts the linux kernel */
@@ -230,6 +273,92 @@ _startup:
should_never_return:
j should_never_return
+#ifdef CONFIG_SMP
+.Lboot_secondary:
+
+ movi a2, cpu_start_ccount
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
+1:
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+ movi a3, 0
+ s32i a3, a2, 0
+ memw
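	/*
	 * Read as C, the handshake above is roughly (sketch only; the
	 * 'wsr a3, ccount' is the write into this core's cycle counter):
	 *
	 *	while (!cpu_start_ccount) ;	wait for the boot CPU
	 *	cpu_start_ccount = 0;		ack
	 *	while (!cpu_start_ccount) ;	wait for the CCOUNT snapshot
	 *	<write that value to CCOUNT>	sync the cycle counter
	 *	cpu_start_ccount = 0;		ack again
	 */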
+
+ movi a6, 0
+ wsr a6, excsave1
+
+ movi a4, secondary_start_kernel
+ callx4 a4
+ j should_never_return
+
+#endif /* CONFIG_SMP */
+
+ENDPROC(_startup)
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+ ___flush_invalidate_dcache_all a2 a3
+#else
+ ___invalidate_dcache_all a2 a3
+#endif
+ memw
+ movi a2, CCON # MX External Register to Configure Cache
+ movi a3, 0
+ wer a3, a2
+ extw
+
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
+ s32i a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+#endif
+1:
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+ j _startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * DATA section
+ */
+
+ .section ".data.init.refok"
+ .align 4
+ENTRY(start_info)
+ .long init_thread_union + KERNEL_STACK_SIZE
/*
* BSS section
@@ -239,6 +368,8 @@ __PAGE_ALIGNED_BSS
#ifdef CONFIG_MMU
ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
#endif
ENTRY(empty_zero_page)
.fill PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
deleted file mode 100644
index cd122fb7e48..00000000000
--- a/arch/xtensa/kernel/init_task.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/xtensa/kernel/init_task.c
- *
- * Xtensa Processor version.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 Tensilica Inc.
- *
- * Chris Zankel <chris@zankel.net>
- */
-
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/module.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
-
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/xtensa/kernel/io.c b/arch/xtensa/kernel/io.c
deleted file mode 100644
index 5b65269b1d2..00000000000
--- a/arch/xtensa/kernel/io.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/xtensa/io.c
- *
- * IO primitives
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Copied from sparc.
- *
- * Chris Zankel <chris@zankel.net>
- *
- */
-
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-void outsb(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 1;
- writeb(*(const char *)src, addr);
- src += 1;
- addr += 1;
- }
-}
-
-void outsw(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 2;
- writew(*(const short *)src, addr);
- src += 2;
- addr += 2;
- }
-}
-
-void outsl(unsigned long addr, const void *src, unsigned long count) {
- while (count) {
- count -= 4;
- writel(*(const long *)src, addr);
- src += 4;
- addr += 4;
- }
-}
-
-void insb(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 1;
- *(unsigned char *)dst = readb(addr);
- dst += 1;
- addr += 1;
- }
-}
-
-void insw(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 2;
- *(unsigned short *)dst = readw(addr);
- dst += 2;
- addr += 2;
- }
-}
-
-void insl(unsigned long addr, void *dst, unsigned long count) {
- while (count) {
- count -= 4;
- /*
- * XXX I am sure we are in for an unaligned trap here.
- */
- *(unsigned long *)dst = readl(addr);
- dst += 4;
- addr += 4;
- }
-}
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4340ee076bd..3eee94f621e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
* Xtensa built-in interrupt controller and some generic functions copied
* from i386.
*
- * Copyright (C) 2002 - 2006 Tensilica, Inc.
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
@@ -18,31 +18,27 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
+#include <linux/irqchip/xtensa-pic.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <asm/mxregs.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
-static unsigned int cached_irq_mask;
-
atomic_t irq_err_count;
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-
-asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
+ int irq = irq_find_mapping(NULL, hwirq);
- if (irq >= NR_IRQS) {
+ if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, irq);
+ __func__, hwirq);
}
- irq_enter();
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
@@ -57,95 +53,136 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
}
#endif
generic_handle_irq(irq);
-
- irq_exit();
- set_irq_regs(old_regs);
}
int arch_show_interrupts(struct seq_file *p, int prec)
{
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
-static void xtensa_irq_mask(struct irq_data *d)
+int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
+ unsigned long int_irq, unsigned long ext_irq,
+ unsigned long *out_hwirq, unsigned int *out_type)
{
- cached_irq_mask &= ~(1 << d->irq);
- set_sr (cached_irq_mask, INTENABLE);
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ int_irq = xtensa_map_ext_irq(ext_irq);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = int_irq;
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
}
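/*
 * In device-tree terms (example specifiers made up here to match the xlate
 * logic above, not taken from a real .dts):
 *
 *	interrupts = <3>;	one cell: core interrupt 3, used as-is
 *	interrupts = <5 1>;	second cell 1: external IRQ 5, translated
 *				through xtensa_map_ext_irq()
 */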
-static void xtensa_irq_unmask(struct irq_data *d)
+int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
{
- cached_irq_mask |= 1 << d->irq;
- set_sr (cached_irq_mask, INTENABLE);
+ struct irq_chip *irq_chip = d->host_data;
+ u32 mask = 1 << hw;
+
+ if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_simple_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_percpu_irq, "timer");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+ /* XCHAL_INTTYPE_MASK_NMI */
+ irq_set_chip_and_handler_name(irq, irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
}
-static void xtensa_irq_enable(struct irq_data *d)
+unsigned xtensa_map_ext_irq(unsigned ext_irq)
{
- variant_irq_enable(d->irq);
- xtensa_irq_unmask(d->irq);
-}
+ unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+ unsigned i;
-static void xtensa_irq_disable(struct irq_data *d)
-{
- xtensa_irq_mask(d->irq);
- variant_irq_disable(d->irq);
+ for (i = 0; mask; ++i, mask >>= 1) {
+ if ((mask & 1) && ext_irq-- == 0)
+ return i;
+ }
+ return XCHAL_NUM_INTERRUPTS;
}
-static void xtensa_irq_ack(struct irq_data *d)
+unsigned xtensa_get_ext_irq_no(unsigned irq)
{
- set_sr(1 << d->irq, INTCLEAR);
+ unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL) &
+ ((1u << irq) - 1);
+ return hweight32(mask);
}
-static int xtensa_irq_retrigger(struct irq_data *d)
+void __init init_IRQ(void)
{
- set_sr (1 << d->irq, INTSET);
- return 1;
-}
-
+#ifdef CONFIG_OF
+ irqchip_init();
+#else
+#ifdef CONFIG_HAVE_SMP
+ xtensa_mx_init_legacy(NULL);
+#else
+ xtensa_pic_init_legacy(NULL);
+#endif
+#endif
-static struct irq_chip xtensa_irq_chip = {
- .name = "xtensa",
- .irq_enable = xtensa_irq_enable,
- .irq_disable = xtensa_irq_disable,
- .irq_mask = xtensa_irq_mask,
- .irq_unmask = xtensa_irq_unmask,
- .irq_ack = xtensa_irq_ack,
- .irq_retrigger = xtensa_irq_retrigger,
-};
+#ifdef CONFIG_SMP
+ ipi_init();
+#endif
+ variant_init_irq();
+}
-void __init init_IRQ(void)
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
{
- int index;
-
- for (index = 0; index < XTENSA_NR_IRQS; index++) {
- int mask = 1 << index;
+ unsigned int i, cpu = smp_processor_id();
- if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_simple_irq);
+ for_each_active_irq(i) {
+ struct irq_data *data = irq_get_irq_data(i);
+ unsigned int newcpu;
- else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_edge_irq);
+ if (irqd_is_per_cpu(data))
+ continue;
- else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_level_irq);
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ continue;
- else if (mask & XCHAL_INTTYPE_MASK_TIMER)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_edge_irq);
+ newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
- else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
- /* XCHAL_INTTYPE_MASK_NMI */
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_level_irq);
+ cpumask_setall(data->affinity);
+ }
+ irq_set_affinity(i, data->affinity);
}
-
- cached_irq_mask = 0;
-
- variant_init_irq();
}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S
new file mode 100644
index 00000000000..0eeda2e4a25
--- /dev/null
+++ b/arch/xtensa/kernel/mcount.S
@@ -0,0 +1,50 @@
+/*
+ * arch/xtensa/kernel/mcount.S
+ *
+ * Xtensa specific mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+/*
+ * Entry condition:
+ *
+ * a2: a0 of the caller
+ */
+
+ENTRY(_mcount)
+
+ entry a1, 16
+
+ movi a4, ftrace_trace_function
+ l32i a4, a4, 0
+ movi a3, ftrace_stub
+ bne a3, a4, 1f
+ retw
+
+1: xor a7, a2, a1
+ movi a3, 0x3fffffff
+ and a7, a7, a3
+ xor a7, a7, a1
+
+ xor a6, a0, a1
+ and a6, a6, a3
+ xor a6, a6, a1
+ addi a6, a6, -MCOUNT_INSN_SIZE
+ callx4 a4
+
+ retw
+
+ENDPROC(_mcount)
+
+ENTRY(ftrace_stub)
+ entry a1, 16
+ retw
+ENDPROC(ftrace_stub)
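/*
 * Rough C equivalent of _mcount (sketch only; the xor/and/xor pairs above
 * replace the top two bits of each windowed return address with those of
 * the stack pointer to form plain kernel addresses):
 *
 *	if (ftrace_trace_function != ftrace_stub)
 *		ftrace_trace_function(own_ra - MCOUNT_INSN_SIZE, parent_ra);
 */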
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index 451dda928c9..b715237bae6 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
struct module *mod)
{
unsigned int i;
- Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned char *location;
uint32_t value;
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 00000000000..77a161a112c
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,85 @@
+/*
+ * Xtensa Secondary Processors startup code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ *
+ * Joe Taylor <joe@tensilica.com>
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <asm/mxregs.h>
+#include <asm/regs.h>
+
+
+ .section .SecondaryResetVector.text, "ax"
+
+
+ENTRY(_SecondaryResetVector)
+ _j _SetupOCD
+
+ .begin no-absolute-literals
+ .literal_position
+
+_SetupOCD:
+ /*
+ * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+ * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+ * xt-gdb to single step via DEBUG exceptions received directly
+ * by ocd.
+ */
+ movi a1, 1
+ movi a0, 0
+ wsr a1, windowstart
+ wsr a0, windowbase
+ rsync
+
+ movi a1, LOCKLEVEL
+ wsr a1, ps
+ rsync
+
+_SetupMMU:
+ Offset = _SetupMMU - _SecondaryResetVector
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ /*
+ * Start Secondary Processors with NULL pointer to boot params.
+ */
+ movi a2, 0 # a2 == NULL
+ movi a3, _startup
+ jx a3
+
+ .end no-absolute-literals
+
+
+ .section .SecondaryResetVector.remapped_text, "ax"
+ .global _RemappedSecondaryResetVector
+
+ .org 0 # Need to do org before literals
+
+_RemappedSecondaryResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+ . = _RemappedSecondaryResetVector + Offset
+
+_RemappedSetupMMU:
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 2783fda76dd..2d9cc6dbfd7 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
@@ -62,6 +63,7 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
return (void*)uncached;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
@@ -73,6 +75,7 @@ void dma_free_coherent(struct device *hwdev, size_t size,
free_pages(addr, get_order(size));
}
+EXPORT_SYMBOL(dma_free_coherent);
void consistent_sync(void *vaddr, size_t size, int direction)
@@ -92,3 +95,4 @@ void consistent_sync(void *vaddr, size_t size, int direction)
break;
}
}
+EXPORT_SYMBOL(consistent_sync);
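/*
 * With these exports in place a loadable module can use the coherent DMA
 * helpers directly, e.g. (sketch only; 'dev' is whatever struct device the
 * driver already has):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf) {
 *		... program 'handle' into the device, touch 'buf' from the CPU ...
 *		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 *	}
 */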
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 61045c192e8..5b3403388d7 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -46,7 +46,6 @@
* pcibios_fixups
* pcibios_align_resource
* pcibios_fixup_bus
- * pcibios_setup
* pci_bus_add_device
* pci_mmap_page_range
*/
@@ -78,9 +77,9 @@ pcibios_align_resource(void *data, const struct resource *res,
if (res->flags & IORESOURCE_IO) {
if (size > 0x100) {
- printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%ld bytes)\n", pci_name(dev),
- dev->resource - res, size);
+ pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
+ pci_name(dev), dev->resource - res,
+ size);
}
if (start & 0x300)
@@ -153,7 +152,7 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
}
res->start += io_offset;
res->end += io_offset;
- pci_add_resource(resources, res);
+ pci_add_resource_offset(resources, res, io_offset);
for (i = 0; i < 3; i++) {
res = &pci_ctrl->mem_resources[i];
@@ -175,7 +174,7 @@ static int __init pcibios_init(void)
struct pci_controller *pci_ctrl;
struct list_head resources;
struct pci_bus *bus;
- int next_busno = 0, i;
+ int next_busno = 0;
printk("PCI: Probing PCI hardware\n");
@@ -187,7 +186,7 @@ static int __init pcibios_init(void)
bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
pci_ctrl->ops, pci_ctrl, &resources);
pci_ctrl->bus = bus;
- pci_ctrl->last_busno = bus->subordinate;
+ pci_ctrl->last_busno = bus->busn_res.end;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
@@ -198,47 +197,19 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-void __init pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
{
- struct pci_controller *pci_ctrl = bus->sysdata;
- struct resource *res;
- unsigned long io_offset;
- int i;
-
- io_offset = (unsigned long)pci_ctrl->io_space.base;
if (bus->parent) {
/* This is a subordinate bridge */
pci_read_bridge_bases(bus);
-
- for (i = 0; i < 4; i++) {
- if ((res = bus->resource[i]) == NULL || !res->flags)
- continue;
- if (io_offset && (res->flags & IORESOURCE_IO)) {
- res->start += io_offset;
- res->end += io_offset;
- }
- }
}
}
-char __init *pcibios_setup(char *str)
-{
- return str;
-}
-
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
-/* the next one is stolen from the alpha port... */
-
-void __init
-pcibios_update_irq(struct pci_dev *dev, int irq)
-{
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-}
-
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
@@ -362,7 +333,7 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
int prot = pgprot_val(vma->vm_page_prot);
/* Set to write-through */
- prot &= ~_PAGE_NO_CACHE;
+ prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
#if 0
if (!write_combine)
prot |= _PAGE_WRITETHRU;
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 1b91a97f1d8..1cf008284dd 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -29,19 +29,18 @@
*/
_F(void, setup, (char** cmd), { });
-_F(void, init_irq, (void), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
+_F(void, pcibios_init, (void), { });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
{
- printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
- ccount_per_jiffy = 100 * (1000000UL/HZ);
+ pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
+ ccount_freq = 10 * 1000000UL;
});
#endif
-
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 47041e7c088..1c85323f01d 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -31,10 +31,10 @@
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
@@ -45,6 +45,7 @@
#include <asm/regs.h>
extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
@@ -104,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti)
/*
* Powermanagement idle function, if any is provided by the platform.
*/
-
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- local_irq_enable();
-
- /* endless idle loop with no priority at all */
- while (1) {
- while (!need_resched())
- platform_idle();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+ platform_idle();
}
/*
@@ -143,76 +134,138 @@ void flush_thread(void)
}
/*
- * This is called before the thread is copied.
+ * This gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
*/
-void prepare_to_copy(struct task_struct *tsk)
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
- coprocessor_flush_all(task_thread_info(tsk));
+ coprocessor_flush_all(task_thread_info(src));
#endif
+ *dst = *src;
+ return 0;
}
/*
* Copy thread.
*
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
* The stack layout for the new thread looks like this:
*
- * +------------------------+ <- sp in childregs (= tos)
+ * +------------------------+
* | childregs |
* +------------------------+ <- thread.sp = sp in dummy-frame
* | dummy-frame | (saved in dummy-frame spill-area)
* +------------------------+
*
- * We create a dummy frame to return to ret_from_fork:
- * a0 points to ret_from_fork (simulating a call4)
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
* sp points to itself (thread.sp)
- * a2, a3 are unused.
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
*
* Note: This is a pristine frame, so we don't need any spill region on top of
* childregs.
+ *
+ * The fun part: if we're keeping the same VM (i.e. cloning a thread,
+ * not an entire process), we're normally given a new usp, and we CANNOT share
+ * any live address register windows. If we just copy those live frames over,
+ * the two threads (parent and child) will overflow the same frames onto the
+ * parent stack at different times, likely corrupting the parent stack (esp.
+ * if the parent returns from functions that called clone() and calls new
+ * ones, before the child overflows its now old copies of its parent windows).
+ * One solution is to spill windows to the parent stack, but that's fairly
+ * involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg, struct task_struct *p)
{
- struct pt_regs *childregs;
- struct thread_info *ti;
- unsigned long tos;
- int user_mode = user_mode(regs);
+ struct pt_regs *childregs = task_pt_regs(p);
- /* Set up new TSS. */
- tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
- if (user_mode)
- childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
- else
- childregs = (struct pt_regs*)tos - 1;
-
- *childregs = *regs;
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+ struct thread_info *ti;
+#endif
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0;
- childregs->areg[1] = tos;
- childregs->areg[2] = 0;
- p->set_child_tid = p->clear_child_tid = NULL;
- p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
- if (user_mode(regs)) {
+ if (!(p->flags & PF_KTHREAD)) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
- int len = childregs->wmask & ~0xf;
+ /* This does not copy all the regs.
+ * In a bout of brilliance or madness,
+ * ARs beyond a0-a15 exist past the end of the struct.
+ */
+ *childregs = *regs;
childregs->areg[1] = usp;
- memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
- &regs->areg[XCHAL_NUM_AREGS - len/4], len);
-// FIXME: we need to set THREADPTR in thread_info...
- if (clone_flags & CLONE_SETTLS)
- childregs->areg[2] = childregs->areg[6];
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ usually starts on a pristine stack, so we have to reset
+ windowbase, windowstart and wmask.
+ (Note that such a new thread is required to always create
+ an initial call4 frame)
+ The exception is vfork, where the new thread continues to
+ run on the parent's stack until it calls execve. This could
+ be a call8 or call12, which requires a legal stack frame
+ of the previous caller for the overflow handlers to work.
+ (Note that it's always legal to overflow live registers).
+	   In this case, make sure to spill at least the stack pointer
+ of that frame. */
+
+ if (clone_flags & CLONE_VM) {
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
+ } else {
+ int len = childregs->wmask & ~0xf;
+ memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
+ &regs->areg[XCHAL_NUM_AREGS - len/4], len);
+ }
+ /* The thread pointer is passed in the '4th argument' (= a5) */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->threadptr = childregs->areg[5];
} else {
- /* In kernel space, we start a new thread with a new stack. */
- childregs->wmask = 1;
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread:
+ * a2 = thread_fn, a3 = thread_fn arg
+ */
+ *((int *)childregs - 1) = thread_fn_arg;
+ *((int *)childregs - 2) = usp_thread_fn;
+
+ /* Childregs are only used when we're going to userspace
+ * in which case start_thread will set them up.
+ */
}
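	/* Putting the stores above together (illustration derived from this
	 * function, not new behaviour), the dummy call4 frame below childregs
	 * ends up laid out as:
	 *
	 *	((int *)childregs)[-1]	a3 slot: thread_fn arg	(kernel threads)
	 *	((int *)childregs)[-2]	a2 slot: thread_fn	(kernel threads)
	 *	((int *)childregs)[-3]	a1 slot: childregs	(sp points at itself)
	 *	((int *)childregs)[-4]	a0 slot: 0
	 */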
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
@@ -277,7 +330,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
/* Don't leak any random bits. */
- memset(elfregs, 0, sizeof (elfregs));
+ memset(elfregs, 0, sizeof(*elfregs));
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
@@ -301,39 +354,3 @@ int dump_fpu(void)
{
return 0;
}
-
-asmlinkage
-long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void *child_tls,
- void __user *child_tid, long a5,
- struct pt_regs *regs)
-{
- if (!newsp)
- newsp = regs->areg[1];
- return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-}
-
-/*
- * xtensa_execve() executes a new program.
- */
-
-asmlinkage
-long xtensa_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- long a3, long a4, long a5,
- struct pt_regs *regs)
-{
- long error;
- char * filename;
-
- filename = getname(name);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, argv, envp, regs);
- putname(filename);
-out:
- return error;
-}
-
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 2dff698ab02..562fac66475 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -24,7 +24,6 @@
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/elf.h>
@@ -54,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
xtensa_gregset_t __user *gregset = uregs;
- unsigned long wm = regs->wmask;
unsigned long wb = regs->windowbase;
- int live, i;
+ int i;
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
return -EIO;
@@ -68,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
__put_user(regs->lcount, &gregset->lcount);
__put_user(regs->windowstart, &gregset->windowstart);
__put_user(regs->windowbase, &gregset->windowbase);
+ __put_user(regs->threadptr, &gregset->threadptr);
- live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
-
- for (i = 0; i < live; i++)
- __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
- for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
- __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+ for (i = 0; i < XCHAL_NUM_AREGS; i++)
+ __put_user(regs->areg[i],
+ gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
return 0;
}
@@ -85,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
xtensa_gregset_t *gregset = uregs;
const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
unsigned long ps;
- unsigned long wb;
+ unsigned long wb, ws;
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
return -EIO;
@@ -95,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
__get_user(regs->lbeg, &gregset->lbeg);
__get_user(regs->lend, &gregset->lend);
__get_user(regs->lcount, &gregset->lcount);
- __get_user(regs->windowstart, &gregset->windowstart);
+ __get_user(ws, &gregset->windowstart);
__get_user(wb, &gregset->windowbase);
+ __get_user(regs->threadptr, &gregset->threadptr);
regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
if (wb >= XCHAL_NUM_AREGS / 4)
return -EFAULT;
- regs->windowbase = wb;
+ if (wb != regs->windowbase || ws != regs->windowstart) {
+ unsigned long rotws, wmask;
+
+ rotws = (((ws | (ws << WSBITS)) >> wb) &
+ ((1 << WSBITS) - 1)) & ~1;
+ wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+ (rotws & 0xF) | 1;
+ regs->windowbase = wb;
+ regs->windowstart = ws;
+ regs->wmask = wmask;
+ }
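+	/* Worked example (numbers made up): with 32 address registers
+	 * (WSBITS == 8), wb == 2 and ws == 0x6 (windowstart bits 1 and 2),
+	 * rotws = (((0x6 | 0x600) >> 2) & 0xff) & ~1 = 0x80 and
+	 * wmask = ((8 + 1 - ffs(0x80)) << 4) | (0x80 & 0xf) | 1 = 0x11.
+	 */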
if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
- gregset->a, wb * 16))
+ gregset->a, wb * 16))
return -EFAULT;
- if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+ if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+ (WSBITS - wb) * 16))
return -EFAULT;
return 0;
@@ -155,7 +163,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
- ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+ ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
sizeof(xtregs_coprocessor_t));
#endif
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
@@ -344,4 +352,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
}
-
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 1e5a034fe01..06370ccea9e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -21,6 +21,11 @@
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
@@ -34,8 +39,8 @@
# include <linux/seq_file.h>
#endif
-#include <asm/system.h>
#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
@@ -43,6 +48,9 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/sysmem.h>
#include <platform/hardware.h>
@@ -59,14 +67,16 @@ extern struct rtc_ops no_rtc_ops;
struct rtc_ops *rtc_ops;
#ifdef CONFIG_BLK_DEV_INITRD
-extern void *initrd_start;
-extern void *initrd_end;
-extern void *__initrd_start;
-extern void *__initrd_end;
+extern unsigned long initrd_start;
+extern unsigned long initrd_end;
int initrd_is_mapped = 0;
extern int initrd_below_start_ok;
#endif
+#ifdef CONFIG_OF
+void *dtb_start = __dtb_start;
+#endif
+
unsigned char aux_device_present;
extern unsigned long loops_per_jiffy;
@@ -78,20 +88,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
-sysmem_info_t __initdata sysmem;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-int initrd_is_mapped;
-#endif
-
-#ifdef CONFIG_MMU
-extern void init_mmu(void);
-#else
-static inline void init_mmu(void) { }
-#endif
-
-extern void zones_init(void);
-
/*
* Boot parameter parsing.
*
@@ -107,30 +103,18 @@ typedef struct tagtable {
} tagtable_t;
#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
- __attribute__((unused, __section__(".taglist"))) = { tag, fn }
+ __attribute__((used, section(".taglist"))) = { tag, fn }
/* parse current tag */
static int __init parse_tag_mem(const bp_tag_t *tag)
{
- meminfo_t *mi = (meminfo_t*)(tag->data);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
- printk(KERN_WARNING
- "Ignoring memory bank 0x%08lx size %ldKB\n",
- (unsigned long)mi->start,
- (unsigned long)mi->end - (unsigned long)mi->start);
- return -EINVAL;
- }
- sysmem.bank[sysmem.nr_banks].type = mi->type;
- sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
- sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE;
- sysmem.nr_banks++;
-
- return 0;
+ return add_sysmem_bank(mi->start, mi->end);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -139,22 +123,33 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
static int __init parse_tag_initrd(const bp_tag_t* tag)
{
- meminfo_t* mi;
- mi = (meminfo_t*)(tag->data);
- initrd_start = (void*)(mi->start);
- initrd_end = (void*)(mi->end);
+ struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
+ initrd_start = (unsigned long)__va(mi->start);
+ initrd_end = (unsigned long)__va(mi->end);
return 0;
}
__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+#ifdef CONFIG_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+ dtb_start = __va(tag->data[0]);
+ return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+#endif /* CONFIG_OF */
+
#endif /* CONFIG_BLK_DEV_INITRD */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
- strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
- command_line[COMMAND_LINE_SIZE - 1] = '\0';
+ strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
return 0;
}
@@ -192,36 +187,108 @@ static int __init parse_bootparam(const bp_tag_t* tag)
return 0;
}
-/*
- * Initialize architecture. (Early stage)
- */
+#ifdef CONFIG_OF
+bool __initdata dt_memory_scan = false;
-void __init init_arch(bp_tag_t *bp_start)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+EXPORT_SYMBOL(xtensa_kio_paddr);
+
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
{
+ const __be32 *ranges;
+ int len;
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = &__initrd_start;
- initrd_end = &__initrd_end;
-#endif
+ if (depth > 1)
+ return 0;
- sysmem.nr_banks = 0;
+ if (!of_flat_dt_is_compatible(node, "simple-bus"))
+ return 0;
-#ifdef CONFIG_CMDLINE_BOOL
- strcpy(command_line, default_command_line);
+ ranges = of_get_flat_dt_prop(node, "ranges", &len);
+ if (!ranges)
+ return 1;
+ if (len == 0)
+ return 1;
+
+ xtensa_kio_paddr = of_read_ulong(ranges+1, 1);
+ /* round down to nearest 256MB boundary */
+ xtensa_kio_paddr &= 0xf0000000;
+
+ return 1;
+}
+#else
+static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ return 1;
+}
#endif
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ if (!dt_memory_scan)
+ return;
+
+ size &= PAGE_MASK;
+ add_sysmem_bank(base, base + size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __alloc_bootmem(size, align, 0);
+}
+
+void __init early_init_devtree(void *params)
+{
+ if (sysmem.nr_banks == 0)
+ dt_memory_scan = true;
+
+ early_init_dt_scan(params);
+ of_scan_flat_dt(xtensa_dt_io_area, NULL);
+
+ if (!command_line[0])
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+}
+
+static int __init xtensa_device_probe(void)
+{
+ of_clk_init(NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+}
+
+device_initcall(xtensa_device_probe);
+
+#endif /* CONFIG_OF */
+
+/*
+ * Initialize architecture. (Early stage)
+ */
+
+void __init init_arch(bp_tag_t *bp_start)
+{
/* Parse boot parameters */
- if (bp_start)
- parse_bootparam(bp_start);
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+#ifdef CONFIG_OF
+ early_init_devtree(dtb_start);
+#endif
if (sysmem.nr_banks == 0) {
- sysmem.nr_banks = 1;
- sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
- sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
- + PLATFORM_DEFAULT_MEM_SIZE;
+ add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+ PLATFORM_DEFAULT_MEM_START +
+ PLATFORM_DEFAULT_MEM_SIZE);
}
+#ifdef CONFIG_CMDLINE_BOOL
+ if (!command_line[0])
+ strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
/* Early hook for platforms */
platform_init(bp_start);
@@ -247,14 +314,146 @@ extern char _UserExceptionVector_literal_start;
extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start;
extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
-void __init setup_arch(char **cmdline_p)
+
+
+#ifdef CONFIG_S32C1I_SELFTEST
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (soc bringup assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ we can get rid of this panic for single core (not SMP) */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ Display reminder for early engr test or demo chips / FPGA bitstreams */
+static int __init check_s32c1i(void)
{
- extern int mem_reserve(unsigned long, unsigned long, int);
- extern void bootmem_init(void);
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
+#endif /* XCHAL_HAVE_S32C1I */
+early_initcall(check_s32c1i);
+#endif /* CONFIG_S32C1I_SELFTEST */
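The selftest above probes whether S32C1I behaves as a working compare-and-swap or takes an exception. As a rough illustration of the semantics being checked — not of the xtensa instruction or the PC-probing trick — here is a standalone sketch using a GCC atomic builtin; the function and variable names are invented for the example:

#include <stdio.h>

/* If *v == cmp, store set and return the old value; otherwise leave *v
 * alone and return its current value. This is what the probed S32C1I
 * sequence is expected to do when it does not fault. */
static int compare_swap(int *v, int cmp, int set)
{
	__atomic_compare_exchange_n(v, &cmp, set, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return cmp;	/* holds the previous value of *v either way */
}

int main(void)
{
	int word = 1;

	/* comparison fails: word stays 1, the old value 1 is returned */
	printf("%d %d\n", compare_swap(&word, 0, 2), word);

	/* comparison succeeds: word becomes 0xabcde */
	word = 0x1234567;
	printf("%x %x\n", compare_swap(&word, 0x1234567, 0xabcde), word);
	return 0;
}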
+
+
+void __init setup_arch(char **cmdline_p)
+{
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
/* Reserve some memory regions */
@@ -262,9 +461,9 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
- __pa(initrd_end), 0);
+ __pa(initrd_end), 0) == 0;
initrd_below_start_ok = 1;
- } else {
+ } else {
initrd_start = 0;
}
#endif
@@ -286,10 +485,37 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
__pa(&_DoubleExceptionVector_text_end), 0);
+#if XCHAL_EXCM_LEVEL >= 2
+ mem_reserve(__pa(&_Level2InterruptVector_text_start),
+ __pa(&_Level2InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ mem_reserve(__pa(&_Level3InterruptVector_text_start),
+ __pa(&_Level3InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ mem_reserve(__pa(&_Level4InterruptVector_text_start),
+ __pa(&_Level4InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ mem_reserve(__pa(&_Level5InterruptVector_text_start),
+ __pa(&_Level5InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ mem_reserve(__pa(&_Level6InterruptVector_text_start),
+ __pa(&_Level6InterruptVector_text_end), 0);
+#endif
+
+ parse_early_param();
bootmem_init();
+ unflatten_and_copy_device_tree();
+
platform_setup(cmdline_p);
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
paging_init();
zones_init();
@@ -307,6 +533,22 @@ void __init setup_arch(char **cmdline_p)
#endif
}
+static DEFINE_PER_CPU(struct cpu, cpu_data);
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_data, i);
+ cpu->hotpluggable = !!i;
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
void machine_restart(char * cmd)
{
platform_restart();
@@ -332,21 +574,27 @@ void machine_power_off(void)
static int
c_show(struct seq_file *f, void *slot)
{
+ char buf[NR_CPUS * 5];
+
+ cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
/* high-level stuff */
- seq_printf(f,"processor\t: 0\n"
- "vendor_id\t: Tensilica\n"
- "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
- "core ID\t\t: " XCHAL_CORE_ID "\n"
- "build ID\t: 0x%x\n"
- "byte order\t: %s\n"
- "cpu MHz\t\t: %lu.%02lu\n"
- "bogomips\t: %lu.%02lu\n",
- XCHAL_BUILD_UNIQUE_ID,
- XCHAL_HAVE_BE ? "big" : "little",
- CCOUNT_PER_JIFFY/(1000000/HZ),
- (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
+ seq_printf(f, "CPU count\t: %u\n"
+ "CPU list\t: %s\n"
+ "vendor_id\t: Tensilica\n"
+ "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
+ "core ID\t\t: " XCHAL_CORE_ID "\n"
+ "build ID\t: 0x%x\n"
+ "byte order\t: %s\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
+ "bogomips\t: %lu.%02lu\n",
+ num_online_cpus(),
+ buf,
+ XCHAL_BUILD_UNIQUE_ID,
+ XCHAL_HAVE_BE ? "big" : "little",
+ ccount_freq/1000000,
+ (ccount_freq/10000) % 100,
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI
@@ -394,6 +642,9 @@ c_show(struct seq_file *f, void *slot)
#if XCHAL_HAVE_FP
"fpu "
#endif
+#if XCHAL_HAVE_S32C1I
+ "s32c1i "
+#endif
"\n");
/* Registers. */
@@ -425,7 +676,7 @@ c_show(struct seq_file *f, void *slot)
"icache size\t: %d\n"
"icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n"
"dcache line size: %d\n"
@@ -433,10 +684,10 @@ c_show(struct seq_file *f, void *slot)
"dcache size\t: %d\n"
"dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
- "writeback"
+ "writeback "
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n",
XCHAL_ICACHE_LINESIZE,
@@ -455,7 +706,7 @@ c_show(struct seq_file *f, void *slot)
static void *
c_start(struct seq_file *f, loff_t *pos)
{
- return (void *) ((*pos == 0) ? (void *)1 : NULL);
+ return (*pos == 0) ? (void *)1 : NULL;
}
static void *
@@ -471,11 +722,10 @@ c_stop(struct seq_file *f, void *v)
const struct seq_operations cpuinfo_op =
{
- start: c_start,
- next: c_next,
- stop: c_stop,
- show: c_show
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show,
};
#endif /* CONFIG_PROC_FS */
-
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index f2220b5bdce..98b67d5f151 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -19,7 +19,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
-#include <linux/freezer.h>
+#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
@@ -29,10 +29,6 @@
#define DEBUG_SIG 0
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
-
extern struct task_struct *coproc_owners[];
struct rt_sigframe
@@ -216,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
if (err)
return err;
- /* The signal handler may have used coprocessors in which
+ /* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy
* context-switching mechanisms of CP exception handling.
@@ -248,6 +244,9 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
sigset_t set;
int ret;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (regs->depc > 64)
panic("rt_sigreturn in double exception!\n");
@@ -259,18 +258,14 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, frame))
goto badframe;
ret = regs->areg[2];
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->areg[1]) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return ret;
@@ -336,17 +331,17 @@ gen_return_code(unsigned char *codemem)
}
-static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
+static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
int signal;
- unsigned long sp, ra;
+ unsigned long sp, ra, tp;
sp = regs->areg[1];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
sp = current->sas_ss_sp + current->sas_ss_size;
}
@@ -373,11 +368,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
- &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->areg[1]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]);
err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
@@ -400,8 +391,9 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Return context not modified until this point.
*/
- /* Set up registers for signal handler */
- start_thread(regs, (unsigned long) ka->sa.sa_handler,
+ /* Set up registers for signal handler; preserve the threadptr */
+ tp = regs->threadptr;
+ start_thread(regs, (unsigned long) ka->sa.sa_handler,
(unsigned long) frame);
/* Set up a stack frame for a call4
@@ -411,6 +403,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->areg[6] = (unsigned long) signal;
regs->areg[7] = (unsigned long) &frame->info;
regs->areg[8] = (unsigned long) &frame->uc;
+ regs->threadptr = tp;
/* Set access mode to USER_DS. Nomenclature is outdated, but
* functionality is used in uaccess.h
@@ -422,58 +415,13 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, signal, frame, regs->pc);
#endif
- return;
+ return 0;
give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
-}
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-
-asmlinkage long xtensa_rt_sigsuspend(sigset_t __user *unewset,
- size_t sigsetsize,
- long a2, long a3, long a4, long a5,
- struct pt_regs *regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
-
- sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->areg[2] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
- stack_t __user *uoss,
- long a2, long a3, long a4, long a5,
- struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->areg[1]);
+ force_sigsegv(sig, current);
+ return -EFAULT;
}
-
-
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
@@ -483,26 +431,18 @@ asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- if (!user_mode(regs))
- return 0;
-
- if (try_to_freeze())
- goto no_signal;
-
- if (!oldset)
- oldset = &current->blocked;
-
task_pt_regs(current)->icountlevel = 0;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
+ int ret;
/* Are we from a system call? */
@@ -536,24 +476,17 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
/* Whee! Actually deliver the signal. */
/* Set up the stack frame */
- setup_frame(signr, &ka, &info, oldset, regs);
-
- if (ka.sa.sa_flags & SA_ONESHOT)
- ka.sa.sa_handler = SIG_DFL;
+ ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
+ if (ret)
+ return;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
- if (!(ka.sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ signal_delivered(signr, &info, &ka, regs, 0);
if (current->ptrace & PT_SINGLESTEP)
task_pt_regs(current)->icountlevel = 1;
- return 1;
+ return;
}
-no_signal:
/* Did we come from a system call? */
if ((signed) regs->syscall >= 0) {
/* Restart the system call - no handlers present */
@@ -570,8 +503,20 @@ no_signal:
break;
}
}
+
+ /* If there's no signal to deliver, we just restore the saved mask. */
+ restore_saved_sigmask();
+
if (current->ptrace & PT_SINGLESTEP)
task_pt_regs(current)->icountlevel = 1;
- return 0;
+ return;
}
+void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING))
+ do_signal(regs);
+
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
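The syscall-restart handling in do_signal() above is what user space observes as EINTR versus a transparent restart. A small, ordinary POSIX illustration of that visible behaviour (nothing xtensa-specific; the names and the one-second alarm are arbitrary):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;	/* only here to interrupt the blocking read */
}

int main(void)
{
	struct sigaction sa;
	int fds[2];
	char c;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;	/* no SA_RESTART: read() fails with EINTR */
	sigaction(SIGALRM, &sa, NULL);

	pipe(fds);
	alarm(1);
	if (read(fds[0], &c, 1) < 0)
		printf("read interrupted: %s\n", strerror(errno));

	/* with sa.sa_flags = SA_RESTART the kernel reissues the read instead,
	 * which is the behaviour the restart logic in do_signal() implements */
	return 0;
}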
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 00000000000..40b5a3771fb
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,607 @@
+/*
+ * Xtensa SMP support functions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2013 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>

+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/mmu_context.h>
+#include <asm/mxregs.h>
+#include <asm/platform.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#ifdef CONFIG_SMP
+# if XCHAL_HAVE_S32C1I == 0
+# error "The S32C1I option is required for SMP."
+# endif
+#endif
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size);
+
+/* IPI (Inter Process Interrupt) */
+
+#define IPI_IRQ 0
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id);
+static struct irqaction ipi_irqaction = {
+ .handler = ipi_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "ipi",
+};
+
+void ipi_init(void)
+{
+ unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
+ setup_irq(irq, &ipi_irqaction);
+}
+
+static inline unsigned int get_core_count(void)
+{
+ /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
+ unsigned int syscfgid = get_er(SYSCFGID);
+ return ((syscfgid >> 18) & 0xf) + 1;
+}
+
+static inline int get_core_id(void)
+{
+ /* Bits 0...18 of SYSCFGID contain the core id */
+ unsigned int core_id = get_er(SYSCFGID);
+ return core_id & 0x3fff;
+}
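The two helpers above only pull bit fields out of SYSCFGID. A throwaway userspace rendering of the same arithmetic, using an invented register value and the shifts/masks exactly as they appear in the code:

#include <stdio.h>

int main(void)
{
	/* invented SYSCFGID value, purely for illustration */
	unsigned int syscfgid = 0x000c0002;

	/* core count: bits 18..21 hold (count - 1), as in get_core_count() */
	unsigned int ncpus = ((syscfgid >> 18) & 0xf) + 1;

	/* core id: low bits, masked exactly as get_core_id() does */
	unsigned int core_id = syscfgid & 0x3fff;

	printf("cores=%u id=%u\n", ncpus, core_id);	/* prints cores=4 id=2 */
	return 0;
}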
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned i;
+
+ for (i = 0; i < max_cpus; ++i)
+ set_cpu_present(i, true);
+}
+
+void __init smp_init_cpus(void)
+{
+ unsigned i;
+ unsigned int ncpus = get_core_count();
+ unsigned int core_id = get_core_id();
+
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+ BUG_ON(cpu != 0);
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
+static DECLARE_COMPLETION(cpu_running);
+
+void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ init_mmu();
+
+#ifdef CONFIG_DEBUG_KERNEL
+ if (boot_secondary_processors == 0) {
+ pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+ for (;;)
+ __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
+ }
+
+ pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
+ __func__, boot_secondary_processors, cpu);
+#endif
+ /* Init EXCSAVE1 */
+
+ secondary_trap_init();
+
+ /* All kernel threads share the same mm context. */
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ enter_lazy_tlb(mm, current);
+
+ preempt_disable();
+ trace_hardirqs_off();
+
+ calibrate_delay();
+
+ notify_cpu_starting(cpu);
+
+ secondary_init_irq();
+ local_timer_setup(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ complete(&cpu_running);
+
+ cpu_startup_entry(CPUHP_ONLINE);
+}
+
+static void mx_cpu_start(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+static void mx_cpu_stop(void *p)
+{
+ unsigned cpu = (unsigned)p;
+ unsigned long run_stall_mask = get_er(MPSCORE);
+
+ set_er(run_stall_mask | (1u << cpu), MPSCORE);
+ pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
+ __func__, cpu, run_stall_mask, get_er(MPSCORE));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
+unsigned long cpu_start_ccount;
+
+static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned long ccount;
+ int i;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpu_start_id = cpu;
+ system_flush_invalidate_dcache_range(
+ (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
+ for (i = 0; i < 2; ++i) {
+ do
+ ccount = get_ccount();
+ while (!ccount);
+
+ cpu_start_ccount = ccount;
+
+ while (time_before(jiffies, timeout)) {
+ mb();
+ if (!cpu_start_ccount)
+ break;
+ }
+
+ if (cpu_start_ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+ (void *)cpu, 1);
+ cpu_start_ccount = 0;
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret = 0;
+
+ if (cpu_asid_cache(cpu) == 0)
+ cpu_asid_cache(cpu) = ASID_USER_FIRST;
+
+ start_info.stack = (unsigned long)task_pt_regs(idle);
+ wmb();
+
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ if (ret)
+ pr_err("CPU %u failed to boot\n", cpu);
+
+ return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ invalidate_page_directory();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+ smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
+ if (cpu_start_id == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+ }
+ pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ idle_task_exit();
+ local_irq_disable();
+ __asm__ __volatile__(
+ " movi a2, cpu_restart\n"
+ " jx a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE = 0,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_MAX
+};
+
+static const struct {
+ const char *short_text;
+ const char *long_text;
+} ipi_text[] = {
+ { .short_text = "RES", .long_text = "Rescheduling interrupts" },
+ { .short_text = "CAL", .long_text = "Function call interrupts" },
+ { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
+};
+
+struct ipi_data {
+ unsigned long ipi_count[IPI_MAX];
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void send_ipi_message(const struct cpumask *callmask,
+ enum ipi_msg_type msg_id)
+{
+ int index;
+ unsigned long mask = 0;
+
+ for_each_cpu(index, callmask)
+ if (index != smp_processor_id())
+ mask |= 1 << index;
+
+ set_er(mask, MIPISET(msg_id));
+}
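send_ipi_message() flattens the target cpumask into a plain bit mask for the MIPISET register, skipping the sending CPU. A quick standalone model of that loop, with made-up CPU numbers:

#include <stdio.h>

int main(void)
{
	unsigned long targets = 0xf;	/* pretend CPUs 0-3 are requested */
	unsigned int self = 1;		/* pretend we are CPU 1 */
	unsigned long mask = 0;
	unsigned int cpu;

	/* same shape as the for_each_cpu() loop in send_ipi_message() */
	for (cpu = 0; cpu < 4; cpu++)
		if ((targets & (1ul << cpu)) && cpu != self)
			mask |= 1ul << cpu;

	printf("IPI mask: 0x%lx\n", mask);	/* 0xd: CPUs 0, 2 and 3 */
	return 0;
}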
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi_message(&targets, IPI_CPU_STOP);
+}
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ set_cpu_online(cpu, false);
+ machine_halt();
+}
+
+irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned int msg;
+ unsigned i;
+
+ msg = get_er(MIPICAUSE(cpu));
+ for (i = 0; i < IPI_MAX; i++)
+ if (msg & (1 << i)) {
+ set_er(1 << i, MIPICAUSE(cpu));
+ ++ipi->ipi_count[i];
+ }
+
+ if (msg & (1 << IPI_RESCHEDULE))
+ scheduler_ipi();
+ if (msg & (1 << IPI_CALL_FUNC))
+ generic_smp_call_function_interrupt();
+ if (msg & (1 << IPI_CPU_STOP))
+ ipi_cpu_stop(cpu);
+
+ return IRQ_HANDLED;
+}
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
+ unsigned i;
+
+ for (i = 0; i < IPI_MAX; ++i) {
+ seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu",
+ per_cpu(ipi_data, cpu).ipi_count[i]);
+ seq_printf(p, " %s\n", ipi_text[i].long_text);
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ pr_debug("setup_profiling_timer %d\n", multiplier);
+ return 0;
+}
+
+/* TLB flush functions */
+
+struct flush_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void ipi_flush_tlb_all(void *arg)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *arg)
+{
+ local_flush_tlb_mm(arg);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_mm, mm, 1);
+}
+
+static void ipi_flush_tlb_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = addr,
+ };
+ on_each_cpu(ipi_flush_tlb_page, &fd, 1);
+}
+
+static void ipi_flush_tlb_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_range, &fd, 1);
+}
+
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
+/* Cache flush functions */
+
+static void ipi_flush_cache_all(void *arg)
+{
+ local_flush_cache_all();
+}
+
+void flush_cache_all(void)
+{
+ on_each_cpu(ipi_flush_cache_all, NULL, 1);
+}
+
+static void ipi_flush_cache_page(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = address,
+ .addr2 = pfn,
+ };
+ on_each_cpu(ipi_flush_cache_page, &fd, 1);
+}
+
+static void ipi_flush_cache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_cache_range, &fd, 1);
+}
+
+static void ipi_flush_icache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ local_flush_icache_range(fd->addr1, fd->addr2);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+ on_each_cpu(ipi_flush_icache_range, &fd, 1);
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+ struct flush_data *fd = arg;
+ __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+ unsigned long size)
+{
+ struct flush_data fd = {
+ .addr1 = start,
+ .addr2 = size,
+ };
+ on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 00000000000..7d2c317bd98
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,120 @@
+/*
+ * arch/xtensa/kernel/stacktrace.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long a0, a1;
+ unsigned long sp_end;
+
+ a1 = (unsigned long)sp;
+ sp_end = ALIGN(a1, THREAD_SIZE);
+
+ spill_registers();
+
+ while (a1 < sp_end) {
+ struct stackframe frame;
+
+ sp = (unsigned long *)a1;
+
+ a0 = *(sp - 4);
+ a1 = *(sp - 3);
+
+ if (a1 <= (unsigned long)sp)
+ break;
+
+ frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+ return;
+ }
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+ struct stack_trace_data *trace_data = data;
+ struct stack_trace *trace = trace_data->trace;
+
+ if (trace_data->skip) {
+ --trace_data->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+
+ trace->entries[trace->nr_entries++] = frame->pc;
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ struct stack_trace_data trace_data = {
+ .trace = trace,
+ .skip = trace->skip,
+ };
+ walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+ unsigned long addr;
+ unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+ struct return_addr_data *r = data;
+
+ if (r->skip) {
+ --r->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+ r->addr = frame->pc;
+ return 1;
+}
+
+unsigned long return_address(unsigned level)
+{
+ struct return_addr_data r = {
+ .skip = level + 1,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 816e6d0d686..5d3f7a119ed 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -32,26 +32,64 @@ typedef void (*syscall_t)(void);
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
-#undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
-#undef _XTENSA_UNISTD_H
-#undef __KERNEL_SYSCALLS__
-#include <asm/unistd.h>
+#include <uapi/asm/unistd.h>
};
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
unsigned long ret;
long err;
- err = do_shmat(shmid, shmaddr, shmflg, &ret);
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
if (err)
return err;
return (long)ret;
}
-asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
{
return sys_fadvise64_64(fd, offset, len, advice);
}
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vmm;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vmm || addr + len <= vmm->vm_start)
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
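COLOUR_ALIGN() rounds the candidate address up to an SHMLBA boundary and then adds the colour bits of the file offset, so shared mappings of the same page land on the same cache colour. A standalone sketch of that arithmetic; SHMLBA and PAGE_SHIFT are assumed values here, since the real ones depend on the cache configuration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		(4 << PAGE_SHIFT)	/* assumed 16 KiB alias span */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x40001234;	/* arbitrary mapping hint */
	unsigned long pgoff = 3;		/* page offset into the file */

	/* rounds up to 0x40004000, then adds colour 0x3000 -> 0x40007000 */
	printf("0x%lx\n", COLOUR_ALIGN(addr, pgoff));
	return 0;
}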
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index ac62f9cf1e1..2a1823de69c 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -16,99 +16,163 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/sched_clock.h>
#include <asm/timex.h>
#include <asm/platform.h>
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
-unsigned long ccount_per_jiffy; /* per 1/HZ */
-unsigned long nsec_per_ccount; /* nsec per ccount increment */
-#endif
+unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
-static cycle_t ccount_read(void)
+static cycle_t ccount_read(struct clocksource *cs)
{
return (cycle_t)get_ccount();
}
+static u64 notrace ccount_sched_clock_read(void)
+{
+ return get_ccount();
+}
+
static struct clocksource ccount_clocksource = {
.name = "ccount",
.rating = 200,
.read = ccount_read,
.mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev);
+static void ccount_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+struct ccount_timer {
+ struct clock_event_device evt;
+ int irq_enabled;
+ char name[24];
};
+static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
+
+static int ccount_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ unsigned long flags, next;
+ int ret = 0;
+
+ local_irq_save(flags);
+ next = get_ccount() + delta;
+ set_linux_timer(next);
+ if (next - get_ccount() > delta)
+ ret = -ETIME;
+ local_irq_restore(flags);
+
+ return ret;
+}
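The -ETIME check above leans on unsigned wraparound: once CCOMPARE has been written, if the counter has already run past the target then "next - get_ccount()" wraps to something larger than delta. A tiny plain-C model of that comparison, with invented counter values:

#include <stdio.h>

int main(void)
{
	unsigned long ccount = 1000;	/* counter value when programming */
	unsigned long delta = 50;
	unsigned long next = ccount + delta;
	unsigned long now;

	/* read back before the deadline: difference is small, still in time */
	now = 1030;
	printf("%s\n", next - now > delta ? "missed" : "ok");	/* ok */

	/* read back after the deadline: next - now wraps to a huge value,
	 * which the clockevent code reports as -ETIME */
	now = 1060;
	printf("%s\n", next - now > delta ? "missed" : "ok");	/* missed */
	return 0;
}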
+
+static void ccount_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct ccount_timer *timer =
+ container_of(evt, struct ccount_timer, evt);
+
+ /*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+ switch (mode) {
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (timer->irq_enabled) {
+ disable_irq(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_ONESHOT:
+ if (!timer->irq_enabled) {
+ enable_irq(evt->irq);
+ timer->irq_enabled = 1;
+ }
+ default:
+ break;
+ }
+}
static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.name = "timer",
};
+void local_timer_setup(unsigned cpu)
+{
+ struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
+ struct clock_event_device *clockevent = &timer->evt;
+
+ timer->irq_enabled = 1;
+ clockevent->name = timer->name;
+ snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
+ clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
+ clockevent->rating = 300;
+ clockevent->set_next_event = ccount_timer_set_next_event;
+ clockevent->set_mode = ccount_timer_set_mode;
+ clockevent->cpumask = cpumask_of(cpu);
+ clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ if (WARN(!clockevent->irq, "error: can't map timer irq"))
+ return;
+ clockevents_config_and_register(clockevent, ccount_freq,
+ 0xf, 0xffffffff);
+}
+
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
- printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
- (int)(ccount_per_jiffy/(10000/HZ))%100);
+ printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
+ (int)(ccount_freq/10000)%100);
+#else
+ ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
- clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
-
- /* Initialize the linux timer interrupt. */
-
- setup_irq(LINUX_TIMER_INT, &timer_irqaction);
- set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
+ clocksource_register_hz(&ccount_clocksource, ccount_freq);
+ local_timer_setup(0);
+ setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
+ sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
+ clocksource_of_init();
}
/*
* The timer interrupt is called HZ times per second.
*/
-irqreturn_t timer_interrupt (int irq, void *dev_id)
+irqreturn_t timer_interrupt(int irq, void *dev_id)
{
+ struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
- unsigned long next;
-
- next = get_linux_timer();
-
-again:
- while ((signed long)(get_ccount() - next) > 0) {
-
- profile_tick(CPU_PROFILING);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-
- xtime_update(1); /* Linux handler in kernel/time/timekeeping */
-
- /* Note that writing CCOMPARE clears the interrupt. */
-
- next += CCOUNT_PER_JIFFY;
- set_linux_timer(next);
- }
+ set_linux_timer(get_linux_timer());
+ evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
-
platform_heartbeat();
- /* Make sure we didn't miss any tick... */
-
- if ((signed long)(get_ccount() - next) > 0)
- goto again;
-
return IRQ_HANDLED;
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
{
- loops_per_jiffy = CCOUNT_PER_JIFFY;
+ loops_per_jiffy = ccount_freq / HZ;
printk("Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n",
loops_per_jiffy/(1000000/HZ),
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index e64efac3b9d..eebbfd8c26f 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -11,7 +11,7 @@
*
* Essentially rewritten for the Xtensa architecture port.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
@@ -32,11 +32,13 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
+#include <asm/traps.h>
#ifdef CONFIG_KGDB
extern int gdb_enter;
@@ -97,7 +99,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-#ifdef CONFIG_UNALIGNED_USER
+#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#else
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
@@ -155,7 +157,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers.
*/
-unsigned long exc_table[EXC_TABLE_SIZE/4];
+DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
void die(const char*, struct pt_regs*, long);
@@ -193,30 +195,48 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
}
/*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
*/
-unsigned long ignored_level1_interrupts;
extern void do_IRQ(int, struct pt_regs *);
-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
{
- unsigned long intread = get_sr (INTREAD);
- unsigned long intenable = get_sr (INTENABLE);
- int i, mask;
+ static const unsigned int_level_mask[] = {
+ 0,
+ XCHAL_INTLEVEL1_MASK,
+ XCHAL_INTLEVEL2_MASK,
+ XCHAL_INTLEVEL3_MASK,
+ XCHAL_INTLEVEL4_MASK,
+ XCHAL_INTLEVEL5_MASK,
+ XCHAL_INTLEVEL6_MASK,
+ XCHAL_INTLEVEL7_MASK,
+ };
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ for (;;) {
+ unsigned intread = get_sr(interrupt);
+ unsigned intenable = get_sr(intenable);
+ unsigned int_at_level = intread & intenable;
+ unsigned level;
+
+ for (level = LOCKLEVEL; level > 0; --level) {
+ if (int_at_level & int_level_mask[level]) {
+ int_at_level &= int_level_mask[level];
+ break;
+ }
+ }
- /* Handle all interrupts (no priorities).
- * (Clear the interrupt before processing, in case it's
- * edge-triggered or software-generated)
- */
+ if (level == 0)
+ break;
- for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
- if (mask & (intread & intenable)) {
- set_sr (mask, INTCLEAR);
- do_IRQ (i,regs);
- }
+ do_IRQ(__ffs(int_at_level), regs);
}
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
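The rewritten do_interrupt() services the highest enabled interrupt level first, then dispatches the lowest-numbered pending interrupt within that level. A rough userspace model of the selection, with invented level masks standing in for the XCHAL_INTLEVEL*_MASK values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define LOCKLEVEL	3	/* assumed highest level handled here */

int main(void)
{
	/* invented per-level interrupt masks, lowest priority first */
	static const unsigned int level_mask[LOCKLEVEL + 1] = {
		0, 0x0000000f, 0x000000f0, 0x00000f00,
	};
	unsigned int pending = 0x00000212;	/* IRQs 1, 4 and 9 pending */
	unsigned int enabled = 0xffffffff;
	unsigned int cause = pending & enabled;
	int level;

	for (level = LOCKLEVEL; level > 0; --level)
		if (cause & level_mask[level])
			break;

	/* level 3 wins; within it, IRQ 9 is the lowest-numbered set bit */
	if (level > 0)
		printf("dispatch IRQ %d at level %d\n",
		       ffs(cause & level_mask[level]) - 1, level);
	return 0;
}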
/*
@@ -244,7 +264,7 @@ do_illegal_instruction(struct pt_regs *regs)
*/
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
-#ifndef CONFIG_UNALIGNED_USER
+#ifndef CONFIG_XTENSA_UNALIGNED_USER
void
do_unaligned_user (struct pt_regs *regs)
{
@@ -293,6 +313,31 @@ do_debug(struct pt_regs *regs)
}
+static void set_handler(int idx, void *handler)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
+}
+
+/* Set exception C handler - for temporary use when probing exceptions */
+
+void * __init trap_set_handler(int cause, void *handler)
+{
+ void *previous = (void *)per_cpu(exc_table, 0)[
+ EXC_TABLE_DEFAULT / 4 + cause];
+ set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
+ return previous;
+}
+
+
+static void trap_init_excsave(void)
+{
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
+}
+
/*
* Initialize dispatch tables.
*
@@ -306,8 +351,6 @@ do_debug(struct pt_regs *regs)
* See vectors.S for more details.
*/
-#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
-
void __init trap_init(void)
{
int i;
@@ -337,10 +380,15 @@ void __init trap_init(void)
}
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
+ trap_init_excsave();
+}
- i = (unsigned long)exc_table;
- __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
+#ifdef CONFIG_SMP
+void secondary_trap_init(void)
+{
+ trap_init_excsave();
}
+#endif
/*
* This function dumps the current valid window frame and other base registers.
@@ -350,6 +398,8 @@ void show_regs(struct pt_regs * regs)
{
int i, wmask;
+ show_regs_print_info(KERN_DEFAULT);
+
wmask = regs->wmask & ~1;
for (i = 0; i < 16; i++) {
@@ -369,53 +419,25 @@ void show_regs(struct pt_regs * regs)
regs->syscall);
}
-static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+static int show_trace_cb(struct stackframe *frame, void *data)
{
- unsigned long *sp;
-
- if (!task || task == current)
- __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
- else
- sp = (unsigned long *)task->thread.sp;
-
- return sp;
+ if (kernel_text_address(frame->pc)) {
+ printk(" [<%08lx>] ", frame->pc);
+ print_symbol("%s\n", frame->pc);
+ }
+ return 0;
}
void show_trace(struct task_struct *task, unsigned long *sp)
{
- unsigned long a0, a1, pc;
- unsigned long sp_start, sp_end;
-
- if (sp)
- a1 = (unsigned long)sp;
- else
- a1 = (unsigned long)stack_pointer(task);
-
- sp_start = a1 & ~(THREAD_SIZE-1);
- sp_end = sp_start + THREAD_SIZE;
+ if (!sp)
+ sp = stack_pointer(task);
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
- spill_registers();
-
- while (a1 > sp_start && a1 < sp_end) {
- sp = (unsigned long*)a1;
-
- a0 = *(sp - 4);
- a1 = *(sp - 3);
-
- if (a1 <= (unsigned long) sp)
- break;
-
- pc = MAKE_PC_FROM_RA(a0, a1);
-
- if (kernel_text_address(pc)) {
- printk(" [<%08lx>] ", pc);
- print_symbol("%s\n", pc);
- }
- }
+ walk_stackframe(sp, show_trace_cb, NULL);
printk("\n");
}
@@ -433,7 +455,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp)
sp = stack_pointer(task);
- stack = sp;
+ stack = sp;
printk("\nStack: ");
@@ -448,14 +470,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, stack);
}
-void dump_stack(void)
-{
- show_stack(current, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-
void show_code(unsigned int *pc)
{
long i;
@@ -493,7 +507,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
@@ -504,5 +518,3 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(err);
}
-
-
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 70066e3582d..8453e6e3989 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -10,7 +10,7 @@
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
*
* Chris Zankel <chris@zankel.net>
*
@@ -50,6 +50,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#define WINDOW_VECTORS_SIZE 0x180
@@ -69,16 +70,19 @@
ENTRY(_UserExceptionVector)
- xsr a3, EXCSAVE_1 # save a3 and get dispatch table
- wsr a2, DEPC # save a2
+ xsr a3, excsave1 # save a3 and get dispatch table
+ wsr a2, depc # save a2
l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
s32i a0, a2, PT_AREG0 # save a0 to ESF
- rsr a0, EXCCAUSE # retrieve exception cause
+ rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ xsr a3, excsave1 # restore a3 and dispatch table
jx a0
+ENDPROC(_UserExceptionVector)
+
/*
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
*
@@ -93,16 +97,18 @@ ENTRY(_UserExceptionVector)
ENTRY(_KernelExceptionVector)
- xsr a3, EXCSAVE_1 # save a3, and get dispatch table
- wsr a2, DEPC # save a2
+ xsr a3, excsave1 # save a3, and get dispatch table
+ wsr a2, depc # save a2
addi a2, a1, -16-PT_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF
- rsr a0, EXCCAUSE # retrieve exception cause
+ rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
+ xsr a3, excsave1 # restore a3 and dispatch table
jx a0
+ENDPROC(_KernelExceptionVector)
/*
* Double exception vector (Exceptions with PS.EXCM == 1)
@@ -164,7 +170,7 @@ ENTRY(_KernelExceptionVector)
*
* a0: DEPC
* a1: a1
- * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
* a3: exctable
* depc: a0
* excsave_1: a3
@@ -200,41 +206,46 @@ ENTRY(_KernelExceptionVector)
.section .DoubleExceptionVector.text, "ax"
.begin literal_prefix .DoubleExceptionVector
+ .globl _DoubleExceptionVector_WindowUnderflow
+ .globl _DoubleExceptionVector_WindowOverflow
ENTRY(_DoubleExceptionVector)
- /* Deliberately destroy excsave (don't assume it's value was valid). */
-
- wsr a3, EXCSAVE_1 # save a3
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
/* Check for kernel double exception (usually fatal). */
- rsr a3, PS
- _bbci.l a3, PS_UM_BIT, .Lksp
+ rsr a2, ps
+ _bbci.l a2, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
- xsr a0, DEPC # get DEPC, save a0
+ xsr a0, depc # get DEPC, save a0
- movi a3, XCHAL_WINDOW_VECTORS_VADDR
- _bltu a0, a3, .Lfixup
- addi a3, a3, WINDOW_VECTORS_SIZE
- _bgeu a0, a3, .Lfixup
+ movi a2, WINDOW_VECTORS_VADDR
+ _bltu a0, a2, .Lfixup
+ addi a2, a2, WINDOW_VECTORS_SIZE
+ _bgeu a0, a2, .Lfixup
/* Window overflow/underflow exception. Get stack pointer. */
- mov a3, a2
- movi a2, exc_table
- l32i a2, a2, EXC_TABLE_KSTK
+ l32i a2, a3, EXC_TABLE_KSTK
/* Check for overflow/underflow exception, jump if overflow. */
- _bbci.l a0, 6, .Lovfl
-
- /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
- /* Restart window underflow exception.
+ /*
+ * Restart window underflow exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = excsave_1
+ * excsave_1 = orig a3
+ *
* We return to the instruction in user space that caused the window
* underflow exception. Therefore, we change window base to the value
* before we entered the window underflow exception and prepare the
@@ -242,39 +253,69 @@ ENTRY(_DoubleExceptionVector)
* by changing depc (in a0).
* Note: We can trash the current window frame (a0...a3) and depc!
*/
-
- wsr a2, DEPC # save stack pointer temporarily
- rsr a0, PS
- extui a0, a0, PS_OWB_SHIFT, 4
- wsr a0, WINDOWBASE
+_DoubleExceptionVector_WindowUnderflow:
+ xsr a3, excsave1
+ wsr a2, depc # save stack pointer temporarily
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ wsr a0, windowbase
rsync
/* We are now in the previous window frame. Save registers again. */
- xsr a2, DEPC # save a2 and get stack pointer
+ xsr a2, depc # save a2 and get stack pointer
s32i a0, a2, PT_AREG0
-
- wsr a3, EXCSAVE_1 # save a3
- movi a3, exc_table
-
- rsr a0, EXCCAUSE
+ xsr a3, excsave1
+ rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception
addx4 a0, a0, a3
+ xsr a3, excsave1
l32i a0, a0, EXC_TABLE_FAST_USER
jx a0
-.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+ /*
+ * We only allow the ITLB miss exception if we are in kernel space.
+ * All other exceptions are unexpected and thus unrecoverable!
+ */
+
+#ifdef CONFIG_MMU
+ .extern fast_second_level_miss_double_kernel
+
+.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+
+ rsr a3, exccause
+ beqi a3, EXCCAUSE_ITLB_MISS, 1f
+ addi a3, a3, -EXCCAUSE_DTLB_MISS
+ bnez a3, .Lunrecoverable
+1: movi a3, fast_second_level_miss_double_kernel
+ jx a3
+#else
+.equ .Lksp, .Lunrecoverable
+#endif
+
+ /* Critical! We can't handle this situation. PANIC! */
- /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
+ .extern unrecoverable_exception
- movi a3, exc_table
- s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
+.Lunrecoverable_fixup:
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+
+.Lunrecoverable:
+ rsr a3, excsave1
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+
+.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
+
+ /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */
/* Enter critical section. */
l32i a2, a3, EXC_TABLE_FIXUP
s32i a3, a3, EXC_TABLE_FIXUP
- beq a2, a3, .Lunrecoverable_fixup # critical!
+ beq a2, a3, .Lunrecoverable_fixup # critical section
beqz a2, .Ldflt # no handler was registered
/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */
@@ -283,62 +324,264 @@ ENTRY(_DoubleExceptionVector)
.Ldflt: /* Get stack pointer. */
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
- addi a2, a3, -PT_USER_SIZE
-
-.Lovfl: /* Jump to default handlers. */
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ addi a2, a2, -PT_USER_SIZE
- /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
+ /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */
- xsr a3, DEPC
s32i a0, a2, PT_DEPC
- s32i a3, a2, PT_AREG0
+ l32i a0, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a0, depc
+ s32i a0, a2, PT_AREG0
- /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
+ /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */
- movi a3, exc_table
- rsr a0, EXCCAUSE
+ rsr a0, exccause
addx4 a0, a0, a3
+ xsr a3, excsave1
l32i a0, a0, EXC_TABLE_FAST_USER
jx a0
/*
- * We only allow the ITLB miss exception if we are in kernel space.
- * All other exceptions are unexpected and thus unrecoverable!
+ * Restart window OVERFLOW exception.
+ * Currently:
+ * depc = orig a0,
+ * a0 = orig DEPC,
+ * a2 = new sp based on KSTK from exc_table
+ * a3 = EXCSAVE_1
+ * excsave_1 = orig a3
+ *
+ * We return to the instruction in user space that caused the window
+ * overflow exception. Therefore, we change window base to the value
+ * before we entered the window overflow exception and prepare the
+ * registers to return as if we were coming from a regular exception
+ * by changing DEPC (in a0).
+ *
+ * NOTE: We CANNOT trash the current window frame (a0...a3), but we
+ * can clobber depc.
+ *
+ * The tricky part here is that overflow8 and overflow12 handlers
+ * save a0, then clobber a0. To restart the handler, we have to restore
+ * a0 if the double exception was past the point where a0 was clobbered.
+ *
+ * To keep things simple, we take advantage of the fact all overflow
+ * handlers save a0 in their very first instruction. If DEPC was past
+ * that instruction, we can safely restore a0 from where it was saved
+ * on the stack.
+ *
+ * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3
*/
+_DoubleExceptionVector_WindowOverflow:
+ extui a2, a0, 0, 6 # get offset into 64-byte vector handler
+ beqz a2, 1f # if at start of vector, don't restore
-#ifdef CONFIG_MMU
- .extern fast_second_level_miss_double_kernel
+ addi a0, a0, -128
+ bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
-.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
+ /*
+ * This fixup handler is for the extremely unlikely case where the
+ * overflow handler's reference thru a0 gets a hardware TLB refill
+ * that bumps out the (distinct, aliasing) TLB entry that mapped its
+ * prior references thru a9/a13, and where our reference now thru
+ * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+ */
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
- rsr a3, EXCCAUSE
- beqi a3, EXCCAUSE_ITLB_MISS, 1f
- addi a3, a3, -EXCCAUSE_DTLB_MISS
- bnez a3, .Lunrecoverable
-1: movi a3, fast_second_level_miss_double_kernel
- jx a3
-#else
-.equ .Lksp, .Lunrecoverable
-#endif
+ bbsi.l a0, 7, 2f
- /* Critical! We can't handle this situation. PANIC! */
+ /*
+ * Restore a0 as saved by _WindowOverflow8().
+ */
- .extern unrecoverable_exception
+ l32e a0, a9, -16
+ wsr a0, depc # replace the saved a0
+ j 3f
+
+2:
+ /*
+ * Restore a0 as saved by _WindowOverflow12().
+ */
+
+ l32e a0, a13, -16
+ wsr a0, depc # replace the saved a0
+3:
+ xsr a3, excsave1
+ movi a0, 0
+ s32i a0, a3, EXC_TABLE_FIXUP
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+1:
+ /*
+ * Restore WindowBase while leaving all address registers restored.
+ * We have to use ROTW for this, because WSR.WINDOWBASE requires
+ * an address register (which would prevent restore).
+ *
+ * Window Base goes from 0 ... 7 (Modulo 8)
+ * Window Start is 8 bits; Ex: (0b1010 1010):0x55 from series of call4s
+ */
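The next five instructions compute how many 4-register panes separate the
current frame from the interrupted one; as a C sketch (the names are purely
illustrative stand-ins for the PS.OWB field and the WINDOWBASE register read
below):

	/* Illustrative: distance from the overflow handler's frame back to
	 * the interrupted frame, modulo the 8 panes mentioned above.
	 * The result is 1, 2 or 3 and selects .L1pane, the rotw -2 path,
	 * or .L3pane. */
	static unsigned int panes_back(unsigned int windowbase, unsigned int owb)
	{
		return (windowbase - owb) & 7;
	}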
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
-.Lunrecoverable_fixup:
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
- xsr a0, DEPC
+ xsr a3, excsave1
+ beqi a0, 1, .L1pane
+ beqi a0, 3, .L3pane
-.Lunrecoverable:
- rsr a3, EXCSAVE_1
- wsr a0, EXCSAVE_1
- movi a0, unrecoverable_exception
- callx0 a0
+ rsr a0, depc
+ rotw -2
- .end literal_prefix
+ /*
+ * We are now in the user code's original window frame.
+ * Process the exception as a user exception as if it was
+ * taken by the user code.
+ *
+ * This is similar to the user exception vector,
+ * except that PT_DEPC isn't set to EXCCAUSE.
+ */
+1:
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+ rsr a0, exccause
+
+ s32i a0, a2, PT_DEPC
+
+_DoubleExceptionVector_handle_exception:
+ addx4 a0, a0, a3
+ l32i a0, a0, EXC_TABLE_FAST_USER
+ xsr a3, excsave1
+ jx a0
+
+.L1pane:
+ rsr a0, depc
+ rotw -1
+ j 1b
+
+.L3pane:
+ rsr a0, depc
+ rotw -3
+ j 1b
+
+
+ENDPROC(_DoubleExceptionVector)
+
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ * (we'll need to rotate window back and there's no place to save this
+ * information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ * about the conditions of the original double exception that happened in
+ * the window overflow handler is lost, so we just return to userspace to
+ * retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+ rsr a0, ps
+ extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+ rsr a2, windowbase
+ sub a0, a2, a0
+ extui a0, a0, 0, 3
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+
+ _beqi a0, 1, .Lhandle_1
+ _beqi a0, 3, .Lhandle_3
+
+ .macro overflow_fixup_handle_exception_pane n
+
+ rsr a0, depc
+ rotw -\n
+
+ xsr a3, excsave1
+ wsr a2, depc
+ l32i a2, a3, EXC_TABLE_KSTK
+ s32i a0, a2, PT_AREG0
+
+ movi a0, .Lrestore_\n
+ s32i a0, a2, PT_DEPC
+ rsr a0, exccause
+ j _DoubleExceptionVector_handle_exception
+
+ .endm
+
+ overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+ overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+ overflow_fixup_handle_exception_pane 3
+
+ .macro overflow_fixup_restore_a0_pane n
+
+ rotw \n
+ /* Need to preserve the a0 value here to be able to handle an
+ * exception that may occur on the a0 reload from the stack. It may
+ * occur because the TLB miss handler may not be atomic and the
+ * pointer to the page table may be lost before we get here. There
+ * are no free registers, so we need to use the EXC_TABLE_DOUBLE_SAVE area.
+ */
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, window_overflow_restore_a0_fixup
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ bbsi.l a0, 7, 1f
+ l32e a0, a9, -16
+ j 2f
+1:
+ l32e a0, a13, -16
+2:
+ rotw -\n
+
+ .endm
+
+.Lrestore_2:
+ overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+ xsr a3, excsave1
+ s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ movi a2, 0
+ s32i a2, a3, EXC_TABLE_FIXUP
+ l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
+ xsr a3, excsave1
+ rfe
+
+.Lrestore_1:
+ overflow_fixup_restore_a0_pane 1
+ j .Lset_default_fixup
+.Lrestore_3:
+ overflow_fixup_restore_a0_pane 3
+ j .Lset_default_fixup
+ENDPROC(window_overflow_restore_a0_fixup)
+ .end literal_prefix
/*
* Debug interrupt vector
*
@@ -349,9 +592,49 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector)
- xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+
+ xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0
+ENDPROC(_DebugInterruptVector)
+
+
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space. With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the IRQ handling time.
+ */
+
+ .macro irq_entry_level level
+
+ .if XCHAL_EXCM_LEVEL >= \level
+ .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+ wsr a0, excsave2
+ rsr a0, epc\level
+ wsr a0, epc1
+ movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ wsr a0, exccause
+ rsr a0, eps\level
+ # branch to user or kernel vector
+ j _SimulateUserKernelVectorException
+ .endif
+
+ .endm
+
+ irq_entry_level 2
+ irq_entry_level 3
+ irq_entry_level 4
+ irq_entry_level 5
+ irq_entry_level 6
/* Window overflow and underflow handlers.
@@ -363,38 +646,61 @@ ENTRY(_DebugInterruptVector)
* we try to access any page that would cause a page fault early.
*/
+#define ENTRY_ALIGN64(name) \
+ .globl name; \
+ .align 64; \
+ name:
+
.section .WindowVectors.text, "ax"
/* 4-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
s32e a0, a5, -16
s32e a1, a5, -12
s32e a2, a5, -8
s32e a3, a5, -4
rfwo
+ENDPROC(_WindowOverflow4)
+
+
+#if XCHAL_EXCM_LEVEL >= 2
+ /* Not a window vector - but a convenient location
+ * (where we know there's space) for continuation of
+ * medium priority interrupt dispatch code.
+ * On entry here, a0 contains PS, and EPC2 contains saved a0:
+ */
+ .align 4
+_SimulateUserKernelVectorException:
+ addi a0, a0, (1 << PS_EXCM_BIT)
+ wsr a0, ps
+ bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
+ rsr a0, excsave2 # restore a0
+ j _KernelExceptionVector # simulate kernel vector exception
+1: rsr a0, excsave2 # restore a0
+ j _UserExceptionVector # simulate user vector exception
+#endif
+
/* 4-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
l32e a0, a5, -16
l32e a1, a5, -12
l32e a2, a5, -8
l32e a3, a5, -4
rfwu
+ENDPROC(_WindowUnderflow4)
/* 8-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
s32e a0, a9, -16
l32e a0, a1, -12
s32e a2, a9, -8
@@ -406,11 +712,12 @@ _WindowOverflow8:
s32e a7, a0, -20
rfwo
+ENDPROC(_WindowOverflow8)
+
/* 8-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
l32e a1, a9, -12
l32e a0, a9, -16
l32e a7, a1, -12
@@ -422,12 +729,12 @@ _WindowUnderflow8:
l32e a7, a7, -20
rfwu
+ENDPROC(_WindowUnderflow8)
/* 12-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
s32e a0, a13, -16
l32e a0, a1, -12
s32e a1, a13, -12
@@ -443,11 +750,12 @@ _WindowOverflow12:
s32e a11, a0, -20
rfwo
+ENDPROC(_WindowOverflow12)
+
/* 12-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
l32e a1, a13, -12
l32e a0, a13, -16
l32e a11, a1, -12
@@ -463,6 +771,6 @@ _WindowUnderflow12:
l32e a11, a11, -20
rfwu
- .text
-
+ENDPROC(_WindowUnderflow12)
+ .text
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 88ecea3facb..d16db6df86f 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#include <variant/core.h>
#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
@@ -30,7 +31,7 @@ jiffies = jiffies_64;
#endif
#ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0001000
+#define KERNELOFFSET 0xd0003000
#endif
/* Note: In the following macros, it would be nice to specify only the
@@ -83,7 +84,6 @@ SECTIONS
_text = .;
_stext = .;
- _ftext = .;
.text :
{
@@ -112,7 +112,7 @@ SECTIONS
EXCEPTION_TABLE(16)
/* Data section */
- _fdata = .;
+ _sdata = .;
RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@@ -135,6 +135,26 @@ SECTIONS
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+ RELOCATE_ENTRY(_Level2InterruptVector_text,
+ .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ RELOCATE_ENTRY(_Level3InterruptVector_text,
+ .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ RELOCATE_ENTRY(_Level4InterruptVector_text,
+ .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ RELOCATE_ENTRY(_Level5InterruptVector_text,
+ .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ RELOCATE_ENTRY(_Level6InterruptVector_text,
+ .Level6InterruptVector.text);
+#endif
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
@@ -145,6 +165,13 @@ SECTIONS
.DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
+#if defined(CONFIG_SMP)
+ RELOCATE_ENTRY(_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal);
+ RELOCATE_ENTRY(_SecondaryResetVector_text,
+ .SecondaryResetVector.text);
+#endif
+
__boot_reloc_table_end = ABSOLUTE(.) ;
@@ -166,50 +193,111 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- XCHAL_WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR, 4,
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
- XCHAL_DEBUG_VECTOR_VADDR - 4,
+ DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
- XCHAL_DEBUG_VECTOR_VADDR,
+ DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
+#undef LAST
+#define LAST .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+ SECTION_VECTOR (_Level2InterruptVector_text,
+ .Level2InterruptVector.text,
+ INTLEVEL2_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+ SECTION_VECTOR (_Level3InterruptVector_text,
+ .Level3InterruptVector.text,
+ INTLEVEL3_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+ SECTION_VECTOR (_Level4InterruptVector_text,
+ .Level4InterruptVector.text,
+ INTLEVEL4_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+ SECTION_VECTOR (_Level5InterruptVector_text,
+ .Level5InterruptVector.text,
+ INTLEVEL5_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+ SECTION_VECTOR (_Level6InterruptVector_text,
+ .Level6InterruptVector.text,
+ INTLEVEL6_VECTOR_VADDR,
+ SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST .Level6InterruptVector.text
+#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
- XCHAL_KERNEL_VECTOR_VADDR - 4,
- SIZEOF(.DebugInterruptVector.text),
- .DebugInterruptVector.text)
+ KERNEL_VECTOR_VADDR - 4,
+ SIZEOF(LAST), LAST)
+#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
- XCHAL_KERNEL_VECTOR_VADDR,
+ KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
- XCHAL_USER_VECTOR_VADDR - 4,
+ USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
- XCHAL_USER_VECTOR_VADDR,
+ USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 40,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
- XCHAL_DOUBLEEXC_VECTOR_VADDR,
- 32,
+ DOUBLEEXC_VECTOR_VADDR,
+ 40,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+
+#if defined(CONFIG_SMP)
+
+ SECTION_VECTOR (_SecondaryResetVector_literal,
+ .SecondaryResetVector.literal,
+ RESET_VECTOR1_VADDR - 4,
+ SIZEOF(.DoubleExceptionVector.text),
+ .DoubleExceptionVector.text)
+
+ SECTION_VECTOR (_SecondaryResetVector_text,
+ .SecondaryResetVector.text,
+ RESET_VECTOR1_VADDR,
+ 4,
+ .SecondaryResetVector.literal)
+
+ . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
+
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
@@ -223,16 +311,26 @@ SECTIONS
. = ALIGN(0x10);
.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
- . = ALIGN(0x1000);
- __initrd_start = .;
- .initrd : { *(.initrd) }
- __initrd_end = .;
-
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ .ResetVector.text RESET_VECTOR_VADDR :
{
*(.ResetVector.text)
}
+
+ /*
+ * This is a remapped copy of the Secondary Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ *
+ * Only debug information about this section is put in the kernel image.
+ */
+ .SecondaryResetVector.remapped_text 0x46000000 (INFO):
+ {
+ *(.SecondaryResetVector.remapped_text)
+ }
+
+
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index c9a7c5b74a0..4d2872fd9bb 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -20,11 +20,13 @@
#include <linux/in6.h>
#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm/ftrace.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>
#endif
@@ -39,8 +41,11 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__strncpy_user);
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(empty_zero_page);
/*
* gcc internal math functions
@@ -56,6 +61,7 @@ extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
+extern int __ucmpdi2(int, int);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
@@ -68,11 +74,31 @@ EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__ucmpdi2);
+
+void __xtensa_libgcc_window_spill(void)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
+
+unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_and_4);
+
+unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
+{
+ BUG();
+}
+EXPORT_SYMBOL(__sync_fetch_and_or_4);
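Both stubs exist so that objects referencing the gcc __sync_* builtins still
link; they BUG() because, presumably, no reachable code path on this
configuration is expected to call them at run time.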
#ifdef CONFIG_NET
/*
* Networking support
*/
+EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif /* CONFIG_NET */
@@ -80,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
/*
* Kernel hacking ...
@@ -95,3 +122,15 @@ EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
+
+extern long common_exception_return;
+EXPORT_SYMBOL(common_exception_return);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index df397f932d0..4eb573d2720 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -41,10 +41,11 @@
.text
ENTRY(csum_partial)
- /*
- * Experiments with Ethernet and SLIP connections show that buf
- * is aligned on either a 2-byte or 4-byte boundary.
- */
+
+ /*
+ * Experiments with Ethernet and SLIP connections show that buf
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
entry sp, 32
extui a5, a2, 0, 2
bnez a5, 8f /* branch if 2-byte aligned */
@@ -170,7 +171,7 @@ ENTRY(csum_partial)
3:
j 5b /* branch to handle the remaining byte */
-
+ENDPROC(csum_partial)
/*
* Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +212,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
*/
ENTRY(csum_partial_copy_generic)
+
entry sp, 32
mov a12, a3
mov a11, a4
@@ -367,6 +369,8 @@ DST( s8i a8, a3, 1 )
6:
j 4b /* process the possible trailing odd byte */
+ENDPROC(csum_partial_copy_generic)
+
# Exception handler:
.section .fixup, "ax"
@@ -406,4 +410,3 @@ DST( s8i a8, a3, 1 )
retw
.previous
-
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index ea59dcd0386..b1c219acabe 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2002 - 2012 Tensilica Inc.
*/
#include <variant/core.h>
@@ -27,14 +27,11 @@
#endif
.endm
-
/*
* void *memcpy(void *dst, const void *src, size_t len);
- * void *memmove(void *dst, const void *src, size_t len);
- * void *bcopy(const void *src, void *dst, size_t len);
*
* This function is intended to do the same thing as the standard
- * library function memcpy() (or bcopy()) for most cases.
+ * library function memcpy() for most cases.
* However, where the source and/or destination references
* an instruction RAM or ROM or a data RAM or ROM, that
* source and/or destination will always be accessed with
@@ -45,9 +42,6 @@
* !!!!!!! Handling of IRAM/IROM has not yet
* !!!!!!! been implemented.
*
- * The bcopy version is provided here to avoid the overhead
- * of an extra call, for callers that require this convention.
- *
* The (general case) algorithm is as follows:
* If destination is unaligned, align it by conditionally
* copying 1 and 2 bytes.
@@ -76,17 +70,6 @@
*/
.text
- .align 4
- .global bcopy
- .type bcopy,@function
-bcopy:
- entry sp, 16 # minimal stack frame
- # a2=src, a3=dst, a4=len
- mov a5, a3 # copy dst so that a2 is return value
- mov a3, a2
- mov a2, a5
- j .Lcommon # go to common code for memcpy+bcopy
-
/*
* Byte by byte copy
@@ -107,7 +90,7 @@ bcopy:
s8i a6, a5, 0
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
- blt a3, a7, .Lnextbyte
+ bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone:
retw
@@ -144,9 +127,6 @@ bcopy:
.global memcpy
.type memcpy,@function
memcpy:
- .global memmove
- .type memmove,@function
-memmove:
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
@@ -182,7 +162,7 @@ memmove:
s32i a7, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
- blt a3, a8, .Loop1
+ bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
bbci.l a4, 3, .L2
@@ -230,8 +210,10 @@ memmove:
_beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset
-#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
- lint or ferret client, or 0 to save a few cycles */
+
+/* set to 1 when running on ISS (simulator) with the
+ lint or ferret client, or 0 to save a few cycles */
+#define SIM_CHECKS_ALIGNMENT 1
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
and a11, a3, a8 # save unalignment offset for below
sub a3, a3, a11 # align a3
@@ -260,7 +242,7 @@ memmove:
s32i a9, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
- blt a3, a10, .Loop2
+ bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2done:
bbci.l a4, 3, .L12
@@ -305,6 +287,285 @@ memmove:
l8ui a6, a3, 0
s8i a6, a5, 0
retw
+
+
+/*
+ * void bcopy(const void *src, void *dest, size_t n);
+ */
+ .align 4
+ .global bcopy
+ .type bcopy,@function
+bcopy:
+ entry sp, 16 # minimal stack frame
+ # a2=src, a3=dst, a4=len
+ mov a5, a3
+ mov a3, a2
+ mov a2, a5
+ j .Lmovecommon # go to common code for memmove+bcopy
+
+/*
+ * void *memmove(void *dst, const void *src, size_t len);
+ *
+ * This function is intended to do the same thing as the standard
+ * library function memmove() for most cases.
+ * However, where the source and/or destination references
+ * an instruction RAM or ROM or a data RAM or ROM, that
+ * source and/or destination will always be accessed with
+ * 32-bit load and store instructions (as required for these
+ * types of devices).
+ *
+ * !!!!!!! XTFIXME:
+ * !!!!!!! Handling of IRAM/IROM has not yet
+ * !!!!!!! been implemented.
+ *
+ * The (general case) algorithm is as follows:
+ * If end of source doesn't overlap destination then use memcpy.
+ * Otherwise do memcpy backwards.
+ *
+ * Register use:
+ * a0/ return address
+ * a1/ stack pointer
+ * a2/ return value
+ * a3/ src
+ * a4/ length
+ * a5/ dst
+ * a6/ tmp
+ * a7/ tmp
+ * a8/ tmp
+ * a9/ tmp
+ * a10/ tmp
+ * a11/ tmp
+ */
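A minimal C sketch of the strategy described above, assuming nothing beyond
standard C (the real routine copies 16 bytes per iteration and handles the
alignment cases; this only shows the forward/backward decision):

	#include <stddef.h>
	#include <stdint.h>

	/* Illustrative: the test made at .Lmovecommon. With unsigned
	 * arithmetic, dst - src >= len means a forward copy cannot
	 * clobber source bytes that have not been read yet. */
	static void *memmove_sketch(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if ((uintptr_t)d - (uintptr_t)s >= len) {
			while (len--)		/* forward: the memcpy path */
				*d++ = *s++;
		} else {
			d += len;
			s += len;
			while (len--)		/* backward, high to low */
				*--d = *--s;
		}
		return dst;
	}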
+
+/*
+ * Byte by byte copy
+ */
+ .align 4
+ .byte 0 # 1 mod 4 alignment for LOOPNEZ
+ # (0 mod 4 alignment for LBEG)
+.Lbackbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, .Lbackbytecopydone
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a4, .Lbackbytecopydone
+ sub a7, a3, a4 # a7 = start address for source
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbacknextbyte:
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a7, .Lbacknextbyte # continue loop if
+ # $a3:src != $a7:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.Lbackbytecopydone:
+ retw
+
+/*
+ * Destination is unaligned
+ */
+
+ .align 4
+.Lbackdst1mod2: # dst is only byte aligned
+ _bltui a4, 7, .Lbackbytecopy # do short copies byte by byte
+
+ # copy 1 byte
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+ addi a4, a4, -1
+ _bbci.l a5, 1, .Lbackdstaligned # if dst is now aligned, then
+ # return to main algorithm
+.Lbackdst2mod4: # dst 16-bit aligned
+ # copy 2 bytes
+ _bltui a4, 6, .Lbackbytecopy # do short copies byte by byte
+ addi a3, a3, -2
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a5, a5, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a4, a4, -2
+ j .Lbackdstaligned # dst is now aligned,
+ # return to main algorithm
+
+ .align 4
+ .global memmove
+ .type memmove,@function
+memmove:
+
+ entry sp, 16 # minimal stack frame
+ # a2/ dst, a3/ src, a4/ len
+ mov a5, a2 # copy dst so that a2 is return value
+.Lmovecommon:
+ sub a6, a5, a3
+ bgeu a6, a4, .Lcommon
+
+ add a5, a5, a4
+ add a3, a3, a4
+
+ _bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2
+ _bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4
+.Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned
+ srli a7, a4, 4 # number of loop iterations with 16B
+ # per iteration
+ movi a8, 3 # if source is not aligned,
+ _bany a3, a8, .Lbacksrcunaligned # then use shifting copy
+ /*
+ * Destination and source are word-aligned, use word copy.
+ */
+ # copy 16 bytes per iteration for word-aligned dst and word-aligned src
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .backLoop1done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .backLoop1done
+ slli a8, a7, 4
+ sub a8, a3, a8 # a8 = start of first 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop1:
+ addi a3, a3, -16
+ l32i a7, a3, 12
+ l32i a6, a3, 8
+ addi a5, a5, -16
+ s32i a7, a5, 12
+ l32i a7, a3, 4
+ s32i a6, a5, 8
+ l32i a6, a3, 0
+ s32i a7, a5, 4
+ s32i a6, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop1done:
+ bbci.l a4, 3, .Lback2
+ # copy 8 bytes
+ addi a3, a3, -8
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ addi a5, a5, -8
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+.Lback2:
+ bbsi.l a4, 2, .Lback3
+ bbsi.l a4, 1, .Lback4
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback3:
+ # copy 4 bytes
+ addi a3, a3, -4
+ l32i a6, a3, 0
+ addi a5, a5, -4
+ s32i a6, a5, 0
+ bbsi.l a4, 1, .Lback4
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback4:
+ # copy 2 bytes
+ addi a3, a3, -2
+ l16ui a6, a3, 0
+ addi a5, a5, -2
+ s16i a6, a5, 0
+ bbsi.l a4, 0, .Lback5
+ retw
+.Lback5:
+ # copy 1 byte
+ addi a3, a3, -1
+ l8ui a6, a3, 0
+ addi a5, a5, -1
+ s8i a6, a5, 0
+ retw
+
+/*
+ * Destination is aligned, Source is unaligned
+ */
+
+ .align 4
+.Lbacksrcunaligned:
+ _beqz a4, .Lbackdone # avoid loading anything for zero-length copies
+ # copy 16 bytes per iteration for word-aligned dst and unaligned src
+ ssa8 a3 # set shift amount from byte offset
+#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
+ * the lint or ferret client, or 0
+ * to save a few cycles */
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ and a11, a3, a8 # save unalignment offset for below
+ sub a3, a3, a11 # align a3
+#endif
+ l32i a6, a3, 0 # load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, .backLoop2done
+#else /* !XCHAL_HAVE_LOOPS */
+ beqz a7, .backLoop2done
+ slli a10, a7, 4
+ sub a10, a3, a10 # a10 = start of first 16B source chunk
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop2:
+ addi a3, a3, -16
+ l32i a7, a3, 12
+ l32i a8, a3, 8
+ addi a5, a5, -16
+ src_b a6, a7, a6
+ s32i a6, a5, 12
+ l32i a9, a3, 4
+ src_b a7, a8, a7
+ s32i a7, a5, 8
+ l32i a6, a3, 0
+ src_b a8, a9, a8
+ s32i a8, a5, 4
+ src_b a9, a6, a9
+ s32i a9, a5, 0
+#if !XCHAL_HAVE_LOOPS
+ bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
+#endif /* !XCHAL_HAVE_LOOPS */
+.backLoop2done:
+ bbci.l a4, 3, .Lback12
+ # copy 8 bytes
+ addi a3, a3, -8
+ l32i a7, a3, 4
+ l32i a8, a3, 0
+ addi a5, a5, -8
+ src_b a6, a7, a6
+ s32i a6, a5, 4
+ src_b a7, a8, a7
+ s32i a7, a5, 0
+ mov a6, a8
+.Lback12:
+ bbci.l a4, 2, .Lback13
+ # copy 4 bytes
+ addi a3, a3, -4
+ l32i a7, a3, 0
+ addi a5, a5, -4
+ src_b a6, a7, a6
+ s32i a6, a5, 0
+ mov a6, a7
+.Lback13:
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
+ add a3, a3, a11 # readjust a3 with correct misalignment
+#endif
+ bbsi.l a4, 1, .Lback14
+ bbsi.l a4, 0, .Lback15
+.Lbackdone:
+ retw
+.Lback14:
+ # copy 2 bytes
+ addi a3, a3, -2
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a5, a5, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ bbsi.l a4, 0, .Lback15
+ retw
+.Lback15:
+ # copy 1 byte
+ addi a3, a3, -1
+ addi a5, a5, -1
+ l8ui a6, a3, 0
+ s8i a6, a5, 0
+ retw
+
/*
* Local Variables:
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index a71733ae119..34d05abbd92 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
unsigned char header_type;
struct pci_dev *dev = &pciauto_dev;
- pciauto_dev.bus = &pciauto_bus;
- pciauto_dev.sysdata = pci_ctrl;
+ pciauto_dev.bus = &pciauto_bus;
+ pciauto_dev.sysdata = pci_ctrl;
pciauto_bus.ops = pci_ctrl->ops;
/*
@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
}
return sub_bus;
}
-
-
-
-
-
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 9f603cdaaa6..1ad0ecf4536 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -166,7 +166,7 @@ __strncpy_user:
retw
.Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__
- extui a9, a9, 16, 16
+ extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s)
addi a11, a11, 1 # advance dst pointer
@@ -174,7 +174,7 @@ __strncpy_user:
retw
.Lz2: # byte 2 is zero
#ifdef __XTENSA_EB__
- extui a9, a9, 16, 16
+ extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s)
movi a9, 0
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 23f2a89816a..4c03b1e581e 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -145,4 +145,3 @@ __strnlen_user:
lenfixup:
movi a2, 0
retw
-
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 46d60314bb1..ace1892a875 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -318,4 +318,3 @@ l_fixup:
/* Ignore memset return value in a6. */
/* a2 still contains bytes not copied. */
retw
-
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index f0b646d2f84..f54f78e24d7 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -4,3 +4,4 @@
obj-y := init.o cache.o misc.o
obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 85df4655d32..63cbb867dad 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -59,6 +59,10 @@
*
*/
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
@@ -118,7 +122,7 @@ void flush_dcache_page(struct page *page)
* For now, flush the whole cache. FIXME??
*/
-void flush_cache_range(struct vm_area_struct* vma,
+void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_invalidate_dcache_all();
@@ -132,8 +136,8 @@ void flush_cache_range(struct vm_area_struct* vma,
* alias versions of the cache flush functions.
*/
-void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
- unsigned long pfn)
+void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned long pfn)
{
/* Note that we have to use the 'alias' address to avoid multi-hit */
@@ -159,31 +163,31 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
/* Invalidate old entry in TLBs */
- invalidate_itlb_mapping(addr);
- invalidate_dtlb_mapping(addr);
+ flush_tlb_page(vma, addr);
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
- unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page);
+ unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page(paddr);
- __flush_invalidate_dcache_page_alias(vaddr, phys);
- __invalidate_icache_page_alias(vaddr, phys);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
clear_bit(PG_arch_1, &page->flags);
}
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
- unsigned long paddr = (unsigned long) page_address(page);
+ unsigned long paddr = (unsigned long)kmap_atomic(page);
__flush_dcache_page(paddr);
__invalidate_icache_page(paddr);
set_bit(PG_arch_1, &page->flags);
+ kunmap_atomic((void *)paddr);
}
#endif
}
@@ -195,7 +199,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
@@ -205,8 +209,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
/* Flush and invalidate user page if aliased. */
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
- __flush_invalidate_dcache_page_alias(temp, phys);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
}
/* Copy data */
@@ -219,12 +223,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
*/
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_range((unsigned long) dst, len);
- if ((vma->vm_flags & VM_EXEC) != 0) {
- __invalidate_icache_page_alias(temp, phys);
- }
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
@@ -245,8 +248,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
*/
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
- __flush_invalidate_dcache_page_alias(temp, phys);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
}
memcpy(dst, src, len);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e367e302643..b57c4f91f48 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2010 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,10 +19,9 @@
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/pgalloc.h>
-unsigned long asid_cache = ASID_USER_FIRST;
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
#undef DEBUG_PAGE_FAULT
@@ -45,6 +44,7 @@ void do_page_fault(struct pt_regs *regs)
int is_write, is_exec;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
info.si_code = SEGV_MAPERR;
@@ -72,6 +72,9 @@ void do_page_fault(struct pt_regs *regs)
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -94,6 +97,7 @@ good_area:
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
} else if (is_exec) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
@@ -105,7 +109,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -113,10 +121,23 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
@@ -167,6 +188,7 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
+ return;
vmalloc_fault:
{
@@ -234,4 +256,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Oops", regs, sig);
do_exit(sig);
}
-
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 00000000000..17a8c0d6fd1
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+ int type;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int idx, type;
+
+ if (kvaddr >= (void *)FIXADDR_START &&
+ kvaddr < (void *)FIXADDR_TOP) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this
+ * pte without first remapping it. Keeping stale mappings around
+ * is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
+ pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ local_flush_tlb_kernel_range((unsigned long)kvaddr,
+ (unsigned long)kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
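Callers pair kmap_atomic() with the generic kunmap_atomic() wrapper, which
ends up in __kunmap_atomic() above. A hypothetical user (clear_any_page is
not part of this patch):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical example: clear one page that may live in high memory
	 * through the temporary per-CPU fixmap slot set up by kmap_atomic(). */
	static void clear_any_page(struct page *page)
	{
		void *vaddr = kmap_atomic(page);

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);	/* clears the pte and flushes the TLB slot */
	}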
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index ba150e5de2e..77ed20209ca 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,6 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -26,16 +28,134 @@
#include <asm/bootparam.h>
#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/sysmem.h>
-/* References to section boundaries */
+struct sysmem_info sysmem __initdata;
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+static void __init sysmem_dump(void)
+{
+ unsigned i;
+
+ pr_debug("Sysmem:\n");
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
+ sysmem.bank[i].start, sysmem.bank[i].end,
+ (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start <= start)
+ it = sysmem.bank + i;
+ else
+ break;
+ return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+ unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+ if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+ return -ENOMEM;
+ if (to != from)
+ memmove(to, from, n * sizeof(struct meminfo));
+ sysmem.nr_banks += to - from;
+ return 0;
+}
+
+/*
+ * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
+
+ if (start == end ||
+ (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+ pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+
+ start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
+ sz = end - start;
+
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
+
+ if (it && bank_sz >= start - it->start) {
+ if (end - it->start > bank_sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (!it)
+ it = sysmem.bank;
+ else
+ ++it;
+
+ if (it - sysmem.bank < sysmem.nr_banks &&
+ it->start - start <= sz) {
+ it->start = start;
+ if (it->end - it->start < sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (move_banks(it + 1, it) < 0) {
+ pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+ it->start = start;
+ it->end = end;
+ return 0;
+ }
+ }
+ sz = it->end - it->start;
+ for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start - it->start <= sz) {
+ if (sz < sysmem.bank[i].end - it->start)
+ it->end = sysmem.bank[i].end;
+ } else {
+ break;
+ }
+
+ move_banks(it + 1, sysmem.bank + i);
+ return 0;
+}
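A worked example (addresses purely illustrative): starting from a single bank
[0x00000000, 0x08000000), add_sysmem_bank(0x06000000, 0x10000000) extends it
to one merged bank [0x00000000, 0x10000000), since the result is the union of
the existing banks and the new range; a further call with a range that lies
entirely inside an existing bank changes nothing and returns 0.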
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not
+ * exist, the memory map is not altered.
*
* Parameters:
* start Start of region,
@@ -43,58 +163,74 @@ extern char __init_begin, __init_end;
* must_exist Must exist in memory pool.
*
* Returns:
- * 0 (memory area couldn't be mapped)
- * -1 (success)
+ * 0 (success)
+ * < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
- int i;
-
- if (start == end)
- return 0;
+ struct meminfo *it;
+ struct meminfo *rm = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
+ sz = end - start;
+ if (!sz)
+ return -EINVAL;
- for (i = 0; i < sysmem.nr_banks; i++)
- if (start < sysmem.bank[i].end
- && end >= sysmem.bank[i].start)
- break;
+ it = find_bank(start);
- if (i == sysmem.nr_banks) {
- if (must_exist)
- printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
- "not in any region!\n", start, end);
- return 0;
+ if (it)
+ bank_sz = it->end - it->start;
+
+ if ((!it || end - it->start > bank_sz) && must_exist) {
+ pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+ start, end);
+ return -EINVAL;
}
- if (start > sysmem.bank[i].start) {
- if (end < sysmem.bank[i].end) {
- /* split entry */
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
- panic("meminfo overflow\n");
- sysmem.bank[sysmem.nr_banks].start = end;
- sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
- sysmem.nr_banks++;
+ if (it && start - it->start <= bank_sz) {
+ if (start == it->start) {
+ if (end - it->start < bank_sz) {
+ it->start = end;
+ return 0;
+ } else {
+ rm = it;
+ }
+ } else {
+ it->end = start;
+ if (end - it->start < bank_sz)
+ return add_sysmem_bank(end,
+ it->start + bank_sz);
+ ++it;
}
- sysmem.bank[i].end = start;
- } else {
- if (end < sysmem.bank[i].end)
- sysmem.bank[i].start = end;
- else {
- /* remove entry */
- sysmem.nr_banks--;
- sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
- sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+ }
+
+ if (!it)
+ it = sysmem.bank;
+
+ for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+ if (it->end - start <= sz) {
+ if (!rm)
+ rm = it;
+ } else {
+ if (it->start - start < sz)
+ it->start = end;
+ break;
}
}
- return -1;
+
+ if (rm)
+ move_banks(rm, it);
+
+ return 0;
}
/*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
@@ -103,6 +239,7 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
+ sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -146,28 +283,27 @@ void __init bootmem_init(void)
/* Add all remaining memory pieces into the bootmem map */
- for (i=0; i<sysmem.nr_banks; i++)
- free_bootmem(sysmem.bank[i].start,
- sysmem.bank[i].end - sysmem.bank[i].start);
+ for (i = 0; i < sysmem.nr_banks; i++) {
+ if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+ unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+ sysmem.bank[i].end);
+ free_bootmem(sysmem.bank[i].start,
+ end - sysmem.bank[i].start);
+ }
+ }
}
void __init zones_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- int i;
-
/* All pages are DMA-able, so we put them all in the DMA zone. */
-
- zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
- for (i = 1; i < MAX_NR_ZONES; i++)
- zones_size[i] = 0;
-
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+ [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
-
+ };
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
@@ -177,50 +313,38 @@ void __init zones_init(void)
void __init mem_init(void)
{
- unsigned long codesize, reservedpages, datasize, initsize;
- unsigned long highmemsize, tmp, ram;
-
- max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- highmemsize = 0;
-
#ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
-#endif
+ unsigned long tmp;
- totalram_pages += free_all_bootmem();
+ reset_all_zones_managed_pages();
+ for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
- reservedpages = ram = 0;
- for (tmp = 0; tmp < max_mapnr; tmp++) {
- ram++;
- if (PageReserved(mem_map+tmp))
- reservedpages++;
- }
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
- codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
- datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
- "%ldk data, %ldk init %ldk highmem)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- ram << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- highmemsize >> 10);
-}
+ free_all_bootmem();
-void
-free_reserved_mem(void *start, void *end)
-{
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page((unsigned long)start);
- totalram_pages++;
- }
+ mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
+ " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -228,16 +352,62 @@ extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (initrd_is_mapped) {
- free_reserved_mem((void*)start, (void*)end);
- printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
- }
+ if (initrd_is_mapped)
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
void free_initmem(void)
{
- free_reserved_mem(&__init_begin, &__init_end);
- printk("Freeing unused kernel memory: %dk freed\n",
- (&__init_end - &__init_begin) >> 10);
+ free_initmem_default(-1);
+}
+
+static void __init parse_memmap_one(char *p)
+{
+ char *oldp;
+ unsigned long start_at, mem_size;
+
+ if (!p)
+ return;
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
+
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ add_sysmem_bank(start_at, start_at + mem_size);
+ break;
+
+ case '$':
+ start_at = memparse(p + 1, &p);
+ mem_reserve(start_at, start_at + mem_size, 0);
+ break;
+
+ case 0:
+ mem_reserve(mem_size, 0, 0);
+ break;
+
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
+ }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+
+ if (k)
+ *k++ = 0;
+
+ parse_memmap_one(str);
+ str = k;
+ }
+
+ return 0;
}
+early_param("memmap", parse_memmap_opt);
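Assuming the usual memmap= conventions, the parser above accepts command-line
forms such as the following (all values purely illustrative):

	memmap=96M@0x00000000        register a 96 MB bank at physical address 0
	memmap=4M$0x07c00000         reserve (remove) 4 MB starting at 0x07c00000
	memmap=64M                   effectively cap usable memory at 64 MB
	memmap=96M@0,4M$0x05000000   several regions, separated by commas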
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d875..1f68558dbcc 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
*/
ENTRY(clear_page)
+
entry a1, 16
movi a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
retw
+ENDPROC(clear_page)
+
/*
* copy_page and copy_user_page are the same for non-cache-aliased configs.
*
@@ -53,6 +56,7 @@ ENTRY(clear_page)
*/
ENTRY(copy_page)
+
entry a1, 16
__loopi a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
retw
+ENDPROC(copy_page)
+
#ifdef CONFIG_MMU
/*
* If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
*/
ENTRY(clear_user_page)
+
entry a1, 32
/* Mark page dirty and determine alias. */
@@ -133,7 +140,7 @@ ENTRY(clear_user_page)
/* Setup a temporary DTLB with the color of the VPN */
- movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+ movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
movi a5, TLBTEMP_BASE_1 # virt
add a6, a2, a4 # ppn
add a2, a5, a3 # add 'color'
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
retw
+ENDPROC(clear_user_page)
+
/*
* copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
* a2 a3 a4 a5
@@ -171,7 +180,7 @@ ENTRY(clear_user_page)
ENTRY(copy_user_page)
- entry a1, 32
+ entry a1, 32
/* Mark page dirty and determine alias for destination. */
@@ -185,7 +194,7 @@ ENTRY(copy_user_page)
or a9, a9, a8
slli a4, a4, PAGE_SHIFT
s32i a9, a5, PAGE_FLAGS
- movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
+ movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
beqz a6, 1f
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
retw
+ENDPROC(copy_user_page)
+
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
*/
ENTRY(__flush_invalidate_dcache_page_alias)
+
entry sp, 16
movi a7, 0 # required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
retw
+ENDPROC(__flush_invalidate_dcache_page_alias)
#endif
ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
ENTRY(__invalidate_icache_page_alias)
+
entry sp, 16
addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
isync
retw
+ENDPROC(__invalidate_icache_page_alias)
+
#endif
/* End of special treatment in tlb miss exception */
ENTRY(__tlbtemp_mapping_end)
+
#endif /* CONFIG_MMU */
/*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
*/
ENTRY(__invalidate_icache_page)
+
entry sp, 16
___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
retw
+ENDPROC(__invalidate_icache_page)
+
/*
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_dcache_page)
+
entry sp, 16
___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
retw
+ENDPROC(__invalidate_dcache_page)
+
/*
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_page)
+
entry sp, 16
___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
dsync
retw
+ENDPROC(__flush_invalidate_dcache_page)
+
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
+
entry sp, 16
___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
dsync
retw
+ENDPROC(__flush_dcache_page)
+
/*
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_icache_range)
+
entry sp, 16
___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
retw
+ENDPROC(__invalidate_icache_range)
+
/*
* void __flush_invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_range)
+
entry sp, 16
___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
retw
+ENDPROC(__flush_invalidate_dcache_range)
+
/*
* void _flush_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_dcache_range)
+
entry sp, 16
___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
retw
+ENDPROC(__flush_dcache_range)
+
/*
* void _invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_dcache_range)
+
entry sp, 16
___invalidate_dcache_range a2 a3 a4
retw
+ENDPROC(__invalidate_dcache_range)
+
/*
* void _invalidate_icache_all(void)
*/
ENTRY(__invalidate_icache_all)
+
entry sp, 16
___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
retw
+ENDPROC(__invalidate_icache_all)
+
/*
* void _flush_invalidate_dcache_all(void)
*/
ENTRY(__flush_invalidate_dcache_all)
+
entry sp, 16
___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
retw
+ENDPROC(__flush_invalidate_dcache_all)
+
/*
* void _invalidate_dcache_all(void)
*/
ENTRY(__invalidate_dcache_all)
+
entry sp, 16
___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
retw
+ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index ca81654f3ec..3429b483d9f 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -3,6 +3,7 @@
*
* Extracted from init.c
*/
+#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -13,31 +14,86 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/initialize_mmu.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+ if (pmd_none(*pmd)) {
+ unsigned i;
+ pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, pte + i);
+
+ set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+ __func__, vaddr, pmd, pte);
+ return pte;
+ } else {
+ return pte_offset_kernel(pmd, 0);
+ }
+}
+
+static void __init fixedrange_init(void)
+{
+ BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+ init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
void __init paging_init(void)
{
memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE);
+ kmap_init();
+#endif
}
/*
* Flush the mmu and reset the associated registers to default values.
*/
-void __init init_mmu(void)
+void init_mmu(void)
{
- /* Writing zeros to the <t>TLBCFG special registers ensure
- * that valid values exist in the register. For existing
- * PGSZID<w> fields, zero selects the first element of the
- * page-size array. For nonexistent PGSZID<w> fields, zero is
- * the best value to write. Also, when changing PGSZID<w>
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+ * registers ensures that valid values exist in the registers.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
- flush_tlb_all();
+#endif
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+ /*
+ * Update the IO area mapping in case xtensa_kio_paddr has changed
+ */
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+#endif
+
+ local_flush_tlb_all();
/* Set rasid register to a known value. */
- set_rasid_register(ASID_USER_FIRST);
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
@@ -46,23 +102,3 @@ void __init init_mmu(void)
*/
set_ptevaddr_register(PGTABLE_START);
}
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void *addr)
-{
- pte_t *ptep = (pte_t *)addr;
- int i;
-
- for (i = 0; i < 1024; i++, ptep++)
- pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
- pgtable_cache = kmem_cache_create("pgd",
- PAGE_SIZE, PAGE_SIZE,
- SLAB_HWCACHE_ALIGN,
- pgd_ctor);
-}
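
For illustration of the init_mmu() change above: on our reading of the patch, the "+ 6" in the write_{d,i}tlb_entry() arguments selects TLB way 6 (the spanning way holding the fixed 512MB KIO mappings), and CA_WRITEBACK/CA_BYPASS are the cache-attribute bits added to the KIO physical base. The host-side sketch below only reproduces that arithmetic with placeholder constants; none of the values are the real XCHAL definitions.

#include <stdio.h>

/* Placeholder constants for illustration; the real values come from
 * the core's variant headers and asm/initialize_mmu.h.
 */
#define XCHAL_KIO_CACHED_VADDR	0xe0000000u
#define XCHAL_KIO_BYPASS_VADDR	0xf0000000u
#define CA_WRITEBACK		0x4u
#define CA_BYPASS		0x3u
#define KIO_TLB_WAY		6u	/* assumed meaning of the "+ 6" */

struct tlb_write { unsigned pte; unsigned entry; };

/* Mirror the four writes in init_mmu(): a cached and a bypass mapping
 * of the KIO region, installed at way 6 of both the DTLB and the ITLB.
 */
static void kio_remap(unsigned kio_paddr, struct tlb_write w[2])
{
	w[0].pte   = kio_paddr + CA_WRITEBACK;
	w[0].entry = XCHAL_KIO_CACHED_VADDR + KIO_TLB_WAY;
	w[1].pte   = kio_paddr + CA_BYPASS;
	w[1].entry = XCHAL_KIO_BYPASS_VADDR + KIO_TLB_WAY;
}

int main(void)
{
	struct tlb_write w[2];

	kio_remap(0xf0000000u, w);
	printf("cached: pte=%08x entry=%08x\n", w[0].pte, w[0].entry);
	printf("bypass: pte=%08x entry=%08x\n", w[1].pte, w[1].entry);
	return 0;
}
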
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 239461d8ea8..5ece856c572 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -18,7 +18,6 @@
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-#include <asm/system.h>
#include <asm/cacheflush.h>
@@ -49,7 +48,7 @@ static inline void __flush_dtlb_all (void)
}
-void flush_tlb_all (void)
+void local_flush_tlb_all(void)
{
__flush_itlb_all();
__flush_dtlb_all();
@@ -61,19 +60,23 @@ void flush_tlb_all (void)
* a new context will be assigned to it.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
+ int cpu = smp_processor_id();
+
if (mm == current->active_mm) {
- int flags;
- local_save_flags(flags);
- __get_new_mmu_context(mm);
- __load_mmu_context(mm);
+ unsigned long flags;
+ local_irq_save(flags);
+ mm->context.asid[cpu] = NO_CONTEXT;
+ activate_context(mm, cpu);
local_irq_restore(flags);
+ } else {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ mm->context.cpu = -1;
}
- else
- mm->context = 0;
}
+
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
@@ -82,26 +85,28 @@ void flush_tlb_mm(struct mm_struct *mm)
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
-void flush_tlb_range (struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
+ int cpu = smp_processor_id();
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
- if (mm->context == NO_CONTEXT)
+ if (mm->context.asid[cpu] == NO_CONTEXT)
return;
#if 0
printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
- (unsigned long)mm->context, start, end);
+ (unsigned long)mm->context.asid[cpu], start, end);
#endif
- local_save_flags(flags);
+ local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register();
- set_rasid_register (ASID_INSERT(mm->context));
+
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
start &= PAGE_MASK;
- if (vma->vm_flags & VM_EXEC)
+ if (vma->vm_flags & VM_EXEC)
while(start < end) {
invalidate_itlb_mapping(start);
invalidate_dtlb_mapping(start);
@@ -115,23 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
set_rasid_register(oldpid);
} else {
- flush_tlb_mm(mm);
+ local_flush_tlb_mm(mm);
}
local_irq_restore(flags);
}
-void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ int cpu = smp_processor_id();
struct mm_struct* mm = vma->vm_mm;
unsigned long flags;
int oldpid;
- if(mm->context == NO_CONTEXT)
+ if (mm->context.asid[cpu] == NO_CONTEXT)
return;
- local_save_flags(flags);
+ local_irq_save(flags);
- oldpid = get_rasid_register();
+ oldpid = get_rasid_register();
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page);
@@ -142,3 +149,130 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags);
}
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ local_flush_tlb_all();
+ }
+}
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+ struct task_struct *task = get_current();
+ struct mm_struct *mm = task->mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!mm)
+ mm = task->active_mm;
+ pgd = pgd_offset(mm, vaddr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none_or_clear_bad(pmd))
+ return 0;
+ pte = pte_offset_map(pmd, vaddr);
+ if (!pte)
+ return 0;
+ return pte_val(*pte);
+}
+
+enum {
+ TLB_SUSPICIOUS = 1,
+ TLB_INSANE = 2,
+};
+
+static void tlb_insane(void)
+{
+ BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+ WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or that
+ * the PTE is marked as non-present. A non-present PTE whose page has a
+ * non-zero refcount and a zero mapcount is normal for a batched TLB flush
+ * operation. A zero refcount means that the page was freed prematurely.
+ * A non-zero mapcount is unusual, but does not necessarily mean an error,
+ * so such entries are marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+ unsigned tlbidx = w | (e << PAGE_SHIFT);
+ unsigned r0 = dtlb ?
+ read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+ unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ unsigned pte = get_pte_for_vaddr(vpn);
+ unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+ unsigned tlb_asid = r0 & ASID_MASK;
+ bool kernel = tlb_asid == 1;
+ int rc = 0;
+
+ if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+ pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+ dtlb ? 'D' : 'I', w, e, vpn,
+ kernel ? "kernel" : "user");
+ rc |= TLB_INSANE;
+ }
+
+ if (tlb_asid == mm_asid) {
+ unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+ read_itlb_translation(tlbidx);
+ if ((pte ^ r1) & PAGE_MASK) {
+ pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+ if (pte == 0 || !pte_present(__pte(pte))) {
+ struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+ pr_err("page refcount: %d, mapcount: %d\n",
+ page_count(p),
+ page_mapcount(p));
+ if (!page_count(p))
+ rc |= TLB_INSANE;
+ else if (page_mapped(p))
+ rc |= TLB_SUSPICIOUS;
+ } else {
+ rc |= TLB_INSANE;
+ }
+ }
+ }
+ return rc;
+}
+
+void check_tlb_sanity(void)
+{
+ unsigned long flags;
+ unsigned w, e;
+ int bug = 0;
+
+ local_irq_save(flags);
+ for (w = 0; w < DTLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, true);
+ for (w = 0; w < ITLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, false);
+ if (bug & TLB_INSANE)
+ tlb_insane();
+ if (bug & TLB_SUSPICIOUS)
+ tlb_suspicious();
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */
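
To make the classification rules in the comment above check_tlb_entry() easier to follow, here is a minimal host-side sketch that applies the same tests to a single (VPN, ASID, PA, PTE) tuple. The constants are placeholders, and the refcount/mapcount refinement of the kernel version is collapsed into a single "suspicious" case; this illustrates the rules, it is not the kernel code.

#include <stdio.h>
#include <stdbool.h>

#define TLB_SUSPICIOUS	1
#define TLB_INSANE	2

/* Placeholder constants, for illustration only. */
#define TASK_SIZE	0x40000000u
#define PAGE_MASK	0xfffff000u

/* Apply the same rules as check_tlb_entry():
 *  - the kernel ASID (1) must map kernel addresses, user ASIDs (>= 4)
 *    must map user addresses;
 *  - for the currently loaded ASID the TLB translation must match the PTE;
 *    a mismatch with a non-present PTE is only "suspicious" here (the
 *    kernel version refines this further via page refcount/mapcount).
 */
static int classify(unsigned vpn, unsigned tlb_asid, unsigned mm_asid,
		    unsigned tlb_pa, unsigned pte, bool pte_present)
{
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel))
		rc |= TLB_INSANE;

	if (tlb_asid == mm_asid && ((pte ^ tlb_pa) & PAGE_MASK)) {
		if (pte == 0 || !pte_present)
			rc |= TLB_SUSPICIOUS;
		else
			rc |= TLB_INSANE;
	}
	return rc;
}

int main(void)
{
	/* Kernel ASID mapping a user address: insane. */
	printf("%d\n", classify(0x10000000, 1, 1, 0x10000000, 0x10000000, true));
	/* Stale translation for the current user ASID: insane. */
	printf("%d\n", classify(0x10000000, 4, 4, 0x12345000, 0x09999000, true));
	return 0;
}
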
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
new file mode 100644
index 00000000000..69ffbe80f18
--- /dev/null
+++ b/arch/xtensa/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
new file mode 100644
index 00000000000..5f03a593d84
--- /dev/null
+++ b/arch/xtensa/oprofile/backtrace.c
@@ -0,0 +1,169 @@
+/**
+ * @file backtrace.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+/* A struct that maps to the part of the frame containing the a0 and
+ * a1 registers.
+ */
+struct frame_start {
+ unsigned long a0;
+ unsigned long a1;
+};
+
+static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
+{
+ unsigned long windowstart = regs->windowstart;
+ unsigned long windowbase = regs->windowbase;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+ unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
+ int index;
+
+ /* First add the current PC to the trace. */
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+
+ /* Two steps:
+ *
+ * 1. Look through the register window for the
+ * previous PCs in the call trace.
+ *
+ * 2. Look on the stack.
+ */
+
+ /* Step 1. */
+ /* Rotate WINDOWSTART to move the bit corresponding to
+ * the current window to the bit #0.
+ */
+ windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+ /* Look for bits that are set, they correspond to
+ * valid windows.
+ */
+ for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+ if (windowstart & (1 << index)) {
+ /* Read a0 and a1 from the
+ * corresponding position in AREGs.
+ */
+ a0 = regs->areg[index * 4];
+ a1 = regs->areg[index * 4 + 1];
+ /* Get the PC from a0 and a1. */
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Add the PC to the trace. */
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ }
+
+ /* Step 2. */
+ /* We are done with the register window; we need to
+ * look through the stack.
+ */
+ if (depth > 0) {
+ /* Start from the a1 register. */
+ /* a1 = regs->areg[1]; */
+ while (a0 != 0 && depth--) {
+
+ struct frame_start frame_start;
+ /* Get the location of a0 and a1 for the
+ * previous frame from the current a1.
+ */
+ unsigned long *psp = (unsigned long *)a1;
+ psp -= 4;
+
+ /* Check if the region is OK to access. */
+ if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
+ return;
+ /* Copy a1, a0 from user space stack frame. */
+ if (__copy_from_user_inatomic(&frame_start, psp,
+ sizeof(frame_start)))
+ return;
+
+ a0 = frame_start.a0;
+ a1 = frame_start.a1;
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ }
+ }
+}
+
+static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
+{
+ unsigned long pc = regs->pc;
+ unsigned long *psp;
+ unsigned long sp_start, sp_end;
+ unsigned long a0 = regs->areg[0];
+ unsigned long a1 = regs->areg[1];
+
+ sp_start = a1 & ~(THREAD_SIZE-1);
+ sp_end = sp_start + THREAD_SIZE;
+
+ /* Spill the register window to the stack first. */
+ spill_registers();
+
+ /* Read the stack frames one by one and create the PC
+ * from the a0 and a1 registers saved there.
+ */
+ while (a1 > sp_start && a1 < sp_end && depth--) {
+ pc = MAKE_PC_FROM_RA(a0, pc);
+
+ /* Add the PC to the trace. */
+ oprofile_add_trace(pc);
+ if (pc == (unsigned long) &common_exception_return) {
+ regs = (struct pt_regs *)a1;
+ if (user_mode(regs)) {
+ pc = regs->pc;
+ if (pc != 0 && pc <= TASK_SIZE)
+ oprofile_add_trace(pc);
+ else
+ return;
+ return xtensa_backtrace_user(regs, depth);
+ }
+ a0 = regs->areg[0];
+ a1 = regs->areg[1];
+ continue;
+ }
+
+ psp = (unsigned long *)a1;
+
+ a0 = *(psp - 4);
+ a1 = *(psp - 3);
+
+ if (a1 <= (unsigned long)psp)
+ return;
+
+ }
+ return;
+}
+
+void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ if (user_mode(regs))
+ xtensa_backtrace_user(regs, depth);
+ else
+ xtensa_backtrace_kernel(regs, depth);
+}
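
The "Rotate WINDOWSTART" comment in xtensa_backtrace_user() above is the least obvious part of the user backtrace, so the host-side sketch below reproduces just that bit manipulation, with an assumed WSBITS of 16 (the real value depends on the core configuration), to show how the live caller windows end up at rotated indices counted back from the current window.

#include <stdio.h>

#define WSBITS 16	/* placeholder; the real value is per-core */

/* Rotate WINDOWSTART so that the bit for the current window (selected
 * by WINDOWBASE) lands on bit 0, exactly as xtensa_backtrace_user()
 * does with: ws = (ws << WSBITS | ws) >> wb.
 */
static unsigned rotate_windowstart(unsigned ws, unsigned wb)
{
	return ((ws << WSBITS) | ws) >> wb;
}

int main(void)
{
	unsigned ws = 0x0038;	/* windows 3, 4 and 5 are valid */
	unsigned wb = 5;	/* currently executing in window 5 */
	unsigned rot = rotate_windowstart(ws, wb);
	unsigned index;

	/* Bits 1..WSBITS-1 of the rotated value are the caller windows,
	 * counted backwards from the current one: here windows 4 and 3
	 * show up at indices 15 and 14.
	 */
	for (index = WSBITS - 1; index > 0; index--)
		if (rot & (1u << index))
			printf("live caller window at rotated index %u\n", index);
	return 0;
}
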
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
new file mode 100644
index 00000000000..a67eea37976
--- /dev/null
+++ b/arch/xtensa/oprofile/init.c
@@ -0,0 +1,26 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+
+extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ ops->backtrace = xtensa_backtrace;
+ return -ENODEV;
+}
+
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/xtensa/platforms/iss/Makefile b/arch/xtensa/platforms/iss/Makefile
index af96e314d71..b3e89291cfb 100644
--- a/arch/xtensa/platforms/iss/Makefile
+++ b/arch/xtensa/platforms/iss/Makefile
@@ -4,5 +4,7 @@
# "prom monitor" library routines under Linux.
#
-obj-y = io.o console.o setup.o network.o
-
+obj-y = setup.o
+obj-$(CONFIG_TTY) += console.o
+obj-$(CONFIG_NET) += network.o
+obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 2c723e8b30d..70cb408bc20 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -19,7 +19,6 @@
#include <linux/param.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
-#include <linux/serialP.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
@@ -34,26 +33,14 @@
#endif
#define SERIAL_MAX_NUM_LINES 1
-#define SERIAL_TIMER_VALUE (20 * HZ)
+#define SERIAL_TIMER_VALUE (HZ / 10)
static struct tty_driver *serial_driver;
+static struct tty_port serial_port;
static struct timer_list serial_timer;
static DEFINE_SPINLOCK(timer_lock);
-int errno;
-
-static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__));
-static int __simc (int a, int b, int c, int d, int e, int f)
-{
- int ret;
- __asm__ __volatile__ ("simcall\n"
- "mov %0, a2\n"
- "mov %1, a3\n" : "=a" (ret), "=a" (errno)
- : : "a2", "a3");
- return ret;
-}
-
static char *serial_version = "0.1";
static char *serial_name = "ISS serial driver";
@@ -68,20 +55,14 @@ static void rs_poll(unsigned long);
static int rs_open(struct tty_struct *tty, struct file * filp)
{
- int line = tty->index;
-
- if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES))
- return -ENODEV;
-
- spin_lock(&timer_lock);
-
+ tty->port = &serial_port;
+ spin_lock_bh(&timer_lock);
if (tty->count == 1) {
- init_timer(&serial_timer);
- serial_timer.data = (unsigned long) tty;
- serial_timer.function = rs_poll;
+ setup_timer(&serial_timer, rs_poll,
+ (unsigned long)&serial_port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
- spin_unlock(&timer_lock);
+ spin_unlock_bh(&timer_lock);
return 0;
}
@@ -99,10 +80,10 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
*/
static void rs_close(struct tty_struct *tty, struct file * filp)
{
- spin_lock(&timer_lock);
+ spin_lock_bh(&timer_lock);
if (tty->count == 1)
del_timer_sync(&serial_timer);
- spin_unlock(&timer_lock);
+ spin_unlock_bh(&timer_lock);
}
@@ -111,28 +92,26 @@ static int rs_write(struct tty_struct * tty,
{
/* see drivers/char/serialX.c to reference original version */
- __simc (SYS_write, 1, (unsigned long)buf, count, 0, 0);
+ simc_write(1, buf, count);
return count;
}
static void rs_poll(unsigned long priv)
{
- struct tty_struct* tty = (struct tty_struct*) priv;
-
- struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+ struct tty_port *port = (struct tty_port *)priv;
int i = 0;
unsigned char c;
spin_lock(&timer_lock);
- while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
- __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
- tty_insert_flip_char(tty, c, TTY_NORMAL);
+ while (simc_poll(0)) {
+ simc_read(0, &c, 1);
+ tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
if (i)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
@@ -142,12 +121,7 @@ static void rs_poll(unsigned long priv)
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
- char buf[2];
-
- buf[0] = ch;
- buf[1] = '\0'; /* Is this NULL necessary? */
- __simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
- return 1;
+ return rs_write(tty, &ch, 1);
}
static void rs_flush_chars(struct tty_struct *tty)
@@ -210,13 +184,14 @@ static const struct tty_operations serial_ops = {
int __init rs_init(void)
{
- serial_driver = alloc_tty_driver(1);
+ tty_port_init(&serial_port);
+
+ serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
printk ("%s %s\n", serial_name, serial_version);
/* Initialize the tty_driver structure */
- serial_driver->owner = THIS_MODULE;
serial_driver->driver_name = "iss_serial";
serial_driver->name = "ttyS";
serial_driver->major = TTY_MAJOR;
@@ -229,6 +204,7 @@ int __init rs_init(void)
serial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(serial_driver, &serial_ops);
+ tty_port_link_device(&serial_port, serial_driver, 0);
if (tty_register_driver(serial_driver))
panic("Couldn't register serial driver\n");
@@ -244,6 +220,7 @@ static __exit void rs_exit(void)
printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
error);
put_tty_driver(serial_driver);
+ tty_port_destroy(&serial_port);
}
@@ -266,8 +243,7 @@ static void iss_console_write(struct console *co, const char *s, unsigned count)
int len = strlen(s);
if (s != 0 && *s != 0)
- __simc (SYS_write, 1, (unsigned long)s,
- count < len ? count : len,0,0);
+ simc_write(1, s, count < len ? count : len);
}
static struct tty_driver* iss_console_device(struct console *c, int *index)
diff --git a/arch/xtensa/platforms/iss/include/platform/serial.h b/arch/xtensa/platforms/iss/include/platform/serial.h
new file mode 100644
index 00000000000..16aec542d43
--- /dev/null
+++ b/arch/xtensa/platforms/iss/include/platform/serial.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_ISS_SERIAL_H
+#define __ASM_XTENSA_ISS_SERIAL_H
+
+/* Has no meaning on ISS, but is needed by 8250_early.c */
+#define BASE_BAUD 0
+
+#endif /* __ASM_XTENSA_ISS_SERIAL_H */
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index b7952c06a2b..12b15ad1e58 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -57,6 +57,61 @@
#define XTISS_SELECT_ONE_WRITE 2
#define XTISS_SELECT_ONE_EXCEPT 3
+static int errno;
+
+static inline int __simc(int a, int b, int c, int d)
+{
+ int ret;
+ register int a1 asm("a2") = a;
+ register int b1 asm("a3") = b;
+ register int c1 asm("a4") = c;
+ register int d1 asm("a5") = d;
+ __asm__ __volatile__ (
+ "simcall\n"
+ "mov %0, a2\n"
+ "mov %1, a3\n"
+ : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
+ : "r"(c1), "r"(d1)
+ : "memory");
+ return ret;
+}
+
+static inline int simc_open(const char *file, int flags, int mode)
+{
+ return __simc(SYS_open, (int) file, flags, mode);
+}
+
+static inline int simc_close(int fd)
+{
+ return __simc(SYS_close, fd, 0, 0);
+}
+
+static inline int simc_ioctl(int fd, int request, void *arg)
+{
+ return __simc(SYS_ioctl, fd, request, (int) arg);
+}
+
+static inline int simc_read(int fd, void *buf, size_t count)
+{
+ return __simc(SYS_read, fd, (int) buf, count);
+}
+
+static inline int simc_write(int fd, const void *buf, size_t count)
+{
+ return __simc(SYS_write, fd, (int) buf, count);
+}
+
+static inline int simc_poll(int fd)
+{
+ struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
+
+ return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
+}
+
+static inline int simc_lseek(int fd, uint32_t off, int whence)
+{
+ return __simc(SYS_lseek, fd, off, whence);
+}
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
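
The simc_* helpers above only work under the ISS simulator, so as a compile-and-run illustration of how they layer over the four-argument __simc() trap, here is a host-side mock in which __simc() dispatches to ordinary POSIX calls. The SYS_* numbers, the long-based prototype and the errno_sim variable are artifacts of the mock, not the real header.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

/* Placeholder request codes; the real ones are defined earlier in
 * simcall.h and have different values.
 */
enum { SYS_open = 1, SYS_close, SYS_read, SYS_write };

static int errno_sim;	/* stands in for the header's static errno */

/* Host-side stand-in for the simcall trap: same four-argument shape,
 * result in the return value.  long is used so the mock also works on
 * 64-bit hosts; the real interface is int-only because xtensa is 32-bit.
 */
static long __simc(long a, long b, long c, long d)
{
	switch (a) {
	case SYS_open:	return open((const char *)b, (int)c, (int)d);
	case SYS_close:	return close((int)b);
	case SYS_read:	return read((int)b, (void *)c, (size_t)d);
	case SYS_write:	return write((int)b, (const void *)c, (size_t)d);
	default:	errno_sim = -1; return -1;
	}
}

static int simc_open(const char *file, int flags, int mode)
{
	return (int)__simc(SYS_open, (long)file, flags, mode);
}

static int simc_write(int fd, const void *buf, size_t count)
{
	return (int)__simc(SYS_write, fd, (long)buf, (long)count);
}

static int simc_close(int fd)
{
	return (int)__simc(SYS_close, fd, 0, 0);
}

int main(void)
{
	const char msg[] = "hello from the mock simcall layer\n";
	int fd = simc_open("/dev/null", O_WRONLY, 0);

	if (fd < 0)
		return 1;
	simc_write(fd, msg, strlen(msg));
	return simc_close(fd);
}
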
diff --git a/arch/xtensa/platforms/iss/io.c b/arch/xtensa/platforms/iss/io.c
deleted file mode 100644
index 571d0b24f89..00000000000
--- a/arch/xtensa/platforms/iss/io.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/* This file isn't really needed right now. */
-
-#if 0
-
-#include <asm/io.h>
-#include <platform/platform-iss/simcall.h>
-
-extern int __simc ();
-
-
-char iss_serial_getc()
-{
- char c;
- __simc( SYS_read, 0, &c, 1 );
- return c;
-}
-
-void iss_serial_putc( char c )
-{
- __simc( SYS_write, 1, &c, 1 );
-}
-
-void iss_serial_puts( char *s )
-{
- if( s != 0 && *s != 0 )
- __simc( SYS_write, 1, s, strlen(s) );
-}
-
-/*#error Need I/O ports to specific hardware!*/
-
-#endif
-
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 7dde2445642..d05f8feeb8d 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -38,7 +38,7 @@
#define DRIVER_NAME "iss-netdev"
#define ETH_MAX_PACKET 1500
#define ETH_HEADER_OTHER 14
-#define ISS_NET_TIMER_VALUE (2 * HZ)
+#define ISS_NET_TIMER_VALUE (HZ / 10)
static DEFINE_SPINLOCK(opened_lock);
@@ -56,8 +56,6 @@ static LIST_HEAD(devices);
struct tuntap_info {
char dev_name[IFNAMSIZ];
- int fixed_config;
- unsigned char gw[ETH_ALEN];
int fd;
};
@@ -67,7 +65,6 @@ struct tuntap_info {
/* This structure contains our private information for the driver. */
struct iss_net_private {
-
struct list_head device_list;
struct list_head opened_list;
@@ -83,9 +80,6 @@ struct iss_net_private {
int index;
int mtu;
- unsigned char mac[ETH_ALEN];
- int have_mac;
-
struct {
union {
struct tuntap_info tuntap;
@@ -101,55 +95,6 @@ struct iss_net_private {
};
-/* ======================= ISS SIMCALL INTERFACE =========================== */
-
-/* Note: __simc must _not_ be declared inline! */
-
-static int errno;
-
-static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__));
-static int __simc (int a, int b, int c, int d, int e, int f)
-{
- int ret;
- __asm__ __volatile__ ("simcall\n"
- "mov %0, a2\n"
- "mov %1, a3\n" : "=a" (ret), "=a" (errno)
- : : "a2", "a3");
- return ret;
-}
-
-static int inline simc_open(char *file, int flags, int mode)
-{
- return __simc(SYS_open, (int) file, flags, mode, 0, 0);
-}
-
-static int inline simc_close(int fd)
-{
- return __simc(SYS_close, fd, 0, 0, 0, 0);
-}
-
-static int inline simc_ioctl(int fd, int request, void *arg)
-{
- return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
-}
-
-static int inline simc_read(int fd, void *buf, size_t count)
-{
- return __simc(SYS_read, fd, (int) buf, count, 0, 0);
-}
-
-static int inline simc_write(int fd, void *buf, size_t count)
-{
- return __simc(SYS_write, fd, (int) buf, count, 0, 0);
-}
-
-static int inline simc_poll(int fd)
-{
- struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
-
- return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,0,0);
-}
-
/* ================================ HELPERS ================================ */
@@ -167,68 +112,48 @@ static char *split_if_spec(char *str, ...)
*arg = str;
if (end == NULL)
return NULL;
- *end ++ = '\0';
+ *end++ = '\0';
str = end;
}
va_end(ap);
return str;
}
+/* Set Ethernet address of the specified device. */
-#if 0
-/* Adjust SKB. */
-
-struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
+static void setup_etheraddr(struct net_device *dev, char *str)
{
- if ((skb != NULL) && (skb_tailroom(skb) < extra)) {
- struct sk_buff *skb2;
+ unsigned char *addr = dev->dev_addr;
- skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
- dev_kfree_skb(skb);
- skb = skb2;
- }
- if (skb != NULL)
- skb_put(skb, extra);
+ if (str == NULL)
+ goto random;
- return skb;
-}
-#endif
-
-/* Return the IP address as a string for a given device. */
-
-static void dev_ip_addr(void *d, char *buf, char *bin_buf)
-{
- struct net_device *dev = d;
- struct in_device *ip = dev->ip_ptr;
- struct in_ifaddr *in;
- __be32 addr;
-
- if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) {
- printk(KERN_WARNING "Device not assigned an IP address!\n");
- return;
+ if (!mac_pton(str, addr)) {
+ pr_err("%s: failed to parse '%s' as an ethernet address\n",
+ dev->name, str);
+ goto random;
}
-
- addr = in->ifa_address;
- sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
- (addr >> 16) & 0xff, addr >> 24);
-
- if (bin_buf) {
- bin_buf[0] = addr & 0xff;
- bin_buf[1] = (addr >> 8) & 0xff;
- bin_buf[2] = (addr >> 16) & 0xff;
- bin_buf[3] = addr >> 24;
+ if (is_multicast_ether_addr(addr)) {
+ pr_err("%s: attempt to assign a multicast ethernet address\n",
+ dev->name);
+ goto random;
}
-}
-
-/* Set Ethernet address of the specified device. */
+ if (!is_valid_ether_addr(addr)) {
+ pr_err("%s: attempt to assign an invalid ethernet address\n",
+ dev->name);
+ goto random;
+ }
+ if (!is_local_ether_addr(addr))
+ pr_warn("%s: assigning a globally valid ethernet address\n",
+ dev->name);
+ return;
-static void inline set_ether_mac(void *d, unsigned char *addr)
-{
- struct net_device *dev = d;
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+random:
+ pr_info("%s: choosing a random ethernet address\n",
+ dev->name);
+ eth_hw_addr_random(dev);
}
-
/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
static int tuntap_open(struct iss_net_private *lp)
@@ -238,24 +163,21 @@ static int tuntap_open(struct iss_net_private *lp)
int err = -EINVAL;
int fd;
- /* We currently only support a fixed configuration. */
-
- if (!lp->tp.info.tuntap.fixed_config)
- return -EINVAL;
-
- if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */
- printk("Failed to open /dev/net/tun, returned %d "
- "(errno = %d)\n", fd, errno);
+ fd = simc_open("/dev/net/tun", 02, 0); /* O_RDWR */
+ if (fd < 0) {
+ pr_err("%s: failed to open /dev/net/tun, returned %d (errno = %d)\n",
+ lp->dev->name, fd, errno);
return fd;
}
- memset(&ifr, 0, sizeof ifr);
+ memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
- strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name);
+ strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
- if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) {
- printk("Failed to set interface, returned %d "
- "(errno = %d)\n", err, errno);
+ err = simc_ioctl(fd, TUNSETIFF, &ifr);
+ if (err < 0) {
+ pr_err("%s: failed to set interface %s, returned %d (errno = %d)\n",
+ lp->dev->name, dev_name, err, errno);
simc_close(fd);
return err;
}
@@ -266,27 +188,17 @@ static int tuntap_open(struct iss_net_private *lp)
static void tuntap_close(struct iss_net_private *lp)
{
-#if 0
- if (lp->tp.info.tuntap.fixed_config)
- iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name);
-#endif
simc_close(lp->tp.info.tuntap.fd);
lp->tp.info.tuntap.fd = -1;
}
-static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb)
+static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb)
{
-#if 0
- *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
- if (*skb == NULL)
- return -ENOMEM;
-#endif
-
return simc_read(lp->tp.info.tuntap.fd,
(*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
}
-static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb)
+static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
{
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
@@ -302,45 +214,45 @@ static int tuntap_poll(struct iss_net_private *lp)
}
/*
- * Currently only a device name is supported.
- * ethX=tuntap[,[mac address][,[device name]]]
+ * ethX=tuntap,[mac address],device name
*/
static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
{
- const int len = strlen(TRANSPORT_TUNTAP_NAME);
+ struct net_device *dev = lp->dev;
char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
/* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
- if (strncmp(init, TRANSPORT_TUNTAP_NAME, len))
+ if (strncmp(init, TRANSPORT_TUNTAP_NAME,
+ sizeof(TRANSPORT_TUNTAP_NAME) - 1))
return 0;
- if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') {
- if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) {
- printk("Extra garbage on specification : '%s'\n", rem);
+ init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
+ if (*init == ',') {
+ rem = split_if_spec(init + 1, &mac_str, &dev_name);
+ if (rem != NULL) {
+ pr_err("%s: extra garbage on specification : '%s'\n",
+ dev->name, rem);
return 0;
}
} else if (*init != '\0') {
- printk("Invalid argument: %s. Skipping device!\n", init);
+ pr_err("%s: invalid argument: %s. Skipping device!\n",
+ dev->name, init);
return 0;
}
- if (dev_name) {
- strncpy(lp->tp.info.tuntap.dev_name, dev_name,
- sizeof lp->tp.info.tuntap.dev_name);
- lp->tp.info.tuntap.fixed_config = 1;
- } else
- strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME);
+ if (!dev_name) {
+ pr_err("%s: missing tuntap device name\n", dev->name);
+ return 0;
+ }
+ strlcpy(lp->tp.info.tuntap.dev_name, dev_name,
+ sizeof(lp->tp.info.tuntap.dev_name));
-#if 0
- if (setup_etheraddr(mac_str, lp->mac))
- lp->have_mac = 1;
-#endif
- lp->mtu = TRANSPORT_TUNTAP_MTU;
+ setup_etheraddr(dev, mac_str);
- //lp->info.tuntap.gate_addr = gate_addr;
+ lp->mtu = TRANSPORT_TUNTAP_MTU;
lp->tp.info.tuntap.fd = -1;
@@ -351,13 +263,6 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
lp->tp.protocol = tuntap_protocol;
lp->tp.poll = tuntap_poll;
- printk("TUN/TAP backend - ");
-#if 0
- if (lp->host.gate_addr != NULL)
- printk("IP = %s", lp->host.gate_addr);
-#endif
- printk("\n");
-
return 1;
}
@@ -376,7 +281,8 @@ static int iss_net_rx(struct net_device *dev)
/* Try to allocate memory, if it fails, try again next round. */
- if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) {
+ skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER);
+ if (skb == NULL) {
lp->stats.rx_dropped++;
return 0;
}
@@ -396,7 +302,6 @@ static int iss_net_rx(struct net_device *dev)
lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++;
- // netif_rx(skb);
netif_rx_ni(skb);
return pkt_len;
}
@@ -427,11 +332,11 @@ static int iss_net_poll(void)
spin_unlock(&lp->lock);
if (err < 0) {
- printk(KERN_ERR "Device '%s' read returned %d, "
- "shutting it down\n", lp->dev->name, err);
+ pr_err("Device '%s' read returned %d, shutting it down\n",
+ lp->dev->name, err);
dev_close(lp->dev);
} else {
- // FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ);
+ /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */
}
}
@@ -442,14 +347,11 @@ static int iss_net_poll(void)
static void iss_net_timer(unsigned long priv)
{
- struct iss_net_private* lp = (struct iss_net_private*) priv;
+ struct iss_net_private *lp = (struct iss_net_private *)priv;
spin_lock(&lp->lock);
-
iss_net_poll();
-
mod_timer(&lp->timer, jiffies + lp->timer_val);
-
spin_unlock(&lp->lock);
}
@@ -457,19 +359,14 @@ static void iss_net_timer(unsigned long priv)
static int iss_net_open(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
- char addr[sizeof "255.255.255.255\0"];
int err;
spin_lock(&lp->lock);
- if ((err = lp->tp.open(lp)) < 0)
+ err = lp->tp.open(lp);
+ if (err < 0)
goto out;
- if (!lp->have_mac) {
- dev_ip_addr(dev, addr, &lp->mac[2]);
- set_ether_mac(dev, lp->mac);
- }
-
netif_start_queue(dev);
/* clear buffer - it can happen that the host side of the interface
@@ -497,7 +394,6 @@ out:
static int iss_net_close(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
-printk("iss_net_close!\n");
netif_stop_queue(dev);
spin_lock(&lp->lock);
@@ -539,7 +435,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
netif_start_queue(dev);
- printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len);
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -557,56 +453,27 @@ static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
static void iss_net_set_multicast_list(struct net_device *dev)
{
-#if 0
- if (dev->flags & IFF_PROMISC)
- return;
- else if (!netdev_mc_empty(dev))
- dev->flags |= IFF_ALLMULTI;
- else
- dev->flags &= ~IFF_ALLMULTI;
-#endif
}
static void iss_net_tx_timeout(struct net_device *dev)
{
-#if 0
- dev->trans_start = jiffies;
- netif_wake_queue(dev);
-#endif
}
static int iss_net_set_mac(struct net_device *dev, void *addr)
{
-#if 0
struct iss_net_private *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
spin_lock(&lp->lock);
memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
spin_unlock(&lp->lock);
-#endif
-
return 0;
}
static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{
-#if 0
- struct iss_net_private *lp = netdev_priv(dev);
- int err = 0;
-
- spin_lock(&lp->lock);
-
- // FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user);
-
- if (new_mtu < 0)
- err = new_mtu;
- else
- dev->mtu = new_mtu;
-
- spin_unlock(&lp->lock);
- return err;
-#endif
return -EINVAL;
}
@@ -631,7 +498,6 @@ static const struct net_device_ops iss_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = iss_net_change_mtu,
.ndo_set_mac_address = iss_net_set_mac,
- //.ndo_do_ioctl = iss_net_ioctl,
.ndo_tx_timeout = iss_net_tx_timeout,
.ndo_set_rx_mode = iss_net_set_multicast_list,
};
@@ -642,24 +508,29 @@ static int iss_net_configure(int index, char *init)
struct iss_net_private *lp;
int err;
- if ((dev = alloc_etherdev(sizeof *lp)) == NULL) {
- printk(KERN_ERR "eth_configure: failed to allocate device\n");
+ dev = alloc_etherdev(sizeof(*lp));
+ if (dev == NULL) {
+ pr_err("eth_configure: failed to allocate device\n");
return 1;
}
/* Initialize private element. */
lp = netdev_priv(dev);
- *lp = ((struct iss_net_private) {
+ *lp = (struct iss_net_private) {
.device_list = LIST_HEAD_INIT(lp->device_list),
.opened_list = LIST_HEAD_INIT(lp->opened_list),
.lock = __SPIN_LOCK_UNLOCKED(lp.lock),
.dev = dev,
.index = index,
- //.fd = -1,
- .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
- .have_mac = 0,
- });
+ };
+
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+ * and fail.
+ */
+ snprintf(dev->name, sizeof(dev->name), "eth%d", index);
/*
* Try all transport protocols.
@@ -667,14 +538,12 @@ static int iss_net_configure(int index, char *init)
*/
if (!tuntap_probe(lp, index, init)) {
- printk("Invalid arguments. Skipping device!\n");
+ pr_err("%s: invalid arguments. Skipping device!\n",
+ dev->name);
goto errout;
}
- printk(KERN_INFO "Netdevice %d ", index);
- if (lp->have_mac)
- printk("(%pM) ", lp->mac);
- printk(": ");
+ pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr);
/* sysfs register */
@@ -690,14 +559,7 @@ static int iss_net_configure(int index, char *init)
lp->pdev.id = index;
lp->pdev.name = DRIVER_NAME;
platform_device_register(&lp->pdev);
- SET_NETDEV_DEV(dev,&lp->pdev.dev);
-
- /*
- * If this name ends up conflicting with an existing registered
- * netdevice, that is OK, register_netdev{,ice}() will notice this
- * and fail.
- */
- snprintf(dev->name, sizeof dev->name, "eth%d", index);
+ SET_NETDEV_DEV(dev, &lp->pdev.dev);
dev->netdev_ops = &iss_netdev_ops;
dev->mtu = lp->mtu;
@@ -709,7 +571,7 @@ static int iss_net_configure(int index, char *init)
rtnl_unlock();
if (err) {
- printk("Error registering net device!\n");
+ pr_err("%s: error registering net device!\n", dev->name);
/* XXX: should we call ->remove() here? */
free_netdev(dev);
return 1;
@@ -718,16 +580,11 @@ static int iss_net_configure(int index, char *init)
init_timer(&lp->tl);
lp->tl.function = iss_net_user_timer_expire;
-#if 0
- if (lp->have_mac)
- set_ether_mac(dev, lp->mac);
-#endif
return 0;
errout:
- // FIXME: unregister; free, etc..
+ /* FIXME: unregister; free, etc.. */
return -EIO;
-
}
/* ------------------------------------------------------------------------- */
@@ -749,27 +606,28 @@ struct iss_net_init {
#define ERR KERN_ERR "iss_net_setup: "
-static int iss_net_setup(char *str)
+static int __init iss_net_setup(char *str)
{
struct iss_net_private *device = NULL;
struct iss_net_init *new;
struct list_head *ele;
char *end;
- int n;
+ int rc;
+ unsigned n;
- n = simple_strtoul(str, &end, 0);
- if (end == str) {
- printk(ERR "Failed to parse '%s'\n", str);
- return 1;
- }
- if (n < 0) {
- printk(ERR "Device %d is negative\n", n);
+ end = strchr(str, '=');
+ if (!end) {
+ printk(ERR "Expected '=' after device number\n");
return 1;
}
- if (*(str = end) != '=') {
- printk(ERR "Expected '=' after device number\n");
+ *end = 0;
+ rc = kstrtouint(str, 0, &n);
+ *end = '=';
+ if (rc < 0) {
+ printk(ERR "Failed to parse '%s'\n", str);
return 1;
}
+ str = end;
spin_lock(&devices_lock);
@@ -782,12 +640,13 @@ static int iss_net_setup(char *str)
spin_unlock(&devices_lock);
if (device && device->index == n) {
- printk(ERR "Device %d already configured\n", n);
+ printk(ERR "Device %u already configured\n", n);
return 1;
}
- if ((new = alloc_bootmem(sizeof new)) == NULL) {
- printk("Alloc_bootmem failed\n");
+ new = alloc_bootmem(sizeof(*new));
+ if (new == NULL) {
+ printk(ERR "Alloc_bootmem failed\n");
return 1;
}
@@ -801,7 +660,7 @@ static int iss_net_setup(char *str)
#undef ERR
-__setup("eth=", iss_net_setup);
+__setup("eth", iss_net_setup);
/*
* Initialize all ISS Ethernet devices previously registered in iss_net_setup.
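
As a quick illustration of the command-line handling that iss_net_setup() and tuntap_probe() now implement ("ethX=tuntap,[mac address],device name"), here is a host-side sketch: split on '=', parse the unit number, then peel off the comma-separated fields in the spirit of split_if_spec(). strtoul stands in for kstrtouint and the splitting is simplified; this is not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pull the next comma-separated field out of *str, in the spirit of
 * split_if_spec().  Modifies the string in place.
 */
static char *split_field(char **str)
{
	char *start = *str, *comma;

	if (!start)
		return NULL;
	comma = strchr(start, ',');
	if (comma) {
		*comma = 0;
		*str = comma + 1;
	} else {
		*str = NULL;
	}
	return start;
}

int main(void)
{
	char arg[] = "eth0=tuntap,00:11:22:33:44:55,tap0";
	char *spec = arg + strlen("eth");	/* what the "eth" handler sees */
	char *eq = strchr(spec, '=');
	char *fields, *transport, *mac, *dev;
	unsigned long n;

	if (!eq)
		return 1;
	*eq = 0;
	n = strtoul(spec, NULL, 0);		/* kstrtouint() in the kernel */
	fields = eq + 1;

	transport = split_field(&fields);	/* "tuntap" */
	mac = split_field(&fields);		/* optional MAC, may be empty */
	dev = split_field(&fields);		/* tap device name */

	printf("unit %lu: transport=%s mac=%s dev=%s\n", n,
	       transport, (mac && *mac) ? mac : "(random)",
	       dev ? dev : "(missing)");
	return 0;
}
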
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index f60c8cf6dfb..da7d1824086 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -40,14 +40,14 @@ void __init platform_init(bp_tag_t* bootparam)
void platform_halt(void)
{
- printk (" ** Called platform_halt(), looping forever! **\n");
- while (1);
+ pr_info(" ** Called platform_halt() **\n");
+ __asm__ __volatile__("movi a2, 1\nsimcall\n");
}
void platform_power_off(void)
{
- printk (" ** Called platform_power_off(), looping forever! **\n");
- while (1);
+ pr_info(" ** Called platform_power_off() **\n");
+ __asm__ __volatile__("movi a2, 1\nsimcall\n");
}
void platform_restart(void)
{
@@ -55,13 +55,15 @@ void platform_restart(void)
* jump to the reset vector. */
__asm__ __volatile__("movi a2, 15\n\t"
- "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
+ "wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
- "wsr a2, " __stringify(ICOUNT) "\n\t"
- "wsr a2, " __stringify(IBREAKENABLE) "\n\t"
- "wsr a2, " __stringify(LCOUNT) "\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+ "wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
- "wsr a2, " __stringify(PS) "\n\t"
+ "wsr a2, ps\n\t"
"isync\n\t"
"jx %0\n\t"
:
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
new file mode 100644
index 00000000000..48eebacdf5f
--- /dev/null
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -0,0 +1,384 @@
+/*
+ * arch/xtensa/platforms/iss/simdisk.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2013 Tensilica Inc.
+ * Authors Victor Prupis
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <platform/simcall.h>
+
+#define SIMDISK_MAJOR 240
+#define SECTOR_SHIFT 9
+#define SIMDISK_MINORS 1
+#define MAX_SIMDISK_COUNT 10
+
+struct simdisk {
+ const char *filename;
+ spinlock_t lock;
+ struct request_queue *queue;
+ struct gendisk *gd;
+ struct proc_dir_entry *procfile;
+ int users;
+ unsigned long size;
+ int fd;
+};
+
+
+static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
+module_param(simdisk_count, int, S_IRUGO);
+MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");
+
+static int n_files;
+static const char *filename[MAX_SIMDISK_COUNT] = {
+#ifdef CONFIG_SIMDISK0_FILENAME
+ CONFIG_SIMDISK0_FILENAME,
+#ifdef CONFIG_SIMDISK1_FILENAME
+ CONFIG_SIMDISK1_FILENAME,
+#endif
+#endif
+};
+
+static int simdisk_param_set_filename(const char *val,
+ const struct kernel_param *kp)
+{
+ if (n_files < ARRAY_SIZE(filename))
+ filename[n_files++] = val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static const struct kernel_param_ops simdisk_param_ops_filename = {
+ .set = simdisk_param_set_filename,
+};
+module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
+MODULE_PARM_DESC(filename, "Backing storage filename.");
+
+static int simdisk_major = SIMDISK_MAJOR;
+
+static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
+ unsigned long nsect, char *buffer, int write)
+{
+ unsigned long offset = sector << SECTOR_SHIFT;
+ unsigned long nbytes = nsect << SECTOR_SHIFT;
+
+ if (offset > dev->size || dev->size - offset < nbytes) {
+ pr_notice("Beyond-end %s (%ld %ld)\n",
+ write ? "write" : "read", offset, nbytes);
+ return;
+ }
+
+ spin_lock(&dev->lock);
+ while (nbytes > 0) {
+ unsigned long io;
+
+ simc_lseek(dev->fd, offset, SEEK_SET);
+ if (write)
+ io = simc_write(dev->fd, buffer, nbytes);
+ else
+ io = simc_read(dev->fd, buffer, nbytes);
+ if (io == -1) {
+ pr_err("SIMDISK: IO error %d\n", errno);
+ break;
+ }
+ buffer += io;
+ offset += io;
+ nbytes -= io;
+ }
+ spin_unlock(&dev->lock);
+}
+
+static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
+{
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ bio_for_each_segment(bvec, bio, iter) {
+ char *buffer = __bio_kmap_atomic(bio, iter);
+ unsigned len = bvec.bv_len >> SECTOR_SHIFT;
+
+ simdisk_transfer(dev, sector, len, buffer,
+ bio_data_dir(bio) == WRITE);
+ sector += len;
+ __bio_kunmap_atomic(buffer);
+ }
+ return 0;
+}
+
+static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct simdisk *dev = q->queuedata;
+ int status = simdisk_xfer_bio(dev, bio);
+ bio_endio(bio, status);
+}
+
+
+static int simdisk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct simdisk *dev = bdev->bd_disk->private_data;
+
+ spin_lock(&dev->lock);
+ if (!dev->users)
+ check_disk_change(bdev);
+ ++dev->users;
+ spin_unlock(&dev->lock);
+ return 0;
+}
+
+static void simdisk_release(struct gendisk *disk, fmode_t mode)
+{
+ struct simdisk *dev = disk->private_data;
+ spin_lock(&dev->lock);
+ --dev->users;
+ spin_unlock(&dev->lock);
+}
+
+static const struct block_device_operations simdisk_ops = {
+ .owner = THIS_MODULE,
+ .open = simdisk_open,
+ .release = simdisk_release,
+};
+
+static struct simdisk *sddev;
+static struct proc_dir_entry *simdisk_procdir;
+
+static int simdisk_attach(struct simdisk *dev, const char *filename)
+{
+ int err = 0;
+
+ filename = kstrdup(filename, GFP_KERNEL);
+ if (filename == NULL)
+ return -ENOMEM;
+
+ spin_lock(&dev->lock);
+
+ if (dev->fd != -1) {
+ err = -EBUSY;
+ goto out;
+ }
+ dev->fd = simc_open(filename, O_RDWR, 0);
+ if (dev->fd == -1) {
+ pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
+ err = -ENODEV;
+ goto out;
+ }
+ dev->size = simc_lseek(dev->fd, 0, SEEK_END);
+ set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
+ dev->filename = filename;
+ pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
+out:
+ if (err)
+ kfree(filename);
+ spin_unlock(&dev->lock);
+
+ return err;
+}
+
+static int simdisk_detach(struct simdisk *dev)
+{
+ int err = 0;
+
+ spin_lock(&dev->lock);
+
+ if (dev->users != 0) {
+ err = -EBUSY;
+ } else if (dev->fd != -1) {
+ if (simc_close(dev->fd)) {
+ pr_err("SIMDISK: error closing %s: %d\n",
+ dev->filename, errno);
+ err = -EIO;
+ } else {
+ pr_info("SIMDISK: %s detached from %s\n",
+ dev->gd->disk_name, dev->filename);
+ dev->fd = -1;
+ kfree(dev->filename);
+ dev->filename = NULL;
+ }
+ }
+ spin_unlock(&dev->lock);
+ return err;
+}
+
+static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct simdisk *dev = PDE_DATA(file_inode(file));
+ const char *s = dev->filename;
+ if (s) {
+ ssize_t n = simple_read_from_buffer(buf, size, ppos,
+ s, strlen(s));
+ if (n < 0)
+ return n;
+ buf += n;
+ size -= n;
+ }
+ return simple_read_from_buffer(buf, size, ppos, "\n", 1);
+}
+
+static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp = kmalloc(count + 1, GFP_KERNEL);
+ struct simdisk *dev = PDE_DATA(file_inode(file));
+ int err;
+
+ if (tmp == NULL)
+ return -ENOMEM;
+ if (copy_from_user(tmp, buf, count)) {
+ err = -EFAULT;
+ goto out_free;
+ }
+
+ err = simdisk_detach(dev);
+ if (err != 0)
+ goto out_free;
+
+ if (count > 0 && tmp[count - 1] == '\n')
+ tmp[count - 1] = 0;
+ else
+ tmp[count] = 0;
+
+ if (tmp[0])
+ err = simdisk_attach(dev, tmp);
+
+ if (err == 0)
+ err = count;
+out_free:
+ kfree(tmp);
+ return err;
+}
+
+static const struct file_operations fops = {
+ .read = proc_read_simdisk,
+ .write = proc_write_simdisk,
+ .llseek = default_llseek,
+};
+
+static int __init simdisk_setup(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ dev->fd = -1;
+ dev->filename = NULL;
+ spin_lock_init(&dev->lock);
+ dev->users = 0;
+
+ dev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dev->queue == NULL) {
+ pr_err("blk_alloc_queue failed\n");
+ goto out_alloc_queue;
+ }
+
+ blk_queue_make_request(dev->queue, simdisk_make_request);
+ dev->queue->queuedata = dev;
+
+ dev->gd = alloc_disk(SIMDISK_MINORS);
+ if (dev->gd == NULL) {
+ pr_err("alloc_disk failed\n");
+ goto out_alloc_disk;
+ }
+ dev->gd->major = simdisk_major;
+ dev->gd->first_minor = which;
+ dev->gd->fops = &simdisk_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
+ set_capacity(dev->gd, 0);
+ add_disk(dev->gd);
+
+ dev->procfile = proc_create_data(tmp, 0644, procdir, &fops, dev);
+ return 0;
+
+out_alloc_disk:
+ blk_cleanup_queue(dev->queue);
+ dev->queue = NULL;
+out_alloc_queue:
+ simc_close(dev->fd);
+ return -EIO;
+}
+
+static int __init simdisk_init(void)
+{
+ int i;
+
+ if (register_blkdev(simdisk_major, "simdisk") < 0) {
+ pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
+ return -EIO;
+ }
+ pr_info("SIMDISK: major: %d\n", simdisk_major);
+
+ if (n_files > simdisk_count)
+ simdisk_count = n_files;
+ if (simdisk_count > MAX_SIMDISK_COUNT)
+ simdisk_count = MAX_SIMDISK_COUNT;
+
+ sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
+ GFP_KERNEL);
+ if (sddev == NULL)
+ goto out_unregister;
+
+ simdisk_procdir = proc_mkdir("simdisk", 0);
+ if (simdisk_procdir == NULL)
+ goto out_free_unregister;
+
+ for (i = 0; i < simdisk_count; ++i) {
+ if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
+ if (filename[i] != NULL && filename[i][0] != 0 &&
+ (n_files == 0 || i < n_files))
+ simdisk_attach(sddev + i, filename[i]);
+ }
+ }
+
+ return 0;
+
+out_free_unregister:
+ kfree(sddev);
+out_unregister:
+ unregister_blkdev(simdisk_major, "simdisk");
+ return -ENOMEM;
+}
+module_init(simdisk_init);
+
+static void simdisk_teardown(struct simdisk *dev, int which,
+ struct proc_dir_entry *procdir)
+{
+ char tmp[2] = { '0' + which, 0 };
+
+ simdisk_detach(dev);
+ if (dev->gd)
+ del_gendisk(dev->gd);
+ if (dev->queue)
+ blk_cleanup_queue(dev->queue);
+ remove_proc_entry(tmp, procdir);
+}
+
+static void __exit simdisk_exit(void)
+{
+ int i;
+
+ for (i = 0; i < simdisk_count; ++i)
+ simdisk_teardown(sddev + i, i, simdisk_procdir);
+ remove_proc_entry("simdisk", 0);
+ kfree(sddev);
+ unregister_blkdev(simdisk_major, "simdisk");
+}
+module_exit(simdisk_exit);
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);
+
+MODULE_LICENSE("GPL");
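
The new /proc/simdisk/<unit> files take a backing-file path on write (detaching any current image first) and return the current path on read. The short C snippet below shows that usage pattern from user space; the paths are examples only, and the files of course exist only on an ISS kernel built with the simdisk driver.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *ctl = "/proc/simdisk/0";		/* unit 0 control file */
	const char *image = "/tmp/simdisk0.img\n";	/* example backing file */
	char cur[256];
	ssize_t n;
	int fd;

	/* Writing a path runs proc_write_simdisk(): detach, then attach. */
	fd = open(ctl, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, image, strlen(image)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Reading back returns the attached filename followed by '\n'. */
	fd = open(ctl, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, cur, sizeof(cur) - 1);
	if (n > 0) {
		cur[n] = 0;
		printf("simdisk0 backing file: %s", cur);
	}
	close(fd);
	return 0;
}
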
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 9e83940ac26..b90555cb808 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -66,13 +66,15 @@ void platform_restart(void)
* jump to the reset vector. */
__asm__ __volatile__ ("movi a2, 15\n\t"
- "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
+ "wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
- "wsr a2, " __stringify(ICOUNT) "\n\t"
- "wsr a2, " __stringify(IBREAKENABLE) "\n\t"
- "wsr a2, " __stringify(LCOUNT) "\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+ "wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
- "wsr a2, " __stringify(PS) "\n\t"
+ "wsr a2, ps\n\t"
"isync\n\t"
"jx %0\n\t"
:
@@ -90,18 +92,8 @@ void __init platform_setup(char** cmdline)
/* early initialization */
-extern sysmem_info_t __initdata sysmem;
-
-void platform_init(bp_tag_t* first)
+void __init platform_init(bp_tag_t *first)
{
- /* Set default memory block if not provided by the bootloader. */
-
- if (sysmem.nr_banks == 0) {
- sysmem.nr_banks = 1;
- sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
- sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
- + PLATFORM_DEFAULT_MEM_SIZE;
- }
}
/* Heartbeat. Let the LED blink. */
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
new file mode 100644
index 00000000000..b9ae206340c
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -0,0 +1,9 @@
+# Makefile for the Tensilica xtavnet Emulation Board
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are in the main makefile...
+
+obj-y = setup.o lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
new file mode 100644
index 00000000000..aeb316b7ff8
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -0,0 +1,65 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+/*
+ * This file contains the hardware configuration of the XTAVNET boards.
+ */
+
+#ifndef __XTENSA_XTAVNET_HARDWARE_H
+#define __XTENSA_XTAVNET_HARDWARE_H
+
+/* Memory configuration. */
+
+#define PLATFORM_DEFAULT_MEM_START 0x00000000
+#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
+
+/* Interrupt configuration. */
+
+#define PLATFORM_NR_IRQS 10
+
+/* Default assignment of LX60 devices to external interrupts. */
+
+#ifdef CONFIG_XTENSA_MX
+#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
+#define OETH_IRQ XCHAL_EXTINT4_NUM
+#else
+#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
+#define OETH_IRQ XCHAL_EXTINT1_NUM
+#endif
+
+/*
+ * Device addresses and parameters.
+ */
+
+/* UART */
+#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+/* Misc. */
+#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+/* Clock frequency in Hz (read-only): */
+#define XTFPGA_CLKFRQ_VADDR (XTFPGA_FPGAREGS_VADDR + 0x04)
+/* Setting of 8 DIP switches: */
+#define DIP_SWITCHES_VADDR (XTFPGA_FPGAREGS_VADDR + 0x0C)
+/* Software reset (write 0xdead): */
+#define XTFPGA_SWRST_VADDR (XTFPGA_FPGAREGS_VADDR + 0x10)
+
+/* OpenCores Ethernet controller: */
+ /* regs + RX/TX descriptors */
+#define OETH_REGS_PADDR (XCHAL_KIO_PADDR + 0x0D030000)
+#define OETH_REGS_SIZE 0x1000
+#define OETH_SRAMBUFF_PADDR (XCHAL_KIO_PADDR + 0x0D800000)
+
+ /* 5*rx buffs + 5*tx buffs */
+#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
+
+#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
new file mode 100644
index 00000000000..0e435645af5
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -0,0 +1,20 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/lcd.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __XTENSA_XTAVNET_LCD_H
+#define __XTENSA_XTAVNET_LCD_H
+
+/* Display string STR at position POS on the LCD. */
+void lcd_disp_at_pos(char *str, unsigned char pos);
+
+/* Shift the contents of the LCD display left or right. */
+void lcd_shiftleft(void);
+void lcd_shiftright(void);
+#endif
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/serial.h b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
new file mode 100644
index 00000000000..14d8f7beebf
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
@@ -0,0 +1,18 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/serial.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_XTAVNET_SERIAL_H
+#define __ASM_XTENSA_XTAVNET_SERIAL_H
+
+#include <platform/hardware.h>
+
+#define BASE_BAUD (*(long *)XTFPGA_CLKFRQ_VADDR / 16)
+
+#endif /* __ASM_XTENSA_XTAVNET_SERIAL_H */
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
new file mode 100644
index 00000000000..2872301598d
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -0,0 +1,76 @@
+/*
+ * Driver for the LCD display on the Tensilica LX60 Board.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+/*
+ *
+ * FIXME: this code is from the examples in the LX60 user guide.
+ *
+ * The lcd_pause function does busy waiting, which is probably not
+ * great. Maybe the code could be changed to use kernel timers, or
+ * the hardware could be changed so that no waiting is needed.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <platform/hardware.h>
+#include <platform/lcd.h>
+#include <linux/delay.h>
+
+#define LCD_PAUSE_ITERATIONS 4000
+#define LCD_CLEAR 0x1
+#define LCD_DISPLAY_ON 0xc
+
+/* 8bit and 2 lines display */
+#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_POS 0x80
+#define LCD_SHIFT_LEFT 0x18
+#define LCD_SHIFT_RIGHT 0x1c
+
+static int __init lcd_init(void)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_CLEAR;
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+}
+
+void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ udelay(100);
+ while (*str != 0) {
+ *LCD_DATA_ADDR = *str;
+ udelay(200);
+ str++;
+ }
+}
+
+void lcd_shiftleft(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ udelay(50);
+}
+
+void lcd_shiftright(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ udelay(50);
+}
+
+arch_initcall(lcd_init);
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
new file mode 100644
index 00000000000..57fd08b36f5
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -0,0 +1,303 @@
+/*
+ *
+ * arch/xtensa/platforms/xtfpga/setup.c
+ *
+ * ...
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2006 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/lcd.h>
+#include <platform/hardware.h>
+
+void platform_halt(void)
+{
+ lcd_disp_at_pos(" HALT ", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_power_off(void)
+{
+ lcd_disp_at_pos("POWEROFF", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_restart(void)
+{
+	/*
+	 * Simulate a processor reset: disable single-stepping and
+	 * instruction breakpoints, clear the loop count, mask interrupts
+	 * and exceptions via PS, then jump to the reset vector.
+	 */
+
+ __asm__ __volatile__ ("movi a2, 15\n\t"
+ "wsr a2, icountlevel\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+ "wsr a2, ibreakenable\n\t"
+#endif
+ "wsr a2, lcount\n\t"
+ "movi a2, 0x1f\n\t"
+ "wsr a2, ps\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2"
+ );
+
+ /* control never gets here */
+}
+
+void __init platform_setup(char **cmdline)
+{
+}
+
+#ifdef CONFIG_OF
+
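+/*
+ * If the node has no usable "clock-frequency" property, synthesize one
+ * from the rate reported by the FPGA clock register.
+ */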
+static void __init update_clock_frequency(struct device_node *node)
+{
+ struct property *newfreq;
+ u32 freq;
+
+ if (!of_property_read_u32(node, "clock-frequency", &freq) && freq != 0)
+ return;
+
+ newfreq = kzalloc(sizeof(*newfreq) + sizeof(u32), GFP_KERNEL);
+ if (!newfreq)
+ return;
+ newfreq->value = newfreq + 1;
+ newfreq->length = sizeof(freq);
+ newfreq->name = kstrdup("clock-frequency", GFP_KERNEL);
+ if (!newfreq->name) {
+ kfree(newfreq);
+ return;
+ }
+
+ *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
+ of_update_property(node, newfreq);
+}
+
+#define MAC_LEN 6
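+/*
+ * Replace the last byte of the node's "local-mac-address" with the value
+ * (low 6 bits) read from the on-board DIP switches.
+ */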
+static void __init update_local_mac(struct device_node *node)
+{
+ struct property *newmac;
+	const u8 *macaddr;
+ int prop_len;
+
+ macaddr = of_get_property(node, "local-mac-address", &prop_len);
+ if (macaddr == NULL || prop_len != MAC_LEN)
+ return;
+
+ newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL);
+ if (newmac == NULL)
+ return;
+
+ newmac->value = newmac + 1;
+ newmac->length = MAC_LEN;
+ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (newmac->name == NULL) {
+ kfree(newmac);
+ return;
+ }
+
+ memcpy(newmac->value, macaddr, MAC_LEN);
+	((u8 *)newmac->value)[5] = (*(u32 *)DIP_SWITCHES_VADDR) & 0x3f;
+ of_update_property(node, newmac);
+}
+
+static int __init machine_setup(void)
+{
+ struct device_node *clock;
+ struct device_node *eth = NULL;
+
+ for_each_node_by_name(clock, "main-oscillator")
+ update_clock_frequency(clock);
+
+	eth = of_find_compatible_node(NULL, NULL, "opencores,ethoc");
+	if (eth) {
+		update_local_mac(eth);
+		of_node_put(eth);
+	}
+ return 0;
+}
+arch_initcall(machine_setup);
+
+#endif
+
+/* early initialization */
+
+void __init platform_init(bp_tag_t *first)
+{
+}
+
+/* Heartbeat. */
+
+void platform_heartbeat(void)
+{
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+
+void __init platform_calibrate_ccount(void)
+{
+ long clk_freq = 0;
+#ifdef CONFIG_OF
+ struct device_node *cpu =
+ of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
+	if (cpu) {
+		u32 freq;
+
+		update_clock_frequency(cpu);
+		if (!of_property_read_u32(cpu, "clock-frequency", &freq))
+			clk_freq = freq;
+		of_node_put(cpu);
+	}
+#endif
+ if (!clk_freq)
+ clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ ccount_freq = clk_freq;
+}
+
+#endif
+
+#ifndef CONFIG_OF
+
+#include <linux/serial_8250.h>
+#include <linux/if.h>
+#include <net/ethoc.h>
+
+/*----------------------------------------------------------------------------
+ * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
+ */
+
+static struct resource ethoc_res[] = {
+ [0] = { /* register space */
+ .start = OETH_REGS_PADDR,
+ .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* buffer space */
+ .start = OETH_SRAMBUFF_PADDR,
+ .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = { /* IRQ number */
+ .start = OETH_IRQ,
+ .end = OETH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct ethoc_platform_data ethoc_pdata = {
+ /*
+ * The MAC address for these boards is 00:50:c2:13:6f:xx.
+	 * The last byte (zero here) is replaced at init time with the value
+	 * read from the DIP switches on the board.
+ */
+ .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 },
+ .phy_id = -1,
+};
+
+static struct platform_device ethoc_device = {
+ .name = "ethoc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ethoc_res),
+ .resource = ethoc_res,
+ .dev = {
+ .platform_data = &ethoc_pdata,
+ },
+};
+
+/*----------------------------------------------------------------------------
+ * UART
+ */
+
+static struct resource serial_resource = {
+ .start = DUART16552_PADDR,
+ .end = DUART16552_PADDR + 0x1f,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ [0] = {
+ .mapbase = DUART16552_PADDR,
+ .irq = DUART16552_INTNUM,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+ UPF_IOREMAP,
+ .iotype = UPIO_MEM32,
+ .regshift = 2,
+ .uartclk = 0, /* set in xtavnet_init() */
+ },
+ { },
+};
+
+static struct platform_device xtavnet_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = 1,
+ .resource = &serial_resource,
+};
+
+/* platform devices */
+static struct platform_device *platform_devices[] __initdata = {
+ &ethoc_device,
+ &xtavnet_uart,
+};
+
+static int __init xtavnet_init(void)
+{
+ /* Ethernet MAC address. */
+ ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR;
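+	/* (e.g. switches set to 0x2a yield the MAC 00:50:c2:13:6f:2a) */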
+
+	/* The clock rate varies among FPGA bitstreams; a board-specific FPGA
+	 * register reports the actual clock rate.
+	 */
+ serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ /* register platform devices */
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+	/* The ethoc driver is fairly quiet; print the Ethernet MAC here so
+	 * the user can verify that the DIP switches are set correctly.
+	 */
+ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+ ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ return 0;
+}
+
+/*
+ * Register to be done during do_initcalls().
+ */
+arch_initcall(xtavnet_init);
+
+#endif /* CONFIG_OF */
diff --git a/arch/xtensa/variants/dc233c/include/variant/core.h b/arch/xtensa/variants/dc233c/include/variant/core.h
new file mode 100644
index 00000000000..3a2e53b9493
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/core.h
@@ -0,0 +1,475 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
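+/* For example: I-cache size = ways * 2^(linewidth + setwidth)
+   = 4 * 2^(5 + 7) = 16384 bytes, matching XCHAL_ICACHE_SIZE above
+   (and likewise for the D-cache). */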
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
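+/* (each ANDBELOW mask is the OR of the per-level masks up to that level,
+   e.g. 0x001F80FF | 0x00000100 == 0x001F81FF for levels 1..2) */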
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
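+/* Note: each relocatable *_VECTOR_VADDR above equals
+   XCHAL_VECBASE_RESET_VADDR + the corresponding *_VECOFS,
+   e.g. 0x00002000 + 0x00000340 = 0x00002340 for the user vector. */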
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie-asm.h b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
new file mode 100644
index 00000000000..5dbd981ea42
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
@@ -0,0 +1,193 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
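+/* For example, XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_CC, XTHAL_SAS_GLOB)
+   evaluates to 0x0002 | 0x0008 | 0x0040 == 0x004A. */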
+
+
+
+ /*
+ * Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ rur.THREADPTR \at1 // threadptr option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr \at1, ACCLO // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, ACCHI // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ rsr \at1, M0 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr \at1, M1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr \at1, M2 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr \at1, M3 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr \at1, SCOMPARE1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_store
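+	/*
+	 * Example invocation (illustrative):  xchal_ncp_store a2, a3, a4, a5, a6
+	 * stores the THREADPTR, MAC16 and SCOMPARE1 state through a2, using
+	 * only a3 as scratch (XCHAL_NCP_NUM_ATMPS is 1 for this core).
+	 */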
+
+ /*
+ * Macro to restore all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional global register used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wur.THREADPTR \at1 // threadptr option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1020, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
+ .endif
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, ACCLO // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, ACCHI // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr \at1, M0 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr \at1, M1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr \at1, M2 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr \at1, M3 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr \at1, SCOMPARE1 // conditional store option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie.h b/arch/xtensa/variants/dc233c/include/variant/tie.h
new file mode 100644
index 00000000000..815e52bc3d0
--- /dev/null
+++ b/arch/xtensa/variants/dc233c/include/variant/tie.h
@@ -0,0 +1,150 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 1 /* number of coprocessors */
+#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
+
+/* Basic parameters of each coprocessor: */
+#define XCHAL_CP7_NAME "XTIOP"
+#define XCHAL_CP7_IDENT XTIOP
+#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
+
+/* Filler info for unassigned coprocessors, to simplify arrays etc: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP6_SA_ALIGN 1
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 32
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 8
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0)
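+/* (8 entries of 4 bytes each, consistent with XCHAL_NCP_SA_SIZE == 32) */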
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
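+/* (i.e. op0 values 0..7 and 14..15 select 24-bit formats; values 8..13
+   select the 16-bit density formats) */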
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
index bf4020116df..244cdea4dee 100644
--- a/arch/xtensa/variants/fsf/include/variant/tie.h
+++ b/arch/xtensa/variants/fsf/include/variant/tie.h
@@ -18,13 +18,6 @@
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
-/* Basic parameters of each coprocessor: */
-#define XCHAL_CP7_NAME "XTIOP"
-#define XCHAL_CP7_IDENT XTIOP
-#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
-#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
-#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
-
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
@@ -42,6 +35,8 @@
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_SIZE 0
+#define XCHAL_CP7_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
diff --git a/arch/xtensa/variants/s6000/delay.c b/arch/xtensa/variants/s6000/delay.c
index 54b2b573f16..39154563ee1 100644
--- a/arch/xtensa/variants/s6000/delay.c
+++ b/arch/xtensa/variants/s6000/delay.c
@@ -1,4 +1,3 @@
-#include <asm/delay.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <variant/hardware.h>
@@ -17,11 +16,10 @@ void platform_calibrate_ccount(void)
"1: l32i %0, %2, 0 ;"
" beq %0, %1, 1b ;"
: "=&a"(u) : "a"(t), "a"(tstamp));
- b = xtensa_get_ccount();
+ b = get_ccount();
if (i == LOOPS)
a = b;
} while (--i >= 0);
b -= a;
- nsec_per_ccount = (LOOPS * 10000) / b;
- ccount_per_jiffy = b * (100000UL / (LOOPS * HZ));
+ ccount_freq = b * (100000UL / LOOPS);
}
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
index dc7f7c57351..340f5bb0b5e 100644
--- a/arch/xtensa/variants/s6000/dmac.c
+++ b/arch/xtensa/variants/s6000/dmac.c
@@ -1,5 +1,5 @@
/*
- * Authors: Oskar Schirmer <os@emlix.com>
+ * Authors: Oskar Schirmer <oskar@scara.com>
* Daniel Gloeckner <dg@emlix.com>
* (c) 2008 emlix GmbH http://www.emlix.com
*
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index 7af0757e001..da9e85c13b0 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -2,8 +2,8 @@
* s6000 gpio driver
*
* Copyright (c) 2009 emlix GmbH
- * Authors: Oskar Schirmer <os@emlix.com>
- * Johannes Weiner <jw@emlix.com>
+ * Authors: Oskar Schirmer <oskar@scara.com>
+ * Johannes Weiner <hannes@cmpxchg.org>
* Daniel Gloeckner <dg@emlix.com>
*/
#include <linux/bitops.h>
@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
int cirq;
chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data));
+ chip->irq_ack(&desc->irq_data);
pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
cirq = IRQ_BASE - 1;
while (pending) {
@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
pending >>= n;
generic_handle_irq(cirq);
}
- chip->irq_unmask(&desc->irq_data));
+ chip->irq_unmask(&desc->irq_data);
}
extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
index e81735b2a20..3f88d9fc689 100644
--- a/arch/xtensa/variants/s6000/include/variant/dmac.h
+++ b/arch/xtensa/variants/s6000/include/variant/dmac.h
@@ -8,7 +8,7 @@
* Copyright (C) 2006 Tensilica Inc.
* Copyright (C) 2008 Emlix GmbH <info@emlix.com>
* Authors: Fabian Godehardt <fg@emlix.com>
- * Oskar Schirmer <os@emlix.com>
+ * Oskar Schirmer <oskar@scara.com>
* Daniel Gloeckner <dg@emlix.com>
*/
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
index 97d6fc48def..39ca751a625 100644
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ b/arch/xtensa/variants/s6000/include/variant/irq.h
@@ -1,7 +1,6 @@
#ifndef _XTENSA_S6000_IRQ_H
#define _XTENSA_S6000_IRQ_H
-#define NO_IRQ (-1)
#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
extern void variant_irq_enable(unsigned int irq);
diff --git a/arch/xtensa/variants/s6000/irq.c b/arch/xtensa/variants/s6000/irq.c
index 6651e3285fc..81a241e7907 100644
--- a/arch/xtensa/variants/s6000/irq.c
+++ b/arch/xtensa/variants/s6000/irq.c
@@ -2,8 +2,8 @@
* s6000 irq crossbar
*
* Copyright (c) 2009 emlix GmbH
- * Authors: Johannes Weiner <jw@emlix.com>
- * Oskar Schirmer <os@emlix.com>
+ * Authors: Johannes Weiner <hannes@cmpxchg.org>
+ * Oskar Schirmer <oskar@scara.com>
*/
#include <linux/io.h>
#include <asm/irq.h>