Diffstat (limited to 'arch/um/sys-i386')
-rw-r--r--  arch/um/sys-i386/Makefile            24
-rw-r--r--  arch/um/sys-i386/bug.c               21
-rw-r--r--  arch/um/sys-i386/bugs.c              74
-rw-r--r--  arch/um/sys-i386/checksum.S         458
-rw-r--r--  arch/um/sys-i386/delay.c             60
-rw-r--r--  arch/um/sys-i386/elfcore.c           83
-rw-r--r--  arch/um/sys-i386/fault.c             28
-rw-r--r--  arch/um/sys-i386/ksyms.c              5
-rw-r--r--  arch/um/sys-i386/ldt.c              502
-rw-r--r--  arch/um/sys-i386/mem.c               62
-rw-r--r--  arch/um/sys-i386/ptrace.c           273
-rw-r--r--  arch/um/sys-i386/ptrace_user.c       21
-rw-r--r--  arch/um/sys-i386/setjmp.S            58
-rw-r--r--  arch/um/sys-i386/signal.c           498
-rw-r--r--  arch/um/sys-i386/stub.S              51
-rw-r--r--  arch/um/sys-i386/stub_segv.c         17
-rw-r--r--  arch/um/sys-i386/sys_call_table.S    28
-rw-r--r--  arch/um/sys-i386/syscalls.c          66
-rw-r--r--  arch/um/sys-i386/sysrq.c            101
-rw-r--r--  arch/um/sys-i386/tls.c              396
-rw-r--r--  arch/um/sys-i386/user-offsets.c      53
21 files changed, 0 insertions, 2879 deletions
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
deleted file mode 100644
index 231bb983a12..00000000000
--- a/arch/um/sys-i386/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
-#
-
-obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
- ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
- sys_call_table.o tls.o mem.o
-
-obj-$(CONFIG_BINFMT_ELF) += elfcore.o
-
-subarch-obj-y = lib/string_32.o lib/atomic64_32.o lib/atomic64_cx8_32.o
-subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
-subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
-subarch-obj-$(CONFIG_MODULES) += kernel/module.o
-
-USER_OBJS := bugs.o ptrace_user.o fault.o
-
-extra-y += user-offsets.s
-$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
-
-UNPROFILE_OBJS := stub_segv.o
-CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
-
-include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
deleted file mode 100644
index 8d4f273f121..00000000000
--- a/arch/um/sys-i386/bug.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-#include <asm/errno.h>
-
-/* Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
- unsigned short ud2;
-
- if (probe_kernel_address((unsigned short __user *)eip, ud2))
- return 0;
-
- return ud2 == 0x0b0f;
-}
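The check above reduces to a two-byte compare: ud2 is encoded as 0x0f 0x0b, which a little-endian 16-bit load returns as 0x0b0f. A minimal user-space sketch of the same test, with the kernel-only probe_kernel_address() replaced by a plain memcpy from a buffer that is assumed readable:

#include <stdint.h>
#include <string.h>

#define UD2_INSN 0x0b0f   /* bytes 0x0f 0x0b read as a little-endian u16 */

/* Sketch of is_valid_bugaddr() without the fault handling; the kernel
 * version uses probe_kernel_address() so an unmapped eip simply yields 0. */
static int is_ud2(const void *ip)
{
        uint16_t insn;

        memcpy(&insn, ip, sizeof(insn));
        return insn == UD2_INSN;
}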
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
deleted file mode 100644
index 7058e1fa903..00000000000
--- a/arch/um/sys-i386/bugs.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <signal.h>
-#include "kern_util.h"
-#include "longjmp.h"
-#include "task.h"
-#include "sysdep/ptrace.h"
-
-/* Set during early boot */
-static int host_has_cmov = 1;
-static jmp_buf cmov_test_return;
-
-static void cmov_sigill_test_handler(int sig)
-{
- host_has_cmov = 0;
- longjmp(cmov_test_return, 1);
-}
-
-void arch_check_bugs(void)
-{
- struct sigaction old, new;
-
- printk(UM_KERN_INFO "Checking for host processor cmov support...");
- new.sa_handler = cmov_sigill_test_handler;
-
- /* Make sure that SIGILL is enabled after the handler longjmps back */
- new.sa_flags = SA_NODEFER;
- sigemptyset(&new.sa_mask);
- sigaction(SIGILL, &new, &old);
-
- if (setjmp(cmov_test_return) == 0) {
- unsigned long foo = 0;
- __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
- printk(UM_KERN_CONT "Yes\n");
- } else
- printk(UM_KERN_CONT "No\n");
-
- sigaction(SIGILL, &old, &new);
-}
-
-void arch_examine_signal(int sig, struct uml_pt_regs *regs)
-{
- unsigned char tmp[2];
-
- /*
- * This is testing for a cmov (0x0f 0x4x) instruction causing a
- * SIGILL in init.
- */
- if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
- return;
-
- if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
- printk(UM_KERN_ERR "SIGILL in init, could not read "
- "instructions!\n");
- return;
- }
-
- if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
- return;
-
- if (host_has_cmov == 0)
- printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
- "processor doesn't implement. Boot a filesystem "
- "compiled for older processors");
- else if (host_has_cmov == 1)
- printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
- "processor claims to implement");
- else
- printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
- host_has_cmov);
-}
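The cmov probe above is an ordinary host-side trick: install a SIGILL handler, execute a cmov, and jump back out of the handler if the CPU traps. The same probe as a stand-alone host program; sigsetjmp()/siglongjmp() are used here so the signal mask is restored on the way out, where the file above goes through UML's longjmp wrappers:

#define _GNU_SOURCE
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf cmov_env;

static void sigill_handler(int sig)
{
        siglongjmp(cmov_env, 1);
}

int main(void)
{
        struct sigaction sa, old;
        unsigned long x = 0;

        sa.sa_handler = sigill_handler;
        sa.sa_flags = SA_NODEFER;       /* keep SIGILL usable after the jump out */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGILL, &sa, &old);

        if (sigsetjmp(cmov_env, 1) == 0) {
                __asm__ __volatile__("cmovz %0, %1" : "=r" (x) : "0" (x));
                puts("cmov: yes");
        } else {
                puts("cmov: no");
        }

        sigaction(SIGILL, &old, NULL);
        return 0;
}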
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S
deleted file mode 100644
index f058d2f82e1..00000000000
--- a/arch/um/sys-i386/checksum.S
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Pentium Pro/II routines:
- * Alexander Kjeldaas <astor@guardian.no>
- * Finn Arne Gangstad <finnag@guardian.no>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
- * handling.
- * Andi Kleen, add zeroing on error
- * converted to pure assembler
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/errno.h>
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-/*
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
- */
-
-.text
-.align 4
-.globl csum_partial
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
- /*
- * Experiments with Ethernet and SLIP connections show that buff
- * is aligned on either a 2-byte or 4-byte boundary. We get at
- * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
- * Fortunately, it is easy to convert 2-byte alignment to 4-byte
- * alignment for the unrolled loop.
- */
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: unsigned char *buff
- testl $2, %esi # Check alignment.
- jz 2f # Jump if alignment is ok.
- subl $2, %ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %ecx # ecx was < 2. Deal with it.
- jmp 4f
-1: movw (%esi), %bx
- addl $2, %esi
- addw %bx, %ax
- adcl $0, %eax
-2:
- movl %ecx, %edx
- shrl $5, %ecx
- jz 2f
- testl %esi, %esi
-1: movl (%esi), %ebx
- adcl %ebx, %eax
- movl 4(%esi), %ebx
- adcl %ebx, %eax
- movl 8(%esi), %ebx
- adcl %ebx, %eax
- movl 12(%esi), %ebx
- adcl %ebx, %eax
- movl 16(%esi), %ebx
- adcl %ebx, %eax
- movl 20(%esi), %ebx
- adcl %ebx, %eax
- movl 24(%esi), %ebx
- adcl %ebx, %eax
- movl 28(%esi), %ebx
- adcl %ebx, %eax
- lea 32(%esi), %esi
- dec %ecx
- jne 1b
- adcl $0, %eax
-2: movl %edx, %ecx
- andl $0x1c, %edx
- je 4f
- shrl $2, %edx # This clears CF
-3: adcl (%esi), %eax
- lea 4(%esi), %esi
- dec %edx
- jne 3b
- adcl $0, %eax
-4: andl $3, %ecx
- jz 7f
- cmpl $2, %ecx
- jb 5f
- movw (%esi),%cx
- leal 2(%esi),%esi
- je 6f
- shll $16,%ecx
-5: movb (%esi),%cl
-6: addl %ecx,%eax
- adcl $0, %eax
-7:
- popl %ebx
- popl %esi
- ret
-
-#else
-
-/* Version for PentiumII/PPro */
-
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: const unsigned char *buf
-
- testl $2, %esi
- jnz 30f
-10:
- movl %ecx, %edx
- movl %ecx, %ebx
- andl $0x7c, %ebx
- shrl $7, %ecx
- addl %ebx,%esi
- shrl $2, %ebx
- negl %ebx
- lea 45f(%ebx,%ebx,2), %ebx
- testl %esi, %esi
- jmp *%ebx
-
- # Handle 2-byte-aligned regions
-20: addw (%esi), %ax
- lea 2(%esi), %esi
- adcl $0, %eax
- jmp 10b
-
-30: subl $2, %ecx
- ja 20b
- je 32f
- movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
- addl %ebx, %eax
- adcl $0, %eax
- jmp 80f
-32:
- addw (%esi), %ax # csumming 2 bytes, 2-aligned
- adcl $0, %eax
- jmp 80f
-
-40:
- addl -128(%esi), %eax
- adcl -124(%esi), %eax
- adcl -120(%esi), %eax
- adcl -116(%esi), %eax
- adcl -112(%esi), %eax
- adcl -108(%esi), %eax
- adcl -104(%esi), %eax
- adcl -100(%esi), %eax
- adcl -96(%esi), %eax
- adcl -92(%esi), %eax
- adcl -88(%esi), %eax
- adcl -84(%esi), %eax
- adcl -80(%esi), %eax
- adcl -76(%esi), %eax
- adcl -72(%esi), %eax
- adcl -68(%esi), %eax
- adcl -64(%esi), %eax
- adcl -60(%esi), %eax
- adcl -56(%esi), %eax
- adcl -52(%esi), %eax
- adcl -48(%esi), %eax
- adcl -44(%esi), %eax
- adcl -40(%esi), %eax
- adcl -36(%esi), %eax
- adcl -32(%esi), %eax
- adcl -28(%esi), %eax
- adcl -24(%esi), %eax
- adcl -20(%esi), %eax
- adcl -16(%esi), %eax
- adcl -12(%esi), %eax
- adcl -8(%esi), %eax
- adcl -4(%esi), %eax
-45:
- lea 128(%esi), %esi
- adcl $0, %eax
- dec %ecx
- jge 40b
- movl %edx, %ecx
-50: andl $3, %ecx
- jz 80f
-
- # Handle the last 1-3 bytes without jumping
- notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
- movl $0xffffff,%ebx # by the shll and shrl instructions
- shll $3,%ecx
- shrl %cl,%ebx
- andl -128(%esi),%ebx # esi is 4-aligned so should be ok
- addl %ebx,%eax
- adcl $0,%eax
-80:
- popl %ebx
- popl %esi
- ret
-
-#endif
-
-/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst,
- int len, int sum, int *src_err_ptr, int *dst_err_ptr)
- */
-
-/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
- */
-
-#define SRC(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6001f ; \
- .previous
-
-#define DST(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
-.align 4
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
-#define ARGBASE 16
-#define FP 12
-
-csum_partial_copy_generic_i386:
- subl $4,%esp
- pushl %edi
- pushl %esi
- pushl %ebx
- movl ARGBASE+16(%esp),%eax # sum
- movl ARGBASE+12(%esp),%ecx # len
- movl ARGBASE+4(%esp),%esi # src
- movl ARGBASE+8(%esp),%edi # dst
-
- testl $2, %edi # Check alignment.
- jz 2f # Jump if alignment is ok.
- subl $2, %ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %ecx # ecx was < 2. Deal with it.
- jmp 4f
-SRC(1: movw (%esi), %bx )
- addl $2, %esi
-DST( movw %bx, (%edi) )
- addl $2, %edi
- addw %bx, %ax
- adcl $0, %eax
-2:
- movl %ecx, FP(%esp)
- shrl $5, %ecx
- jz 2f
- testl %esi, %esi
-SRC(1: movl (%esi), %ebx )
-SRC( movl 4(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
- adcl %edx, %eax
-DST( movl %edx, 4(%edi) )
-
-SRC( movl 8(%esi), %ebx )
-SRC( movl 12(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 8(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 12(%edi) )
-
-SRC( movl 16(%esi), %ebx )
-SRC( movl 20(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 16(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 20(%edi) )
-
-SRC( movl 24(%esi), %ebx )
-SRC( movl 28(%esi), %edx )
- adcl %ebx, %eax
-DST( movl %ebx, 24(%edi) )
- adcl %edx, %eax
-DST( movl %edx, 28(%edi) )
-
- lea 32(%esi), %esi
- lea 32(%edi), %edi
- dec %ecx
- jne 1b
- adcl $0, %eax
-2: movl FP(%esp), %edx
- movl %edx, %ecx
- andl $0x1c, %edx
- je 4f
- shrl $2, %edx # This clears CF
-SRC(3: movl (%esi), %ebx )
- adcl %ebx, %eax
-DST( movl %ebx, (%edi) )
- lea 4(%esi), %esi
- lea 4(%edi), %edi
- dec %edx
- jne 3b
- adcl $0, %eax
-4: andl $3, %ecx
- jz 7f
- cmpl $2, %ecx
- jb 5f
-SRC( movw (%esi), %cx )
- leal 2(%esi), %esi
-DST( movw %cx, (%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%ecx
-SRC(5: movb (%esi), %cl )
-DST( movb %cl, (%edi) )
-6: addl %ecx, %eax
- adcl $0, %eax
-7:
-5000:
-
-# Exception handler:
-.section .fixup, "ax"
-
-6001:
- movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
-
- # zero the complete destination - computing the rest
- # is too much work
- movl ARGBASE+8(%esp), %edi # dst
- movl ARGBASE+12(%esp), %ecx # len
- xorl %eax,%eax
- rep ; stosb
-
- jmp 5000b
-
-6002:
- movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT,(%ebx)
- jmp 5000b
-
-.previous
-
- popl %ebx
- popl %esi
- popl %edi
- popl %ecx # equivalent to addl $4,%esp
- ret
-
-#else
-
-/* Version for PentiumII/PPro */
-
-#define ROUND1(x) \
- SRC(movl x(%esi), %ebx ) ; \
- addl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
-
-#define ROUND(x) \
- SRC(movl x(%esi), %ebx ) ; \
- adcl %ebx, %eax ; \
- DST(movl %ebx, x(%edi) ) ;
-
-#define ARGBASE 12
-
-csum_partial_copy_generic_i386:
- pushl %ebx
- pushl %edi
- pushl %esi
- movl ARGBASE+4(%esp),%esi #src
- movl ARGBASE+8(%esp),%edi #dst
- movl ARGBASE+12(%esp),%ecx #len
- movl ARGBASE+16(%esp),%eax #sum
-# movl %ecx, %edx
- movl %ecx, %ebx
- movl %esi, %edx
- shrl $6, %ecx
- andl $0x3c, %ebx
- negl %ebx
- subl %ebx, %esi
- subl %ebx, %edi
- lea -1(%esi),%edx
- andl $-32,%edx
- lea 3f(%ebx,%ebx), %ebx
- testl %esi, %esi
- jmp *%ebx
-1: addl $64,%esi
- addl $64,%edi
- SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
- ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
- ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36)
- ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20)
- ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4)
-3: adcl $0,%eax
- addl $64, %edx
- dec %ecx
- jge 1b
-4: movl ARGBASE+12(%esp),%edx #len
- andl $3, %edx
- jz 7f
- cmpl $2, %edx
- jb 5f
-SRC( movw (%esi), %dx )
- leal 2(%esi), %esi
-DST( movw %dx, (%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%edx
-5:
-SRC( movb (%esi), %dl )
-DST( movb %dl, (%edi) )
-6: addl %edx, %eax
- adcl $0, %eax
-7:
-.section .fixup, "ax"
-6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
- movl $-EFAULT, (%ebx)
- # zero the complete destination (computing the rest is too much work)
- movl ARGBASE+8(%esp),%edi # dst
- movl ARGBASE+12(%esp),%ecx # len
- xorl %eax,%eax
- rep; stosb
- jmp 7b
-6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
- movl $-EFAULT, (%ebx)
- jmp 7b
-.previous
-
- popl %esi
- popl %edi
- popl %ebx
- ret
-
-#undef ROUND
-#undef ROUND1
-
-#endif
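Both csum_partial variants above compute the 32-bit ones' complement sum used by the IP stack: 32-bit words are accumulated with end-around carry (the adcl chains) and callers later fold the accumulator down to 16 bits. A byte-at-a-time C reference of the same arithmetic, which folds to the same 16-bit checksum as the assembly; this is a sketch for clarity, not the kernel's csum interface:

#include <stddef.h>
#include <stdint.h>

/*
 * Reference ones' complement sum, 16 bits at a time.  csum_partial()
 * accumulates 32-bit words with adcl instead, but after folding both
 * give the same 16-bit checksum.
 */
static uint32_t csum_partial_ref(const uint8_t *buf, size_t len, uint32_t sum)
{
        while (len > 1) {
                sum += buf[0] | (uint32_t)buf[1] << 8;  /* little-endian 16-bit load */
                buf += 2;
                len -= 2;
        }
        if (len)                                        /* trailing odd byte */
                sum += buf[0];

        while (sum >> 16)                               /* end-around carry */
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

/* Final step done by callers (csum_fold): fold and complement. */
static uint16_t csum_fold_ref(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}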
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c
deleted file mode 100644
index f3fe1a688f7..00000000000
--- a/arch/um/sys-i386/delay.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
- * Mostly copied from arch/x86/lib/delay.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <asm/param.h>
-
-void __delay(unsigned long loops)
-{
- asm volatile(
- "test %0,%0\n"
- "jz 3f\n"
- "jmp 1f\n"
-
- ".align 16\n"
- "1: jmp 2f\n"
-
- ".align 16\n"
- "2: dec %0\n"
- " jnz 2b\n"
- "3: dec %0\n"
-
- : /* we don't need output */
- : "a" (loops)
- );
-}
-EXPORT_SYMBOL(__delay);
-
-inline void __const_udelay(unsigned long xloops)
-{
- int d0;
-
- xloops *= 4;
- asm("mull %%edx"
- : "=d" (xloops), "=&a" (d0)
- : "1" (xloops), "0"
- (loops_per_jiffy * (HZ/4)));
-
- __delay(++xloops);
-}
-EXPORT_SYMBOL(__const_udelay);
-
-void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
-}
-EXPORT_SYMBOL(__udelay);
-
-void __ndelay(unsigned long nsecs)
-{
- __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
-EXPORT_SYMBOL(__ndelay);
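__const_udelay() above is fixed-point arithmetic: 0x000010c7 is 2**32 / 10**6 rounded up, so usecs * 0x000010c7 expresses the delay as a fraction of 2**32, and the mull against loops_per_jiffy * (HZ/4) (the earlier *4 cancels the /4) leaves the wanted loop count in the upper 32 bits of the product. A quick host-side check of the constants and of the scaling; the loops_per_jiffy and HZ values below are made-up examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 2**32 / 10**6 = 4294.97, rounded up to 4295 = 0x10c7 */
        printf("0x%08llx\n", (1ULL << 32) / 1000000 + 1);

        /* 2**32 / 10**9 = 4.29, rounded up to 5 */
        printf("0x%08llx\n", (1ULL << 32) / 1000000000 + 1);

        /* loops = usecs * (2**32 / 10**6) * loops_per_jiffy * HZ / 2**32 */
        uint64_t lpj = 4705280, hz = 100, usecs = 10;   /* example values */
        uint64_t loops = (usecs * 0x10c7 * lpj * hz) >> 32;

        printf("%llu loops for %lluus\n", (unsigned long long)loops,
               (unsigned long long)usecs);
        return 0;
}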
diff --git a/arch/um/sys-i386/elfcore.c b/arch/um/sys-i386/elfcore.c
deleted file mode 100644
index 6bb49b687c9..00000000000
--- a/arch/um/sys-i386/elfcore.c
+++ /dev/null
@@ -1,83 +0,0 @@
-#include <linux/elf.h>
-#include <linux/coredump.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-
-#include <asm/elf.h>
-
-
-Elf32_Half elf_core_extra_phdrs(void)
-{
- return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
-}
-
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
- unsigned long limit)
-{
- if ( vsyscall_ehdr ) {
- const struct elfhdr *const ehdrp =
- (struct elfhdr *) vsyscall_ehdr;
- const struct elf_phdr *const phdrp =
- (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
- int i;
- Elf32_Off ofs = 0;
-
- for (i = 0; i < ehdrp->e_phnum; ++i) {
- struct elf_phdr phdr = phdrp[i];
-
- if (phdr.p_type == PT_LOAD) {
- ofs = phdr.p_offset = offset;
- offset += phdr.p_filesz;
- } else {
- phdr.p_offset += ofs;
- }
- phdr.p_paddr = 0; /* match other core phdrs */
- *size += sizeof(phdr);
- if (*size > limit
- || !dump_write(file, &phdr, sizeof(phdr)))
- return 0;
- }
- }
- return 1;
-}
-
-int elf_core_write_extra_data(struct file *file, size_t *size,
- unsigned long limit)
-{
- if ( vsyscall_ehdr ) {
- const struct elfhdr *const ehdrp =
- (struct elfhdr *) vsyscall_ehdr;
- const struct elf_phdr *const phdrp =
- (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
- int i;
-
- for (i = 0; i < ehdrp->e_phnum; ++i) {
- if (phdrp[i].p_type == PT_LOAD) {
- void *addr = (void *) phdrp[i].p_vaddr;
- size_t filesz = phdrp[i].p_filesz;
-
- *size += filesz;
- if (*size > limit
- || !dump_write(file, addr, filesz))
- return 0;
- }
- }
- }
- return 1;
-}
-
-size_t elf_core_extra_data_size(void)
-{
- if ( vsyscall_ehdr ) {
- const struct elfhdr *const ehdrp =
- (struct elfhdr *)vsyscall_ehdr;
- const struct elf_phdr *const phdrp =
- (const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
- int i;
-
- for (i = 0; i < ehdrp->e_phnum; ++i)
- if (phdrp[i].p_type == PT_LOAD)
- return (size_t) phdrp[i].p_filesz;
- }
- return 0;
-}
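The helpers above all do the same walk: treat vsyscall_ehdr as an in-memory ELF image, step through its program headers and act on the PT_LOAD entries. The walk in isolation, as a small host-side sketch where the image base is passed in explicitly (the kernel code gets it from vsyscall_ehdr):

#include <elf.h>

/* Count PT_LOAD segments of an ELF image mapped at `base`; the same
 * loop as elf_core_write_extra_data(), minus the dump_write() plumbing. */
static int count_load_segments(unsigned long base)
{
        const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)base;
        const Elf32_Phdr *phdr = (const Elf32_Phdr *)(base + ehdr->e_phoff);
        int i, loads = 0;

        for (i = 0; i < ehdr->e_phnum; i++)
                if (phdr[i].p_type == PT_LOAD)
                        loads++;
        return loads;
}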
diff --git a/arch/um/sys-i386/fault.c b/arch/um/sys-i386/fault.c
deleted file mode 100644
index d670f68532f..00000000000
--- a/arch/um/sys-i386/fault.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include "sysdep/ptrace.h"
-
-/* These two are from asm-um/uaccess.h and linux/module.h, check them. */
-struct exception_table_entry
-{
- unsigned long insn;
- unsigned long fixup;
-};
-
-const struct exception_table_entry *search_exception_tables(unsigned long add);
-
-/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
-int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(address);
- if (fixup != 0) {
- UPT_IP(regs) = fixup->fixup;
- return 1;
- }
- return 0;
-}
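arch_fixup() is the skas-mode counterpart of i386's fixup_exception(): look the faulting instruction's address up in the exception table and, on a hit, point the saved IP at the fixup stub so the access fails gracefully instead of oopsing. The lookup reduced to a linear search over a caller-supplied table; the kernel's real search_exception_tables() walks sorted per-module tables with a binary search:

#include <stddef.h>

struct extable_entry {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume at when it does */
};

/* Stand-in for search_exception_tables(): return the entry whose insn
 * matches the faulting address, or NULL when there is no fixup. */
static const struct extable_entry *
search_table(const struct extable_entry *tbl, size_t n, unsigned long addr)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].insn == addr)
                        return &tbl[i];
        return NULL;
}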
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
deleted file mode 100644
index bfbefd30db8..00000000000
--- a/arch/um/sys-i386/ksyms.c
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "linux/module.h"
-#include "asm/checksum.h"
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
deleted file mode 100644
index 3f2bf208d88..00000000000
--- a/arch/um/sys-i386/ldt.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <asm/unistd.h>
-#include "os.h"
-#include "proc_mm.h"
-#include "skas.h"
-#include "skas_ptrace.h"
-#include "sysdep/tls.h"
-
-extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
-
-static long write_ldt_entry(struct mm_id *mm_idp, int func,
- struct user_desc *desc, void **addr, int done)
-{
- long res;
-
- if (proc_mm) {
- /*
- * This is a special handling for the case, that the mm to
- * modify isn't current->active_mm.
- * If this is called directly by modify_ldt,
- * (current->active_mm->context.skas.u == mm_idp)
- * will be true. So no call to __switch_mm(mm_idp) is done.
- * If this is called in case of init_new_ldt or PTRACE_LDT,
- * mm_idp won't belong to current->active_mm, but child->mm.
- * So we need to switch child's mm into our userspace, then
- * later switch back.
- *
- * Note: I'm unsure: should interrupts be disabled here?
- */
- if (!current->active_mm || current->active_mm == &init_mm ||
- mm_idp != &current->active_mm->context.id)
- __switch_mm(mm_idp);
- }
-
- if (ptrace_ldt) {
- struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
- .func = func,
- .ptr = desc,
- .bytecount = sizeof(*desc)};
- u32 cpu;
- int pid;
-
- if (!proc_mm)
- pid = mm_idp->u.pid;
- else {
- cpu = get_cpu();
- pid = userspace_pid[cpu];
- }
-
- res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
-
- if (proc_mm)
- put_cpu();
- }
- else {
- void *stub_addr;
- res = syscall_stub_data(mm_idp, (unsigned long *)desc,
- (sizeof(*desc) + sizeof(long) - 1) &
- ~(sizeof(long) - 1),
- addr, &stub_addr);
- if (!res) {
- unsigned long args[] = { func,
- (unsigned long)stub_addr,
- sizeof(*desc),
- 0, 0, 0 };
- res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
- 0, addr, done);
- }
- }
-
- if (proc_mm) {
- /*
- * This is the second part of special handling, that makes
- * PTRACE_LDT possible to implement.
- */
- if (current->active_mm && current->active_mm != &init_mm &&
- mm_idp != &current->active_mm->context.id)
- __switch_mm(&current->active_mm->context.id);
- }
-
- return res;
-}
-
-static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
-{
- int res, n;
- struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
- .func = 0,
- .bytecount = bytecount,
- .ptr = kmalloc(bytecount, GFP_KERNEL)};
- u32 cpu;
-
- if (ptrace_ldt.ptr == NULL)
- return -ENOMEM;
-
- /*
- * This is called from sys_modify_ldt only, so userspace_pid gives
- * us the right number
- */
-
- cpu = get_cpu();
- res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
- put_cpu();
- if (res < 0)
- goto out;
-
- n = copy_to_user(ptr, ptrace_ldt.ptr, res);
- if (n != 0)
- res = -EFAULT;
-
- out:
- kfree(ptrace_ldt.ptr);
-
- return res;
-}
-
-/*
- * In skas mode, we hold our own ldt data in UML.
- * Thus, the code implementing sys_modify_ldt_skas
- * is very similar to (and mostly stolen from) sys_modify_ldt
- * for arch/i386/kernel/ldt.c
- * The routines copied and modified in part are:
- * - read_ldt
- * - read_default_ldt
- * - write_ldt
- * - sys_modify_ldt_skas
- */
-
-static int read_ldt(void __user * ptr, unsigned long bytecount)
-{
- int i, err = 0;
- unsigned long size;
- uml_ldt_t * ldt = &current->mm->context.ldt;
-
- if (!ldt->entry_count)
- goto out;
- if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
- bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
- err = bytecount;
-
- if (ptrace_ldt)
- return read_ldt_from_host(ptr, bytecount);
-
- mutex_lock(&ldt->lock);
- if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
- size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
- if (size > bytecount)
- size = bytecount;
- if (copy_to_user(ptr, ldt->u.entries, size))
- err = -EFAULT;
- bytecount -= size;
- ptr += size;
- }
- else {
- for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
- i++) {
- size = PAGE_SIZE;
- if (size > bytecount)
- size = bytecount;
- if (copy_to_user(ptr, ldt->u.pages[i], size)) {
- err = -EFAULT;
- break;
- }
- bytecount -= size;
- ptr += size;
- }
- }
- mutex_unlock(&ldt->lock);
-
- if (bytecount == 0 || err == -EFAULT)
- goto out;
-
- if (clear_user(ptr, bytecount))
- err = -EFAULT;
-
-out:
- return err;
-}
-
-static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-{
- int err;
-
- if (bytecount > 5*LDT_ENTRY_SIZE)
- bytecount = 5*LDT_ENTRY_SIZE;
-
- err = bytecount;
- /*
- * UML doesn't support lcall7 and lcall27.
- * So, we don't really have a default ldt, but emulate
- * an empty ldt of common host default ldt size.
- */
- if (clear_user(ptr, bytecount))
- err = -EFAULT;
-
- return err;
-}
-
-static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
-{
- uml_ldt_t * ldt = &current->mm->context.ldt;
- struct mm_id * mm_idp = &current->mm->context.id;
- int i, err;
- struct user_desc ldt_info;
- struct ldt_entry entry0, *ldt_p;
- void *addr = NULL;
-
- err = -EINVAL;
- if (bytecount != sizeof(ldt_info))
- goto out;
- err = -EFAULT;
- if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
- goto out;
-
- err = -EINVAL;
- if (ldt_info.entry_number >= LDT_ENTRIES)
- goto out;
- if (ldt_info.contents == 3) {
- if (func == 1)
- goto out;
- if (ldt_info.seg_not_present == 0)
- goto out;
- }
-
- if (!ptrace_ldt)
- mutex_lock(&ldt->lock);
-
- err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
- if (err)
- goto out_unlock;
- else if (ptrace_ldt) {
- /* With PTRACE_LDT available, this is used as a flag only */
- ldt->entry_count = 1;
- goto out;
- }
-
- if (ldt_info.entry_number >= ldt->entry_count &&
- ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
- for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
- i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
- i++) {
- if (i == 0)
- memcpy(&entry0, ldt->u.entries,
- sizeof(entry0));
- ldt->u.pages[i] = (struct ldt_entry *)
- __get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!ldt->u.pages[i]) {
- err = -ENOMEM;
- /* Undo the change in host */
- memset(&ldt_info, 0, sizeof(ldt_info));
- write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
- goto out_unlock;
- }
- if (i == 0) {
- memcpy(ldt->u.pages[0], &entry0,
- sizeof(entry0));
- memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
- sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
- }
- ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
- }
- }
- if (ldt->entry_count <= ldt_info.entry_number)
- ldt->entry_count = ldt_info.entry_number + 1;
-
- if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
- ldt_p = ldt->u.entries + ldt_info.entry_number;
- else
- ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
- ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
-
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
- (func == 1 || LDT_empty(&ldt_info))) {
- ldt_p->a = 0;
- ldt_p->b = 0;
- }
- else{
- if (func == 1)
- ldt_info.useable = 0;
- ldt_p->a = LDT_entry_a(&ldt_info);
- ldt_p->b = LDT_entry_b(&ldt_info);
- }
- err = 0;
-
-out_unlock:
- mutex_unlock(&ldt->lock);
-out:
- return err;
-}
-
-static long do_modify_ldt_skas(int func, void __user *ptr,
- unsigned long bytecount)
-{
- int ret = -ENOSYS;
-
- switch (func) {
- case 0:
-