aboutsummaryrefslogtreecommitdiff
path: root/include/asm-s390
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-s390
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-s390')
-rw-r--r--include/asm-s390/a.out.h38
-rw-r--r--include/asm-s390/atomic.h207
-rw-r--r--include/asm-s390/bitops.h1188
-rw-r--r--include/asm-s390/bug.h14
-rw-r--r--include/asm-s390/bugs.h22
-rw-r--r--include/asm-s390/byteorder.h131
-rw-r--r--include/asm-s390/cache.h20
-rw-r--r--include/asm-s390/cacheflush.h26
-rw-r--r--include/asm-s390/ccwdev.h192
-rw-r--r--include/asm-s390/ccwgroup.h45
-rw-r--r--include/asm-s390/checksum.h264
-rw-r--r--include/asm-s390/cio.h281
-rw-r--r--include/asm-s390/cmb.h98
-rw-r--r--include/asm-s390/compat.h198
-rw-r--r--include/asm-s390/cpcmd.h24
-rw-r--r--include/asm-s390/cputime.h176
-rw-r--r--include/asm-s390/current.h23
-rw-r--r--include/asm-s390/dasd.h268
-rw-r--r--include/asm-s390/debug.h249
-rw-r--r--include/asm-s390/delay.h22
-rw-r--r--include/asm-s390/div64.h49
-rw-r--r--include/asm-s390/dma-mapping.h14
-rw-r--r--include/asm-s390/dma.h16
-rw-r--r--include/asm-s390/ebcdic.h49
-rw-r--r--include/asm-s390/elf.h215
-rw-r--r--include/asm-s390/errno.h13
-rw-r--r--include/asm-s390/extmem.h32
-rw-r--r--include/asm-s390/fcntl.h97
-rw-r--r--include/asm-s390/hardirq.h38
-rw-r--r--include/asm-s390/idals.h257
-rw-r--r--include/asm-s390/io.h123
-rw-r--r--include/asm-s390/ioctl.h88
-rw-r--r--include/asm-s390/ioctls.h88
-rw-r--r--include/asm-s390/ipc.h1
-rw-r--r--include/asm-s390/ipcbuf.h31
-rw-r--r--include/asm-s390/irq.h30
-rw-r--r--include/asm-s390/kmap_types.h23
-rw-r--r--include/asm-s390/linkage.h6
-rw-r--r--include/asm-s390/local.h59
-rw-r--r--include/asm-s390/lowcore.h351
-rw-r--r--include/asm-s390/mathemu.h29
-rw-r--r--include/asm-s390/mman.h51
-rw-r--r--include/asm-s390/mmu.h7
-rw-r--r--include/asm-s390/mmu_context.h54
-rw-r--r--include/asm-s390/module.h46
-rw-r--r--include/asm-s390/msgbuf.h37
-rw-r--r--include/asm-s390/namei.h21
-rw-r--r--include/asm-s390/page.h208
-rw-r--r--include/asm-s390/param.h30
-rw-r--r--include/asm-s390/pci.h10
-rw-r--r--include/asm-s390/percpu.h70
-rw-r--r--include/asm-s390/pgalloc.h168
-rw-r--r--include/asm-s390/pgtable.h813
-rw-r--r--include/asm-s390/poll.h34
-rw-r--r--include/asm-s390/posix_types.h99
-rw-r--r--include/asm-s390/processor.h355
-rw-r--r--include/asm-s390/ptrace.h475
-rw-r--r--include/asm-s390/qdio.h401
-rw-r--r--include/asm-s390/qeth.h78
-rw-r--r--include/asm-s390/resource.h15
-rw-r--r--include/asm-s390/rwsem.h355
-rw-r--r--include/asm-s390/s390_ext.h34
-rw-r--r--include/asm-s390/scatterlist.h16
-rw-r--r--include/asm-s390/sections.h6
-rw-r--r--include/asm-s390/segment.h4
-rw-r--r--include/asm-s390/semaphore.h110
-rw-r--r--include/asm-s390/sembuf.h29
-rw-r--r--include/asm-s390/setup.h82
-rw-r--r--include/asm-s390/sfp-machine.h139
-rw-r--r--include/asm-s390/shmbuf.h48
-rw-r--r--include/asm-s390/shmparam.h13
-rw-r--r--include/asm-s390/sigcontext.h69
-rw-r--r--include/asm-s390/siginfo.h24
-rw-r--r--include/asm-s390/signal.h197
-rw-r--r--include/asm-s390/sigp.h131
-rw-r--r--include/asm-s390/smp.h108
-rw-r--r--include/asm-s390/socket.h58
-rw-r--r--include/asm-s390/sockios.h20
-rw-r--r--include/asm-s390/spinlock.h251
-rw-r--r--include/asm-s390/stat.h105
-rw-r--r--include/asm-s390/statfs.h71
-rw-r--r--include/asm-s390/string.h137
-rw-r--r--include/asm-s390/suspend.h5
-rw-r--r--include/asm-s390/system.h477
-rw-r--r--include/asm-s390/tape390.h39
-rw-r--r--include/asm-s390/termbits.h181
-rw-r--r--include/asm-s390/termios.h114
-rw-r--r--include/asm-s390/thread_info.h120
-rw-r--r--include/asm-s390/timer.h46
-rw-r--r--include/asm-s390/timex.h34
-rw-r--r--include/asm-s390/tlb.h20
-rw-r--r--include/asm-s390/tlbflush.h153
-rw-r--r--include/asm-s390/todclk.h23
-rw-r--r--include/asm-s390/topology.h6
-rw-r--r--include/asm-s390/types.h101
-rw-r--r--include/asm-s390/uaccess.h436
-rw-r--r--include/asm-s390/ucontext.h20
-rw-r--r--include/asm-s390/unaligned.h24
-rw-r--r--include/asm-s390/unistd.h605
-rw-r--r--include/asm-s390/user.h77
-rw-r--r--include/asm-s390/vtoc.h372
-rw-r--r--include/asm-s390/xor.h1
102 files changed, 12928 insertions, 0 deletions
diff --git a/include/asm-s390/a.out.h b/include/asm-s390/a.out.h
new file mode 100644
index 00000000000..72adee6ef33
--- /dev/null
+++ b/include/asm-s390/a.out.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-s390/a.out.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *
+ * Derived from "include/asm-i386/a.out.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ * I don't think we'll ever need a.out ...
+ */
+
+#ifndef __S390_A_OUT_H__
+#define __S390_A_OUT_H__
+
+struct exec
+{
+ unsigned long a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP TASK_SIZE
+
+#endif
+
+#endif /* __S390_A_OUT_H__ */
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
new file mode 100644
index 00000000000..d5a05cf4716
--- /dev/null
+++ b/include/asm-s390/atomic.h
@@ -0,0 +1,207 @@
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
+/*
+ * include/asm-s390/atomic.h
+ *
+ * S390 version
+ * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow,
+ * Arnd Bergmann (arndb@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ * S390 uses 'Compare And Swap' for atomicity in SMP environment
+ */
+
+typedef struct {
+ volatile int counter;
+} __attribute__ ((aligned (4))) atomic_t;
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+#define __CS_LOOP(ptr, op_val, op_string) ({ \
+ typeof(ptr->counter) old_val, new_val; \
+ __asm__ __volatile__(" l %0,0(%3)\n" \
+ "0: lr %1,%0\n" \
+ op_string " %1,%4\n" \
+ " cs %0,%1,0(%3)\n" \
+ " jl 0b" \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=m" (((atomic_t *)(ptr))->counter) \
+ : "a" (ptr), "d" (op_val), \
+ "m" (((atomic_t *)(ptr))->counter) \
+ : "cc", "memory" ); \
+ new_val; \
+})
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+ __CS_LOOP(v, i, "ar");
+}
+static __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+ return __CS_LOOP(v, i, "ar");
+}
+static __inline__ int atomic_add_negative(int i, atomic_t * v)
+{
+ return __CS_LOOP(v, i, "ar") < 0;
+}
+static __inline__ void atomic_sub(int i, atomic_t * v)
+{
+ __CS_LOOP(v, i, "sr");
+}
+static __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+ return __CS_LOOP(v, i, "sr");
+}
+static __inline__ void atomic_inc(volatile atomic_t * v)
+{
+ __CS_LOOP(v, 1, "ar");
+}
+static __inline__ int atomic_inc_return(volatile atomic_t * v)
+{
+ return __CS_LOOP(v, 1, "ar");
+}
+
+static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
+{
+ return __CS_LOOP(v, 1, "ar") == 0;
+}
+static __inline__ void atomic_dec(volatile atomic_t * v)
+{
+ __CS_LOOP(v, 1, "sr");
+}
+static __inline__ int atomic_dec_return(volatile atomic_t * v)
+{
+ return __CS_LOOP(v, 1, "sr");
+}
+static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
+{
+ return __CS_LOOP(v, 1, "sr") == 0;
+}
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+{
+ __CS_LOOP(v, ~mask, "nr");
+}
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+{
+ __CS_LOOP(v, mask, "or");
+}
+#undef __CS_LOOP
+
+#ifdef __s390x__
+typedef struct {
+ volatile long long counter;
+} __attribute__ ((aligned (8))) atomic64_t;
+#define ATOMIC64_INIT(i) { (i) }
+
+#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+ typeof(ptr->counter) old_val, new_val; \
+ __asm__ __volatile__(" lg %0,0(%3)\n" \
+ "0: lgr %1,%0\n" \
+ op_string " %1,%4\n" \
+ " csg %0,%1,0(%3)\n" \
+ " jl 0b" \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=m" (((atomic_t *)(ptr))->counter) \
+ : "a" (ptr), "d" (op_val), \
+ "m" (((atomic_t *)(ptr))->counter) \
+ : "cc", "memory" ); \
+ new_val; \
+})
+#define atomic64_read(v) ((v)->counter)
+#define atomic64_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic64_add(int i, atomic64_t * v)
+{
+ __CSG_LOOP(v, i, "agr");
+}
+static __inline__ long long atomic64_add_return(int i, atomic64_t * v)
+{
+ return __CSG_LOOP(v, i, "agr");
+}
+static __inline__ long long atomic64_add_negative(int i, atomic64_t * v)
+{
+ return __CSG_LOOP(v, i, "agr") < 0;
+}
+static __inline__ void atomic64_sub(int i, atomic64_t * v)
+{
+ __CSG_LOOP(v, i, "sgr");
+}
+static __inline__ void atomic64_inc(volatile atomic64_t * v)
+{
+ __CSG_LOOP(v, 1, "agr");
+}
+static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
+{
+ return __CSG_LOOP(v, 1, "agr");
+}
+static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
+{
+ return __CSG_LOOP(v, 1, "agr") == 0;
+}
+static __inline__ void atomic64_dec(volatile atomic64_t * v)
+{
+ __CSG_LOOP(v, 1, "sgr");
+}
+static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
+{
+ return __CSG_LOOP(v, 1, "sgr");
+}
+static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+{
+ return __CSG_LOOP(v, 1, "sgr") == 0;
+}
+static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+{
+ __CSG_LOOP(v, ~mask, "ngr");
+}
+static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+{
+ __CSG_LOOP(v, mask, "ogr");
+}
+
+#undef __CSG_LOOP
+#endif
+
+/*
+  returns 0 if expected_oldval == value in *v (the swap was successful)
+  returns 1 if unsuccessful.
+
+  This is non-portable; use bitops or spinlocks instead!
+*/
+static __inline__ int
+atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+{
+ int retval;
+
+ __asm__ __volatile__(
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (v->counter)
+ : "a" (v), "d" (expected_oldval) , "d" (new_val),
+ "m" (v->counter) : "cc", "memory" );
+ return retval;
+}
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#endif /* __KERNEL__ */
+#endif /* __ARCH_S390_ATOMIC__ */
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
new file mode 100644
index 00000000000..16bb08499c7
--- /dev/null
+++ b/include/asm-s390/bitops.h
@@ -0,0 +1,1188 @@
+#ifndef _S390_BITOPS_H
+#define _S390_BITOPS_H
+
+/*
+ * include/asm-s390/bitops.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ */
+#include <linux/config.h>
+#include <linux/compiler.h>
+
+/*
+ * 32 bit bitops format:
+ * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
+ * bit 32 is the LSB of *(addr+4). That combined with the
+ * big endian byte order on S390 gives the following bit
+ * order in memory:
+ * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
+ * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
+ * after that follows the next long with bit numbers
+ * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
+ * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
+ * The reason for this bit ordering is the fact that
+ * in the architecture independent code bits operations
+ * of the form "flags |= (1 << bitnr)" are used INTERMIXED
+ * with operation of the form "set_bit(bitnr, flags)".
+ *
+ * 64 bit bitops format:
+ * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
+ * bit 64 is the LSB of *(addr+8). That combined with the
+ * big endian byte order on S390 gives the following bit
+ * order in memory: