author    Jeff Garzik <jgarzik@pobox.com>  2005-11-15 04:51:40 -0500
committer Jeff Garzik <jgarzik@pobox.com>  2005-11-15 04:51:40 -0500
commit    f055408957750cf759162c364c2a4dfe19765844 (patch)
tree      aecc0a13c582d310902e6fa95d8853c627828fcc /include
parent    83cbd33aae2c3cd14f80a8abf733033a57aa4923 (diff)
parent    4060994c3e337b40e0f6fa8ce2cc178e021baf3d (diff)
Merge branch 'master'
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/atomic.h | 12
-rw-r--r--  include/asm-arm/arch-pxa/akita.h | 2
-rw-r--r--  include/asm-arm/atomic.h | 42
-rw-r--r--  include/asm-arm26/atomic.h | 29
-rw-r--r--  include/asm-cris/atomic.h | 27
-rw-r--r--  include/asm-frv/atomic.h | 12
-rw-r--r--  include/asm-generic/sections.h | 1
-rw-r--r--  include/asm-h8300/atomic.h | 27
-rw-r--r--  include/asm-i386/atomic.h | 21
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h | 2
-rw-r--r--  include/asm-i386/processor.h | 4
-rw-r--r--  include/asm-i386/system.h | 42
-rw-r--r--  include/asm-ia64/atomic.h | 12
-rw-r--r--  include/asm-m68k/atomic.h | 12
-rw-r--r--  include/asm-m68k/processor.h | 14
-rw-r--r--  include/asm-m68k/thread_info.h | 91
-rw-r--r--  include/asm-m68knommu/atomic.h | 12
-rw-r--r--  include/asm-mips/atomic.h | 21
-rw-r--r--  include/asm-parisc/atomic.h | 20
-rw-r--r--  include/asm-powerpc/atomic.h | 27
-rw-r--r--  include/asm-powerpc/btext.h (renamed from include/asm-ppc64/btext.h) | 0
-rw-r--r--  include/asm-powerpc/delay.h (renamed from include/asm-ppc64/delay.h) | 19
-rw-r--r--  include/asm-powerpc/eeh.h (renamed from include/asm-ppc64/eeh.h) | 0
-rw-r--r--  include/asm-powerpc/floppy.h (renamed from include/asm-ppc64/floppy.h) | 25
-rw-r--r--  include/asm-powerpc/hvconsole.h (renamed from include/asm-ppc64/hvconsole.h) | 0
-rw-r--r--  include/asm-powerpc/hvcserver.h (renamed from include/asm-ppc64/hvcserver.h) | 0
-rw-r--r--  include/asm-powerpc/kexec.h | 1
-rw-r--r--  include/asm-powerpc/machdep.h | 4
-rw-r--r--  include/asm-powerpc/nvram.h (renamed from include/asm-ppc64/nvram.h) | 17
-rw-r--r--  include/asm-powerpc/page.h | 179
-rw-r--r--  include/asm-powerpc/page_32.h | 40
-rw-r--r--  include/asm-powerpc/page_64.h | 174
-rw-r--r--  include/asm-powerpc/serial.h (renamed from include/asm-ppc64/serial.h) | 19
-rw-r--r--  include/asm-powerpc/vdso_datapage.h | 2
-rw-r--r--  include/asm-ppc/immap_85xx.h | 2
-rw-r--r--  include/asm-ppc/ipic.h | 2
-rw-r--r--  include/asm-ppc/mpc83xx.h | 2
-rw-r--r--  include/asm-ppc/mpc85xx.h | 2
-rw-r--r--  include/asm-ppc/nvram.h | 73
-rw-r--r--  include/asm-ppc/ppc_sys.h | 2
-rw-r--r--  include/asm-ppc64/page.h | 328
-rw-r--r--  include/asm-ppc64/prom.h | 220
-rw-r--r--  include/asm-ppc64/system.h | 310
-rw-r--r--  include/asm-s390/atomic.h | 12
-rw-r--r--  include/asm-sh/atomic.h | 29
-rw-r--r--  include/asm-sh64/atomic.h | 29
-rw-r--r--  include/asm-sparc/atomic.h | 4
-rw-r--r--  include/asm-sparc64/atomic.h | 12
-rw-r--r--  include/asm-v850/atomic.h | 30
-rw-r--r--  include/asm-x86_64/apic.h | 2
-rw-r--r--  include/asm-x86_64/atomic.h | 21
-rw-r--r--  include/asm-x86_64/cache.h | 2
-rw-r--r--  include/asm-x86_64/desc.h | 16
-rw-r--r--  include/asm-x86_64/dma.h | 11
-rw-r--r--  include/asm-x86_64/hpet.h | 35
-rw-r--r--  include/asm-x86_64/hw_irq.h | 2
-rw-r--r--  include/asm-x86_64/ia32.h | 5
-rw-r--r--  include/asm-x86_64/mce.h | 10
-rw-r--r--  include/asm-x86_64/mmzone.h | 9
-rw-r--r--  include/asm-x86_64/mpspec.h | 7
-rw-r--r--  include/asm-x86_64/msr.h | 2
-rw-r--r--  include/asm-x86_64/numa.h | 2
-rw-r--r--  include/asm-x86_64/page.h | 2
-rw-r--r--  include/asm-x86_64/pda.h | 1
-rw-r--r--  include/asm-x86_64/pgtable.h | 5
-rw-r--r--  include/asm-x86_64/processor.h | 4
-rw-r--r--  include/asm-x86_64/proto.h | 4
-rw-r--r--  include/asm-x86_64/rwsem.h | 283
-rw-r--r--  include/asm-x86_64/smp.h | 3
-rw-r--r--  include/asm-x86_64/spinlock.h | 12
-rw-r--r--  include/asm-x86_64/topology.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 3
-rw-r--r--  include/asm-xtensa/atomic.h | 20
-rw-r--r--  include/linux/acct.h | 2
-rw-r--r--  include/linux/aio.h | 13
-rw-r--r--  include/linux/bitops.h | 10
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/cm4000_cs.h | 66
-rw-r--r--  include/linux/compat_ioctl.h | 8
-rw-r--r--  include/linux/file.h | 10
-rw-r--r--  include/linux/font.h | 2
-rw-r--r--  include/linux/fsl_devices.h | 2
-rw-r--r--  include/linux/genhd.h | 4
-rw-r--r--  include/linux/gfp.h | 16
-rw-r--r--  include/linux/hardirq.h | 2
-rw-r--r--  include/linux/hugetlb.h | 4
-rw-r--r--  include/linux/i2c-id.h | 1
-rw-r--r--  include/linux/init_task.h | 1
-rw-r--r--  include/linux/interrupt.h | 1
-rw-r--r--  include/linux/mm.h | 10
-rw-r--r--  include/linux/mmzone.h | 22
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 6
-rw-r--r--  include/linux/pagemap.h | 4
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/percpu.h | 2
-rw-r--r--  include/linux/pm.h | 49
-rw-r--r--  include/linux/pm_legacy.h | 56
-rw-r--r--  include/linux/preempt.h | 1
-rw-r--r--  include/linux/sched.h | 32
-rw-r--r--  include/linux/smp_lock.h | 3
-rw-r--r--  include/linux/thread_info.h | 47
-rw-r--r--  include/linux/time.h | 2
-rw-r--r--  include/linux/usb.h | 6
-rw-r--r--  include/linux/videodev2.h | 1
-rw-r--r--  include/media/ir-common.h | 1
-rw-r--r--  include/media/ir-kbd-i2c.h | 2
-rw-r--r--  include/media/tuner.h | 1
-rw-r--r--  include/media/v4l2-common.h | 110
108 files changed, 1402 insertions, 1531 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 20ac3d95ecd..36505bb4e8c 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -177,6 +177,18 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
return result;
}
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
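For reference, a minimal usage sketch of the atomic_add_unless()/atomic_inc_not_zero() helpers this merge introduces across architectures (editorial illustration only; struct foo and foo_get() are hypothetical, not part of the patch):

struct foo {
        atomic_t refcount;      /* reaches 0 once the object starts dying */
};

/* Take a reference only while the object is still live. */
static struct foo *foo_get(struct foo *f)
{
        if (!atomic_inc_not_zero(&f->refcount))
                return NULL;    /* refcount was 0: object already going away */
        return f;
}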
diff --git a/include/asm-arm/arch-pxa/akita.h b/include/asm-arm/arch-pxa/akita.h
index 4a1fbcfccc3..5d8cc1d9cb1 100644
--- a/include/asm-arm/arch-pxa/akita.h
+++ b/include/asm-arm/arch-pxa/akita.h
@@ -25,6 +25,8 @@
/* Default Values */
#define AKITA_IOEXP_IO_OUT (AKITA_IOEXP_IR_ON | AKITA_IOEXP_AKIN_PULLUP)
+extern struct platform_device akitaioexp_device;
+
void akita_set_ioexp(struct device *dev, unsigned char bitmask);
void akita_reset_ioexp(struct device *dev, unsigned char bitmask);
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 2885972b085..75b80271972 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return result;
}
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+ u32 oldval, res;
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg\n"
+ "ldrex %1, [%2]\n"
+ "teq %1, %3\n"
+ "strexeq %0, %4, [%2]\n"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ return oldval;
+}
+
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
@@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long flags;
@@ -142,6 +173,17 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* __LINUX_ARM_ARCH__ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+ c = old;
+ return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_add(i, v) (void) atomic_add_return(i, v)
#define atomic_inc(v) (void) atomic_add_return(1, v)
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 4a88235c0e7..a47cadc5968 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -62,6 +62,35 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ local_irq_restore(flags);
+
+ return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long flags;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 8c2e7830452..683b05a57d8 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -123,6 +123,33 @@ static inline int atomic_inc_and_test(volatile atomic_t *v)
return retval;
}
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ cris_atomic_save(v, flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ cris_atomic_restore(v, flags);
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ cris_atomic_save(v, flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ cris_atomic_restore(v, flags);
+ return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index e7596846342..f6539ff569c 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -414,4 +414,16 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#endif
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 886dbd11689..0b49f9e070f 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -13,5 +13,6 @@ extern char _eextratext[] __attribute__((weak));
extern char _end[];
extern char __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __initdata_begin[], __initdata_end[];
#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index 7230f650799..f23d86819ea 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -82,6 +82,33 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
return ret == 0;
}
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ local_irq_restore(flags);
+ return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
__asm__ __volatile__("stc ccr,r1l\n\t"
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 509720be772..c68557aa04b 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -215,6 +215,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
return atomic_add_return(-i,v);
}
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index 06ae4d81ba6..a955e57ad01 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,7 +19,7 @@ static inline void kb_wait(void)
static inline void mach_reboot(void)
{
int i;
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < 10; i++) {
kb_wait();
udelay(50);
outb(0x60, 0x64); /* write Controller Command Byte */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 8c02b031870..5c96cf6dcb3 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -65,7 +65,9 @@ struct cpuinfo_x86 {
int f00f_bug;
int coma_bug;
unsigned long loops_per_jiffy;
- unsigned char x86_num_cores;
+ unsigned char x86_max_cores; /* cpuid returned max cores value */
+ unsigned char booted_cores; /* number of cores as seen by OS */
+ unsigned char apicid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 97d52ac49e4..772f85da120 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
@@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable of running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ return cmpxchg_386_u8(ptr, old, new);
+ case 2:
+ return cmpxchg_386_u16(ptr, old, new);
+ case 4:
+ return cmpxchg_386_u32(ptr, old, new);
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ if (likely(boot_cpu_data.x86 > 3)) \
+ __ret = __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ else \
+ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#endif
#ifdef CONFIG_X86_CMPXCHG64
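The hunk above only declares cmpxchg_386_u8/u16/u32; for orientation, a hedged sketch of how such a fallback can be emulated on a CPU with no cmpxchg instruction (illustration only, not the actual arch/i386 implementation):

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
        u32 prev;
        unsigned long flags;

        /* No hardware cmpxchg on the 80386: fake atomicity by blocking
         * interrupts around the compare-and-store (sufficient on UP). */
        local_irq_save(flags);
        prev = *(u32 *)ptr;
        if (prev == old)
                *(u32 *)ptr = new;
        local_irq_restore(flags);
        return prev;
}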
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 874a6f890e7..2fbebf85c31 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -88,6 +88,18 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
return new;
}
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 38f3043e7fe..e3c962eeabf 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -139,6 +139,18 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h
index df1575db32a..7982285e84e 100644
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -14,6 +14,7 @@
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
#include <linux/config.h>
+#include <linux/thread_info.h>
#include <asm/segment.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
@@ -55,17 +56,6 @@ static inline void wrusp(unsigned long usp)
#endif
#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
-struct task_work {
- unsigned char sigpending;
- unsigned char notify_resume; /* request for notification on
- userspace execution resumption */
- char need_resched;
- unsigned char delayed_trace; /* single step a syscall */
- unsigned char syscall_trace; /* count of syscall interceptors */
- unsigned char memdie; /* task was selected to be killed */
- unsigned char pad[2];
-};
-
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long usp; /* user stack pointer */
@@ -78,7 +68,7 @@ struct thread_struct {
unsigned long fp[8*3];
unsigned long fpcntl[3]; /* fp control regs */
unsigned char fpstate[FPSTATESIZE]; /* floating point state */
- struct task_work work;
+ struct thread_info info;
};
#define INIT_THREAD { \
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 2aed24f6fd2..9532ca3c45c 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -2,17 +2,15 @@
#define _ASM_M68K_THREAD_INFO_H
#include <asm/types.h>
-#include <asm/processor.h>
#include <asm/page.h>
struct thread_info {
struct task_struct *task; /* main task structure */
+ unsigned long flags;
struct exec_domain *exec_domain; /* execution domain */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
struct restart_block restart_block;
-
- __u8 supervisor_stack[0];
};
#define PREEMPT_ACTIVE 0x4000000
@@ -35,84 +33,29 @@ struct thread_info {
#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
#endif /* PAGE_SHIFT == 13 */
-//#define init_thread_info (init_task.thread.info)
+#define init_thread_info (init_task.thread.info)
#define init_stack (init_thread_union.stack)
-#define current_thread_info() (current->thread_info)
-
+#define task_thread_info(tsk) (&(tsk)->thread.info)
+#define current_thread_info() task_thread_info(current)
#define __HAVE_THREAD_FUNCTIONS
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_DELAYED_TRACE 1 /* single step a syscall */
-#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */
-#define TIF_SIGPENDING 3 /* signal pending */
-#define TIF_NEED_RESCHED 4 /* rescheduling necessary */
-#define TIF_MEMDIE 5
-
-extern int thread_flag_fixme(void);
-
-/*
- * flag set/clear/test wrappers
- * - pass TIF_xxxx constants to these functions
- */
-
-#define __set_tsk_thread_flag(tsk, flag, val) ({ \
- switch (flag) { \
- case TIF_SIGPENDING: \
- tsk->thread.work.sigpending = val; \
- break; \
- case TIF_NEED_RESCHED: \
- tsk->thread.work.need_resched = val; \
- break; \
- case TIF_SYSCALL_TRACE: \
- tsk->thread.work.syscall_trace = val; \
- break; \
- case TIF_MEMDIE: \
- tsk->thread.work.memdie = val; \
- break; \
- default: \
- thread_flag_fixme(); \
- } \
+#define setup_thread_stack(p, org) ({ \
+ *(struct task_struct **)(p)->thread_info = (p); \
+ task_thread_info(p)->task = (p); \
})
-#define __get_tsk_thread_flag(tsk, flag) ({ \
- int ___res; \
- switch (flag) { \
- case TIF_SIGPENDING: \
- ___res = tsk->thread.work.sigpending; \
- break; \
- case TIF_NEED_RESCHED: \
- ___res = tsk->thread.work.need_resched; \
- break; \
- case TIF_SYSCALL_TRACE: \
- ___res = tsk->thread.work.syscall_trace;\
- break; \
- case TIF_MEMDIE: \
- ___res = tsk->thread.work.memdie;\
- break; \
- default: \
- ___res = thread_flag_fixme(); \
- } \
- ___res; \
-})
-
-#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \
- int __res = __get_tsk_thread_flag(tsk, flag); \
- __set_tsk_thread_flag(tsk, flag, val); \
- __res; \
-})
+#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
-#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0)
-#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0)
-#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0)
-#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag)
-
-#define set_thread_flag(flag) set_tsk_thread_flag(current, flag)
-#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag)
-#define test_thread_flag(flag) test_tsk_thread_flag(current, flag)
-
-#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
-#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+/* entry.S relies on these definitions!
+ * bits 0-7 are tested at every exception exit
+ * bits 8-15 are also tested at syscall exit
+ */
+#define TIF_SIGPENDING 6 /* signal pending */
+#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
+#define TIF_DELAYED_TRACE 14 /* single step a syscall */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+#define TIF_MEMDIE 16
#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index a83631ed8c8..3c1cc153c41 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -128,6 +128,18 @@ static inline int atomic_sub_return(int i, atomic_t * v)
return temp;
}
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6202eb8a14b..2c87b41e69b 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -287,6 +287,27 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
return result;
}
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 048a2c7fd0c..983e9a2b604 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -164,6 +164,26 @@ static __inline__ int atomic_read(const atomic_t *v)
}
/* exported interface */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 9c0b372a46e..ec4b1446895 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -164,6 +164,33 @@ static __inline__ int atomic_dec_return(atomic_t *v)
return t;
}
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
diff --git a/include/asm-ppc64/btext.h b/include/asm-powerpc/btext.h
index 71cce36bc63..71cce36bc63 100644
--- a/include/asm-ppc64/btext.h
+++ b/include/asm-powerpc/btext.h
diff --git a/include/asm-ppc64/delay.h b/include/asm-powerpc/delay.h
index 05f198cf73d..1492aa9ab71 100644
--- a/include/asm-ppc64/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_DELAY_H
-#define _PPC64_DELAY_H
+#ifndef _ASM_POWERPC_DELAY_H
+#define _ASM_POWERPC_DELAY_H
/*
* Copyright 1996, Paul Mackerras.
@@ -15,10 +15,17 @@
extern unsigned long tb_ticks_per_usec;
-/* define these here to prevent circular dependencies */
+#ifdef CONFIG_PPC64
+/* define these here to prevent circular dependencies */
+/* these instructions control the thread priority on multi-threaded cpus */
#define __HMT_low() asm volatile("or 1,1,1")
#define __HMT_medium() asm volatile("or 2,2,2")
-#define __barrier() asm volatile("":::"memory")
+#else
+#define __HMT_low()
+#define __HMT_medium()
+#endif
+
+#define __barrier() asm volatile("" ::: "memory")
static inline unsigned long __get_tb(void)
{
@@ -32,7 +39,7 @@ static inline void __delay(unsigned long loops)
{
unsigned long start = __get_tb();
- while((__get_tb()-start) < loops)
+ while((__get_tb() - start) < loops)
__HMT_low();
__HMT_medium();
__barrier();
@@ -45,4 +52,4 @@ static inline void udelay(unsigned long usecs)
__delay(loops);
}
-#endif /* _PPC64_DELAY_H */
+#endif /* _ASM_POWERPC_DELAY_H */
diff --git a/include/asm-ppc64/eeh.h b/include/asm-powerpc/eeh.h
index 89f26ab3190..89f26ab3190 100644
--- a/include/asm-ppc64/eeh.h
+++ b/include/asm-powerpc/eeh.h
diff --git a/include/asm-ppc64/floppy.h b/include/asm-powerpc/floppy.h
index 5c497b588e5..64276a3f615 100644
--- a/include/asm-ppc64/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -7,22 +7,22 @@
*
* Copyright (C) 1995
*/
-#ifndef __ASM_PPC64_FLOPPY_H
-#define __ASM_PPC64_FLOPPY_H
+#ifndef __ASM_POWERPC_FLOPPY_H
+#define __ASM_POWERPC_FLOPPY_H
#include <linux/config.h>
#include <asm/machdep.h>
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value,port) outb_p(value,port)
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
-#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
+#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
-#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
-#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
+#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
+#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
@@ -35,10 +35,10 @@
#include <linux/pci.h>
-#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io)
+#define fd_dma_setup(addr,size,mode,io) powerpc_fd_dma_setup(addr,size,mode,io)
-static __inline__ int
-ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
+static __inline__ int powerpc_fd_dma_setup(char *addr, unsigned long size,
+ int mode, int io)
{
static unsigned long prev_size;
static dma_addr_t bus_addr = 0;
@@ -55,9 +55,8 @@ ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
bus_addr = 0;
}
- if (!bus_addr) /* need to map it */ {
+ if (!bus_addr) /* need to map it */
bus_addr = pci_map_single(NULL, addr, size, dir);
- }
/* remember this one as prev */
prev_addr = addr;
@@ -103,4 +102,4 @@ static int FDC2 = -1;
#define EXTRA_FLOPPY_PARAMS
-#endif /* __ASM_PPC64_FLOPPY_H */
+#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/include/asm-ppc64/hvconsole.h b/include/asm-powerpc/hvconsole.h
index 6da93ce74dc..6da93ce74dc 100644
--- a/include/asm-ppc64/hvconsole.h
+++ b/include/asm-powerpc/hvconsole.h
diff --git a/include/asm-ppc64/hvcserver.h b/include/asm-powerpc/hvcserver.h
index aecba966579..aecba966579 100644
--- a/include/asm-ppc64/hvcserver.h
+++ b/include/asm-powerpc/hvcserver.h
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 062ab9ba68e..c72ffc709ea 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -40,6 +40,7 @@ extern note_buf_t crash_notes[];
#ifdef __powerpc64__
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
+extern void __init kexec_setup(void);
#else
struct kimage;
extern void machine_kexec_simple(struct kimage *image);
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 5670f0cd614..c011abb8b60 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -93,7 +93,9 @@ struct machdep_calls {
void (*init_IRQ)(void);
int (*get_irq)(struct pt_regs *);
- void (*cpu_irq_down)(int secondary);
+#ifdef CONFIG_KEXEC
+ void (*kexec_cpu_down)(int crash_shutdown, int secondary);
+#endif
/* PCI stuff */
/* Called after scanning the bus, before allocating resources */
diff --git a/include/asm-ppc64/nvram.h b/include/asm-powerpc/nvram.h
index def47d720d3..24bd8c2388e 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-powerpc/nvram.h
@@ -1,6 +1,5 @@
/*
- * PreP compliant NVRAM access
- * This needs to be updated for PPC64
+ * NVRAM definitions and access functions.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -8,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _PPC64_NVRAM_H
-#define _PPC64_NVRAM_H
+#ifndef _ASM_POWERPC_NVRAM_H
+#define _ASM_POWERPC_NVRAM_H
#define NVRW_CNT 0x20
#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
@@ -69,7 +68,6 @@ extern int nvram_clear_error_log(void);
extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
extern int pSeries_nvram_init(void);
-extern int pmac_nvram_init(void);
extern int mmio_nvram_init(void);
/* PowerMac specific nvram stuffs */
@@ -88,7 +86,11 @@ extern u8 pmac_xpram_read(int xpaddr);
extern void pmac_xpram_write(int xpaddr, u8 data);
/* Synchronize NVRAM */
-extern int nvram_sync(void);
+extern void nvram_sync(void);
+
+/* Normal access to NVRAM */
+extern unsigned char nvram_read_byte(int i);
+extern void nvram_write_byte(unsigned char c, int i);
/* Some offsets in XPRAM */
#define PMAC_XPRAM_MACHINE_LOC 0xe4
@@ -112,5 +114,6 @@ struct pmac_machine_location {
_IOWR('p', 0x40, int)
#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
+#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
-#endif /* _PPC64_NVRAM_H */
+#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
new file mode 100644
index 00000000000..18c1e5ee81a
--- /dev/null
+++ b/include/asm-powerpc/page.h
@@ -0,0 +1,179 @@
+#ifndef _ASM_POWERPC_PAGE_H
+#define _ASM_POWERPC_PAGE_H
+
+/*
+ * Copyright (C) 2001,2005 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <asm/asm-compat.h>
+
+/*
+ * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * page size. When using 64K pages however, whether we are really supporting
+ * 64K pages in HW or not is irrelevant to those definitions.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+#define PAGE_SHIFT 16
+#else
+#define PAGE_SHIFT 12
+#endif
+
+#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
+
+/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+#define __HAVE_ARCH_GATE_AREA 1
+
+/*
+ * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
+ * assign PAGE_MASK to a larger type it gets extended the way we want
+ * (i.e. with 1s in the high bits)
+ */
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
+#define KERNELBASE PAGE_OFFSET
+
+#ifdef CONFIG_DISCONTIGMEM
+#define page_to_pfn(page) discontigmem_page_to_pfn(page)
+#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
+#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
+
+/*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef __powerpc64__
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/* These are used to make use of C type-checking. */
+
+/* PTE level */
+typedef struct { pte_basic_t pte; } pte_t;
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+
+/* 64k pages additionally define a bigger "real PTE" type that gathers
+ * the "second half" part of the PTE for pseudo 64k pages
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef struct { pte_t pte; } real_pte_t;
+#endif
+
+/* PMD level */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+/* PUD level exists only on 4k pages */
+#ifndef CONFIG_PPC_64K_PAGES
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#endif
+
+/* PGD level */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+/* Page protection bits */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+
+/*
+ * .. while these make it easier on the compiler
+ */
+
+typedef pte_basic_t pte_t;
+#define pte_val(x) (x)
+#define __pte(x) (x)
+
+#ifdef CONFIG_PPC_64K_PAGES
+typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
+#else
+typedef unsigned long real_pte_t;
+#endif
+
+
+typedef unsigned long pmd_t;
+#define pmd_val(x) (x)
+#define __pmd(x) (x)
+
+#ifndef CONFIG_PPC_64K_PAGES
+typedef unsigned long pud_t;
+#define pud_val(x) (x)
+#define __pud(x) (x)
+#endif
+
+typedef unsigned long pgd_t;
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+typedef unsigned long pgprot_t;
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+struct page;
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *p);
+extern int page_is_ram(unsigned long pfn);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_PAGE_H */
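The PAGE_MASK comment above is easy to miss; a short worked example of the sign-extension it relies on, plus the alignment helpers (editorial, assuming the 4K PAGE_SHIFT case):

/*
 * With PAGE_SHIFT = 12:
 *   (1 << PAGE_SHIFT) - 1    = 0x00000fff   (an int)
 *   PAGE_MASK = ~0x00000fff  = 0xfffff000   (still an int)
 * Assigning PAGE_MASK to an unsigned long sign-extends it, giving
 * 0xfffffffffffff000 on 64-bit, i.e. 1s in the high bits as intended.
 * Likewise _ALIGN_UP(0x1234, PAGE_SIZE) = 0x2000 and
 * _ALIGN_DOWN(0x1234, PAGE_SIZE) = 0x1000.
 */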
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
new file mode 100644
index 00000000000..7259cfd85da
--- /dev/null
+++ b/include/asm-powerpc/page_32.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_PAGE_32_H
+#define _ASM_POWERPC_PAGE_32_H
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
+
+#define PPC_MEMSTART 0
+
+#ifndef __ASSEMBLY__
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing. For now this is just the IBM PPC440.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
+#define PTE_FMT "%16Lx"
+#else
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
+#define PTE_FMT "%.8lx"
+#endif
+
+struct page;
+extern void clear_pages(void *page, int order);
+static inline void clear_page(void *page) { clear_pages(page, 0); }
+extern void copy_page(void *to, void *from);
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int lz;
+
+ size = (size-1) >> PAGE_SHIFT;
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+ return 32 - lz;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PAGE_32_H */
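A worked example of the cntlzw-based get_order() above (editorial, assuming PAGE_SHIFT = 12):

/*
 *   get_order(0x1000): (0x1000 - 1) >> 12 = 0, cntlzw(0) = 32, 32 - 32 = 0
 *   get_order(0x5000): (0x5000 - 1) >> 12 = 4, cntlzw(4) = 29, 32 - 29 = 3
 * so a 20K request is rounded up to an order-3 block (eight 4K pages, 32K).
 */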
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
new file mode 100644
index 00000000000..c16f106b537
--- /dev/null
+++ b/include/asm-powerpc/page_64.h
@@ -0,0 +1,174 @@
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
+ * specific, every notion of page number shared with the firmware, TCEs,
+ * iommu, etc... still uses a page size of 4K.
+ */
+#define HW_PAGE_SHIFT 12
+#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
+#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
+
+/*
+ * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
+ * HW_PAGE_SHIFT, that is 4K pages.
+ */
+#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
+
+#define REGION_SIZE 4UL
+#define REGION_SHIFT 60UL
+#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
+
+#define VMALLOCBASE ASM_CONST(0xD000000000000000)
+#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
+#define USER_REGION_ID (0UL)
+#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
+
+/* Segment size */
+#define SID_SHIFT 28
+#define SID_MASK 0xfffffffffUL
+#define ESID_MASK 0xfffffffff0000000UL
+#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
+
+#ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static __inline__ void clear_page(void *addr)
+{
+ unsigned long lines, line_size;
+
+ line_size = ppc64_caches.dline_size;
+ lines = ppc64_caches.dlines_per_page;
+
+ __asm__ __volatile__(
+ "mtctr %1 # clear_page\n\
+1: dcbz 0,%0\n\
+ add %0,%0,%3\n\
+ bdnz+ 1b"
+ : "=r" (addr)
+ : "r" (lines), "0" (addr), "r" (line_size)
+ : "ctr", "memory");
+}
+
+extern void copy_4K_page(void *to, void *from);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static inline void copy_page(void *to, void *from)
+{
+ unsigned int i;
+ for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
+ copy_4K_page(to, from);
+ to += 4096;
+ from += 4096;
+ }
+}
+#else /* CONFIG_PPC_64K_PAGES */
+static inline void copy_page(void *to, void *from)
+{
+ copy_4K_page(to, from);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+/* Large pages size */
+extern unsigned int HPAGE_SHIFT;
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HTLB_AREA_SHIFT 40
+#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
+#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
+
+#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
+ - (1U << GET_ESID(addr))) & 0xffff)
+#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
+ - (1U << GET_HTLB_AREA(addr))) & 0xffff)
+
+#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+
+#define touches_hugepage_low_range(mm, addr, len) \
+ (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+#define touches_hugepage_high_range(mm, addr, len) \
+ (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
+
+#define __within_hugepage_low_range(addr, len, segmask) \
+ ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
+#define within_hugepage_low_range(addr, len) \
+ __within_hugepage_low_range((addr), (len), \
+ current->mm->context.low_htlb_areas)
+#define __within_hugepage_high_range(addr, len, zonemask) \
+ ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+#define within_hugepage_high_range(addr, len) \
+ __within_hugepage_high_range((addr), (len), \
+ current->mm->context.high_htlb_areas)
+
+#define is_hugepage_only_range(mm, addr, len) \
+ (touches_hugepage_high_range((mm), (addr), (len)) || \
+ touches_hugepage_low_range((mm), (addr), (len)))
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#define in_hugepage_area(context, addr) \
+ (cpu_has_feature(CPU_FTR_16M_PAGE) && \
+ ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
+ ( ((addr) < 0x100000000L) && \
+ ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
+
+#else /* !CONFIG_HUGETLB_PAGE */
+
+#define in_hugepage_area(mm, addr) 0
+
+#endif /* !CONFIG_HUGETLB_PAGE */
+
+#ifdef MODULE
+#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
+#else
+#define __page_aligned \
+ __attribute__((__aligned__(PAGE_SIZE), \
+ __section__(".data.page_aligned")))
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+
+/*
+ * This is the default if a program doesn't have a PT_GNU_STACK
+ * program header entry. The PPC64 ELF ABI has a non-executable stack
+ * by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+
+#include <asm-generic/page.h>
+
+#endif /* _ASM_POWERPC_PAGE_64_H */
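The hugepage range macros above encode which 256MB segments (low range) a mapping touches as a bitmask; a short worked example (editorial):

/*
 * For addr = 0x30000000, len = 0x20000000 (segments 3 and 4):
 *   GET_ESID(addr)           = 0x30000000 >> 28 = 3
 *   GET_ESID(addr + len - 1) = 0x4fffffff >> 28 = 4
 *   LOW_ESID_MASK(addr, len) = ((1 << 5) - (1 << 3)) & 0xffff = 0x18
 * i.e. bits 3 and 4 set, one bit per 256MB segment, which is then tested
 * against mm->context.low_htlb_areas by touches_hugepage_low_range().
 */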
diff --git a/include/asm-ppc64/serial.h b/include/asm-powerpc/serial.h
index d6bcb79b7d7..b273d630b32 100644
--- a/include/asm-ppc64/serial.h
+++ b/include/asm-powerpc/serial.h
@@ -1,21 +1,16 @@
/*
- * include/asm-ppc64/serial.h
- */
-#ifndef _PPC64_SERIAL_H
-#define _PPC64_SERIAL_H
-
-/*
- * This assumes you have a 1.8432 MHz clock for your UART.
- *
- * It'd be nice if someone built a serial card with a 24.576 MHz
- * clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
/* Default baud base if not found in device-tree */
#define BASE_BAUD ( 1843200 / 16 )
diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h
index fc323b51366..411832d5bbd 100644
--- a/include/asm-powerpc/vdso_datapage.h
+++ b/include/asm-powerpc/vdso_datapage.h
@@ -73,7 +73,7 @@ struct vdso_data {
/* those additional ones don't have to be located anywhere
* special as they were not part of the original systemcfg
*/
- __s64 wtom_clock_sec; /* Wall to monotonic clock */
+ __s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
diff --git a/include/asm-ppc/immap_85xx.h b/include/asm-ppc/immap_85xx.h
index 50fb5e47094..9383d0c13ff 100644
--- a/include/asm-ppc/immap_85xx.h
+++ b/include/asm-ppc/immap_85xx.h
@@ -3,7 +3,7 @@
*
* MPC85xx Internal Memory Map
*
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2004 Freescale Semiconductor, Inc
*
diff --git a/include/asm-ppc/ipic.h b/include/asm-ppc/ipic.h
index 9092b920997..0fe396a2b66 100644
--- a/include/asm-ppc/ipic.h
+++ b/include/asm-ppc/ipic.h
@@ -3,7 +3,7 @@
*
* IPIC external definitions and structure.
*
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2005 Freescale Semiconductor, Inc
*
diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h
index ce212201db2..7cdf60fa69b 100644
--- a/include/asm-ppc/mpc83xx.h
+++ b/include/asm-ppc/mpc83xx.h
@@ -3,7 +3,7 @@
*
* MPC83xx definitions
*
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2005 Freescale Semiconductor, Inc
*
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index d98db980cd4..9d14baea3d7 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -3,7 +3,7 @@
*
* MPC85xx definitions
*
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2004 Freescale Semiconductor, Inc
*
diff --git a/include/asm-ppc/nvram.h b/include/asm-ppc/nvram.h
deleted file mode 100644
index 31ef16e3fc4..00000000000
--- a/include/asm-ppc/nvram.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * PreP compliant NVRAM access
- */
-
-#ifdef __KERNEL__
-#ifndef _PPC_NVRAM_H
-#define _PPC_NVRAM_H
-
-#define NVRAM_AS0 0x74
-#define NVRAM_AS1 0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS 0x1FF9
-#define MOTO_RTC_MINUTES 0x1FFA
-#define MOTO_RTC_HOURS 0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
-#define MOTO_RTC_MONTH 0x1FFE
-#define MOTO_RTC_YEAR 0x1FFF
-#define MOTO_RTC_CONTROLA 0x1FF8
-#define MOTO_RTC_CONTROLB 0x1FF9
-
-/* PowerMac specific nvram stuffs */
-
-enum {
- pmac_nvram_OF, /* Open Firmware partition */
- pmac_nvram_XPRAM, /* MacOS XPRAM partition */
- pmac_nvram_NR /* MacOS Name Registry partition */
-};
-
-/* Return partition offset in nvram */
-extern int pmac_get_partition(int partition);
-
-/* Direct access to XPRAM on PowerMacs */
-extern u8 pmac_xpram_read(int xpaddr);
-extern void pmac_xpram_write(int xpaddr, u8 data);
-
-/* Synchronize NVRAM */
-extern void nvram_sync(void);
-
-/* Normal access to NVRAM */
-extern unsigned char nvram_read_byte(int i);
-extern void nvram_write_byte(unsigned char c, int i);
-
-/* Some offsets in XPRAM */
-#define PMAC_XPRAM_MACHINE_LOC 0xe4
-#define PMAC_XPRAM_SOUND_VOLUME 0x08
-
-/* Machine location structure in PowerMac XPRAM */
-struct pmac_machine_location {
- unsigned int latitude; /* 2+30 bit Fractional number */
- unsigned int longitude; /* 2+30 bit Fractional number */
- unsigned int delta; /* mix of GMT delta and DLS */
-};
-
-/*
- * /dev/nvram ioctls
- *
- * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
- * definitely obsolete. Do not use it if you can avoid it
- */
-
-#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
- _IOWR('p', 0x40, int)
-
-#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
-#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h
index bba5305c29e..83d8c77c124 100644
--- a/include/asm-ppc/ppc_sys.h
+++ b/include/asm-ppc/ppc_sys.h
@@ -3,7 +3,7 @@
*
* PPC system definitions and library functions
*
- * Maintainer: Kumar Gala <kumar.gala@freescale.com>
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2005 Freescale Semiconductor, Inc
*
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
deleted file mode 100644
index 3efc3288f7e..00000000000
--- a/include/asm-ppc64/page.h
+++ /dev/null
@@ -1,328 +0,0 @@
-#ifndef _PPC64_PAGE_H
-#define _PPC64_PAGE_H
-
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/asm-compat.h>
-
-/*
- * We support either 4k or 64k software page size. When using 64k pages
- * however, wether we are really supporting 64k pages in HW or not is
- * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12
- * as use of 64k pages remains a linux kernel specific, every notion of
- * page number shared with the firmware, TCEs, iommu, etc... still assumes
- * a page size of 4096.
- */
-#ifdef CONFIG_PPC_64K_PAGES
-#define PAGE_SHIFT 16
-#else
-#define PAGE_SHIFT 12
-#endif
-
-#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-/* HW_PAGE_SHIFT is always 4k pages */
-#define HW_PAGE_SHIFT 12
-#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
-#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
-
-/* PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
- * HW_PAGE_SHIFT, that is 4k pages
- */
-#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
-
-/* Segment size */
-#define SID_SHIFT 28
-#define SID_MASK 0xfffffffffUL
-#define ESID_MASK 0xfffffffff0000000UL
-#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
-
-/* Large pages size */
-
-#ifndef __ASSEMBLY__
-extern unsigned int HPAGE_SHIFT;
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-
-#define HTLB_AREA_SHIFT 40
-#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
-
-#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- - (1U << GET_ESID(addr))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- - (1U << GET_HTLB_AREA(addr))) & 0xffff)
-
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-
-#define touches_hugepage_low_range(mm, addr, len) \
- (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
-#define touches_hugepage_high_range(mm, addr, len) \
- (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
-
-#define __within_hugepage_low_range(addr, len, segmask) \
- ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
-#define within_hugepage_low_range(addr, len) \
- __within_hugepage_low_range((addr), (len), \
- current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
- ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
-#define within_hugepage_high_range(addr, len) \
- __within_hugepage_high_range((addr), (len), \
- current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
- (touches_hugepage_high_range((mm), (addr), (len)) || \
- touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
-#define in_hugepage_area(context, addr) \
- (cpu_has_feature(CPU_FTR_16M_PAGE) && \
- ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
- ( ((addr) < 0x100000000L) && \
- ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
-
-#else /* !CONFIG_HUGETLB_PAGE */
-
-#define in_hugepage_area(mm, addr) 0
-
-#endif /* !CONFIG_HUGETLB_PAGE */
-
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <asm/cache.h>
-
-#undef STRICT_MM_TYPECHECKS
-
-#define REGION_SIZE 4UL
-#define REGION_SHIFT 60UL
-#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-static __inline__ void clear_page(void *addr)
-{
- unsigned long lines, line_size;
-
- line_size = ppc64_caches.dline_size;
- lines = ppc64_caches.dlines_per_page;
-
- __asm__ __volatile__(
- "mtctr %1 # clear_page\n\
-1: dcbz 0,%0\n\
- add %0,%0,%3\n\
- bdnz+ 1b"
- : "=r" (addr)
- : "r" (lines), "0" (addr), "r" (line_size)
- : "ctr", "memory");
-}
-
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-struct page;
-extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking.
- * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
- */
-
-/* PTE level */
-typedef struct { unsigned long pte; } pte_t;
-#define pte_val(x) ((x).pte)
-#define __pte(x) ((pte_t) { (x) })
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) })
-
-/* PUD level exusts only on 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x) ((x).pud)
-#define __pud(x) ((pud_t) { (x) })
-#endif
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x) ((x).pgd)
-#define __pgd(x) ((pgd_t) { (x) })
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) })
-
-#else
-
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef unsigned long pte_t;
-#define pte_val(x) (x)
-#define __pte(x) (x)
-
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef unsigned long real_pte_t;
-#endif
-
-
-typedef unsigned long pmd_t;
-#define pmd_val(x) (x)
-#define __pmd(x) (x)
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define pud_val(x) (x)
-#define __pud(x) (x)
-#endif
-
-typedef unsigned long pgd_t;
-#define pgd_val(x) (x)
-#define pgprot_val(x) (x)
-
-typedef unsigned long pgprot_t;
-#define __pgd(x) (x)
-#define __pgprot(x) (x)
-
-#endif
-
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-
-extern int page_is_ram(unsigned long pfn);
-
-extern u64 ppc64_pft_size; /* Log 2 of page table size */
-
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
- __attribute__((__aligned__(PAGE_SIZE), \
- __section__(".data.page_aligned")))
-#endif
-
-
-/* This must match the -Ttext linker address */
-/* Note: tophys & tovirt make assumptions about how */
-/* KERNELBASE is defined for performance reasons. */
-/* When KERNELBASE moves, those macros may have */
-/* to change! */
-#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
-#define KERNELBASE PAGE_OFFSET
-#define VMALLOCBASE ASM_CONST(0xD000000000000000)
-
-#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID (0UL)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
-
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-/*
- * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
- * and needs to be executable. This means the whole heap ends
- * up being executable.
- */
-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
-
-/*
- * This is the default if a program doesn't have a PT_GNU_STACK
- * program header entry. The PPC64 ELF ABI has a non-executable stack
- * by default, so in the absence of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/page.h>
-
-#endif /* _PPC64_PAGE_H */
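The _ALIGN_UP()/_ALIGN_DOWN()/PAGE_ALIGN() helpers in the header removed above are plain power-of-two mask arithmetic. A minimal userspace sketch of their behaviour, assuming a 4K page size purely for illustration (the _EX names below are not kernel identifiers):

#include <assert.h>

#define _ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & (~((size) - 1)))
#define _ALIGN_DOWN(addr, size) ((addr) & (~((size) - 1)))
#define PAGE_SIZE_EX            4096UL                 /* illustrative value */
#define PAGE_ALIGN_EX(addr)     _ALIGN_UP(addr, PAGE_SIZE_EX)

int main(void)
{
	/* 0x1234 rounds up to the next 4K boundary and down to the previous one */
	assert(_ALIGN_UP(0x1234UL, PAGE_SIZE_EX)   == 0x2000UL);
	assert(_ALIGN_DOWN(0x1234UL, PAGE_SIZE_EX) == 0x1000UL);
	/* an already aligned address is unchanged in both directions */
	assert(PAGE_ALIGN_EX(0x3000UL) == 0x3000UL);
	return 0;
}

The mask trick only works when size is a power of two, which PAGE_SIZE always is.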
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
deleted file mode 100644
index ddfe186589f..00000000000
--- a/include/asm-ppc64/prom.h
+++ /dev/null
@@ -1,220 +0,0 @@
-#ifndef _PPC64_PROM_H
-#define _PPC64_PROM_H
-
-/*
- * Definitions for talking to the Open Firmware PROM on
- * Power Macintosh computers.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-#include <asm/atomic.h>
-
-#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
-#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
-#define RELOC(x) (*PTRRELOC(&(x)))
-
-/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* marker */
-#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
-#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size,
- * content */
-#define OF_DT_NOP 0x4 /* nop */
-#define OF_DT_END 0x9
-
-#define OF_DT_VERSION 0x10
-
-/*
- * This is what gets passed to the kernel by prom_init or kexec
- *
- * The dt struct contains the device tree structure, full paths and
- * property contents. The dt strings contain a separate block with just
- * the strings for the property names, and is fully page aligned and
- * self contained in a page, so that it can be kept around by the kernel,
- * each property name appears only once in this page (cheap compression)
- *
- * the mem_rsvmap contains a map of reserved ranges of physical memory,
- * passing it here instead of in the device-tree itself greatly simplifies
- * the job of everybody. It's just a list of u64 pairs (base/size) that
- * ends when size is 0
- */
-struct boot_param_header
-{
- u32 magic; /* magic word OF_DT_HEADER */
- u32 totalsize; /* total size of DT block */
- u32 off_dt_struct; /* offset to structure */
- u32 off_dt_strings; /* offset to strings */
- u32 off_mem_rsvmap; /* offset to memory reserve map */
- u32 version; /* format version */
- u32 last_comp_version; /* last compatible version */
- /* version 2 fields below */
- u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
- /* version 3 fields below */
- u32 dt_strings_size; /* size of the DT strings block */
-};
-
-
-
-typedef u32 phandle;
-typedef u32 ihandle;
-
-struct address_range {
- unsigned long space;
- unsigned long address;
- unsigned long size;
-};
-
-struct interrupt_info {
- int line;
- int sense; /* +ve/-ve logic, edge or level, etc. */
-};
-
-struct pci_address {
- u32 a_hi;
- u32 a_mid;
- u32 a_lo;
-};
-
-struct isa_address {
- u32 a_hi;
- u32 a_lo;
-};
-
-struct isa_range {
- struct isa_address isa_addr;
- struct pci_address pci_addr;
- unsigned int size;
-};
-
-struct reg_property {
- unsigned long address;
- unsigned long size;
-};
-
-struct reg_property32 {
- unsigned int address;
- unsigned int size;
-};
-
-struct reg_property64 {
- unsigned long address;
- unsigned long size;
-};
-
-struct property {
- char *name;
- int length;
- unsigned char *value;
- struct property *next;
-};
-
-struct device_node {
- char *name;
- char *type;
- phandle node;
- phandle linux_phandle;
- int n_addrs;
- struct address_range *addrs;
- int n_intrs;
- struct interrupt_info *intrs;
- char *full_name;
-
- struct property *properties;
- struct device_node *parent;
- struct device_node *child;
- struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
- struct proc_dir_entry *pde; /* this node's proc directory */
- struct kref kref;
- unsigned long _flags;
- void *data;
-#ifdef CONFIG_PPC_ISERIES
- struct list_head Device_List;
-#endif
-};
-
-extern struct device_node *of_chosen;
-
-/* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-
-#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
-#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-
-/*
- * Until 32-bit ppc can add proc_dir_entries to its device_node
- * definition, we cannot refer to pde, name_link, and addr_link
- * in arch-independent code.
- */
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
-{
- dn->pde = de;
-}
-
-
-/* OBSOLETE: Old style node lookup */
-extern struct device_node *find_devices(const char *name);
-extern struct device_node *find_type_devices(const char *type);
-extern struct device_node *find_path_device(const char *path);
-extern struct device_node *find_compatible_devices(const char *type,
- const char *compat);
-extern struct device_node *find_all_nodes(void);
-
-/* New style node lookup */
-extern struct device_node *of_find_node_by_name(struct device_node *from,
- const char *name);
-extern struct device_node *of_find_node_by_type(struct device_node *from,
- const char *type);
-extern struct device_node *of_find_compatible_node(struct device_node *from,
- const char *type, const char *compat);
-extern struct device_node *of_find_node_by_path(const char *path);
-extern struct device_node *of_find_node_by_phandle(phandle handle);
-extern struct device_node *of_find_all_nodes(struct device_node *prev);
-extern struct device_node *of_get_parent(const struct device_node *node);
-extern struct device_node *of_get_next_child(const struct device_node *node,
- struct device_node *prev);
-extern struct device_node *of_node_get(struct device_node *node);
-extern void of_node_put(struct device_node *node);
-
-/* For scanning the flat device-tree at boot time */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data);
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size);
-
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(const struct device_node *);
-
-/* Other Prototypes */
-extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern void finish_device_tree(void);
-extern void unflatten_device_tree(void);
-extern void early_init_devtree(void *);
-extern int device_is_compatible(struct device_node *device, const char *);
-extern int machine_is_compatible(const char *compat);
-extern unsigned char *get_property(struct device_node *node, const char *name,
- int *lenp);
-extern void print_properties(struct device_node *node);
-extern int prom_n_addr_cells(struct device_node* np);
-extern int prom_n_size_cells(struct device_node* np);
-extern int prom_n_intr_cells(struct device_node* np);
-extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
-extern int prom_add_property(struct device_node* np, struct property* prop);
-
-#endif /* _PPC64_PROM_H */
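A hedged sketch of the usual calling pattern for the lookup helpers declared in the header removed above (the "cpu" node type, the "reg" property and the counting are illustrative, and the reference-counting remark is an assumption rather than something this diff states):

#include <linux/init.h>
#include <asm/prom.h>

/* Sketch only: count device nodes of type "cpu" that carry a "reg" property. */
static int __init example_count_cpus(void)
{
	struct device_node *np = NULL;
	int len, ncpus = 0;

	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		unsigned char *reg = get_property(np, "reg", &len);

		if (reg != NULL && len >= 4)
			ncpus++;
	}
	/* of_find_node_by_type() is assumed to drop the reference on the node it
	 * was passed and take one on the node it returns, so the loop needs no
	 * explicit of_node_put(). */
	return ncpus;
}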
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
deleted file mode 100644
index bf9a6aba19c..00000000000
--- a/include/asm-ppc64/system.h
+++ /dev/null
@@ -1,310 +0,0 @@
-#ifndef __PPC64_SYSTEM_H
-#define __PPC64_SYSTEM_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/synch.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though.
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight eieio barrier on
- * SMP since it is only used to order updates to system memory.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
-#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() eieio()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() __asm__ __volatile__("": : :"memory")
-#define smp_rmb() __asm__ __volatile__("": : :"memory")
-#define smp_wmb() __asm__ __volatile__("": : :"memory")
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
-
-#ifdef __KERNEL__
-struct task_struct;
-struct pt_regs;
-
-#ifdef CONFIG_DEBUGGER
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
- if (unlikely(__ ## __NAME)) \
- return __ ## __NAME(regs); \
- return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-extern void _exception(int signr, struct pt_regs *regs, int code,
- unsigned long addr);
-extern int fix_alignment(struct pt_regs *regs);
-extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
- int sig);
-extern void show_regs(struct pt_regs * regs);
-extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
-extern int die(const char *str, struct pt_regs *regs, long err);
-
-extern int _get_PVR(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_fp(void);
-extern void giveup_altivec(struct task_struct *);
-extern void disable_kernel_altivec(void);
-extern void enable_kernel_altivec(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
-extern void cvt_df(double *from, float *to, struct thread_struct *thread);
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-
-extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern unsigned long memory_limit;
-
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
-extern struct task_struct *__switch_to(struct task_struct *,
- struct task_struct *);
-#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct * _switch(struct thread_struct *prev,
- struct thread_struct *next);
-
-extern unsigned long klimit;
-
-extern int powersave_nap; /* set if nap mode can be used in idle loop */
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- *
- * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
- * is more like most of the other architectures.
- */
-static __inline__ unsigned long
-__xchg_u32(volatile unsigned int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%3 # __xchg_u32\n\
- stwcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-static __inline__ unsigned long
-__xchg_u64(volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # __xchg_u64\n\
- stdcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __inline__ unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n\
- stwcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-
-#define arch_align_stack(x) (x)
-
-extern unsigned long reloc_offset(void);
-
-#endif /* __KERNEL__ */
-#endif
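cmpxchg() as defined in the header removed above is normally used in a read/compute/compare-and-swap retry loop; a hedged sketch of that pattern (the counter and the helper name are illustrative, not code from this diff):

#include <asm/system.h>

static volatile unsigned long shared_counter;

/* Sketch only: add 'delta' to shared_counter without taking a lock, retrying
 * whenever another CPU updated the value between the read and the cmpxchg(). */
static void counter_add(unsigned long delta)
{
	unsigned long old;

	do {
		old = shared_counter;
	} while (cmpxchg(&shared_counter, old, old + delta) != old);
}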
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 9d86ba6f12d..b3bd4f679f7 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -198,6 +198,18 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
return retval;
}
+#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 3c4f805da1a..aabfd334462 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -87,6 +87,35 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ local_irq_restore(flags);
+
+ return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 8c3872d3e65..927a2bc27b3 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -99,6 +99,35 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ local_irq_restore(flags);
+
+ return ret != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 37f6ab601c3..62bec7ad271 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -19,6 +19,8 @@ typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
+extern int atomic_cmpxchg(atomic_t *, int, int);
+extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
@@ -48,6 +50,8 @@ extern void atomic_set(atomic_t *, int);
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index e175afcf2cd..8198c3d0d00 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -70,6 +70,18 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index 395268a8c0d..bede3172ce7 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -90,6 +90,36 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add
#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (likely(ret == old))
+ v->counter = new;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ local_irq_restore(flags);
+
+ return ret != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 6c5d5ca8383..5647b7de174 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -111,6 +111,8 @@ extern unsigned int nmi_watchdog;
extern int disable_timer_pin_1;
+extern void setup_threshold_lvt(unsigned long lvt_off);
+
#endif /* CONFIG_X86_LOCAL_APIC */
extern unsigned boot_cpu_id;
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index fc4c5956e1e..0866ef67f19 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -360,6 +360,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
return atomic_add_return(-i,v);
}
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))
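atomic_add_unless()/atomic_inc_not_zero(), added here and for the other architectures in this series, exist mainly so a reference count can be raised only while the object is still live; a hedged sketch of that pattern (the struct and function names are illustrative):

#include <asm/atomic.h>

struct obj {
	atomic_t refcount;		/* zero means the object is being torn down */
};

/* Sketch only: take a reference unless the count has already dropped to zero,
 * i.e. refuse to resurrect a dying object.  Returns non-zero on success. */
static int obj_get(struct obj *o)
{
	return atomic_inc_not_zero(&o->refcount);
}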
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index eda62bae124..33e53424128 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,6 @@
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 6 /* largest L1 which this arch supports */
+#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#endif
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 68ac3c62fe3..33764869387 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -98,16 +98,19 @@ static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsig
static inline void set_intr_gate(int nr, void *func)
{
+ BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
}
static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
{
+ BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
}
static inline void set_system_gate(int nr, void *func)
{
+ BUG_ON((unsigned)nr > 0xFF);
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}
@@ -129,9 +132,16 @@ static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned
static inline void set_tss_desc(unsigned cpu, void *addr)
{
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], (unsigned long)addr,
- DESC_TSS,
- sizeof(struct tss_struct) - 1);
+ /*
+ * sizeof(unsigned long) coming from an extra "long" at the end
+ * of the iobitmap. See tss_struct definition in processor.h
+ *
+ * -1? seg base+limit should be pointing to the address of the
+ * last valid byte
+ */
+ set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
+ (unsigned long)addr, DESC_TSS,
+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index 16fa3a064d0..6f2a817b6a7 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -72,8 +72,15 @@
#define MAX_DMA_CHANNELS 8
-/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+
+/* 16MB ISA DMA zone */
+#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
+
+/* 4GB broken PCI/AGP hardware bus master zone */
+#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
+
+/* Compat define for old dma zone */
+#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
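Worked numbers only: with 4K pages (PAGE_SHIFT of 12), MAX_DMA_PFN is (16 << 20) >> 12 = 4096 page frames and MAX_DMA32_PFN is (4UL << 30) >> 12 = 1048576 page frames, so the compat MAX_DMA_ADDRESS becomes __va(4096 << 12), i.e. the same 16MB boundary the old PAGE_OFFSET-relative define expressed.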
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index a3877f57099..c20c28f5c7a 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -14,18 +14,18 @@
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
-#define HPET_T0_CFG 0x100
-#define HPET_T0_CMP 0x108
-#define HPET_T0_ROUTE 0x110
-#define HPET_T1_CFG 0x120
-#define HPET_T1_CMP 0x128
-#define HPET_T1_ROUTE 0x130
-#define HPET_T2_CFG 0x140
-#define HPET_T2_CMP 0x148
-#define HPET_T2_ROUTE 0x150
+#define HPET_Tn_OFFSET 0x20
+#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET)
+#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET)
+#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET)
+#define HPET_T0_CFG HPET_Tn_CFG(0)
+#define HPET_T0_CMP HPET_Tn_CMP(0)
+#define HPET_T1_CFG HPET_Tn_CFG(1)
+#define HPET_T1_CMP HPET_Tn_CMP(1)
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_LEGSUP 0x00008000
+#define HPET_ID_64BIT 0x00002000
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER_SHIFT 8
@@ -38,11 +38,18 @@
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
-#define HPET_TN_ENABLE 0x004
-#define HPET_TN_PERIODIC 0x008
-#define HPET_TN_PERIODIC_CAP 0x010
-#define HPET_TN_SETVAL 0x040
-#define HPET_TN_32BIT 0x100
+#define HPET_TN_LEVEL 0x0002
+#define HPET_TN_ENABLE 0x0004
+#define HPET_TN_PERIODIC 0x0008
+#define HPET_TN_PERIODIC_CAP 0x0010
+#define HPET_TN_64BIT_CAP 0x0020
+#define HPET_TN_SETVAL 0x0040
+#define HPET_TN_32BIT 0x0100
+#define HPET_TN_ROUTE 0x3e00
+#define HPET_TN_FSB 0x4000
+#define HPET_TN_FSB_CAP 0x8000
+
+#define HPET_TN_ROUTE_SHIFT 9
extern int is_hpet_enabled(void);
extern int hpet_rtc_timer_init(void);
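As a quick check of the parameterised forms (worked values only): HPET_Tn_CFG(n) expands to 0x100 + n * 0x20 and HPET_Tn_CMP(n) to 0x108 + n * 0x20, so timer 2, whose fixed defines were dropped, still lands at 0x140 for its configuration register and 0x148 for its comparator, matching the old HPET_T2_CFG and HPET_T2_CMP values.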
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index dc97668ea0f..c14a8c7267a 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -55,7 +55,7 @@ struct hw_interrupt_type;
#define CALL_FUNCTION_VECTOR 0xfc
#define KDB_VECTOR 0xfb /* reserved for KDB */
#define THERMAL_APIC_VECTOR 0xfa
-/* 0xf9 free */
+#define THRESHOLD_APIC_VECTOR 0xf9
#define INVALIDATE_TLB_VECTOR_END 0xf8
#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index 6efa00fe4e7..c7bc9c0525b 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -165,6 +165,11 @@ struct siginfo_t;
int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
+
+struct linux_binprm;
+extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
+ unsigned long stack_top, int exec_stack);
+
#endif
#endif /* !CONFIG_IA32_SUPPORT */
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 869249db679..5d298b799a9 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,6 +67,8 @@ struct mce_log {
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
+#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */
+#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4
void mce_log(struct mce *m);
#ifdef CONFIG_X86_MCE_INTEL
@@ -77,4 +79,12 @@ static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
}
#endif
+#ifdef CONFIG_X86_MCE_AMD
+void mce_amd_feature_init(struct cpuinfo_x86 *c);
+#else
+static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
+{
+}
+#endif
+
#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index b40c661f111..69baaa8a3ce 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -17,16 +17,15 @@
/* Simple perfect hash to map physical addresses to node numbers */
extern int memnode_shift;
extern u8 memnodemap[NODEMAPSIZE];
-extern int maxnode;
extern struct pglist_data *node_data[];
static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
{
- int nid;
+ unsigned nid;
VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
nid = memnodemap[addr >> memnode_shift];
- VIRTUAL_BUG_ON(nid > maxnode);
+ VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
return nid;
}
@@ -41,9 +40,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
-/* AK: this currently doesn't deal with invalid addresses. We'll see
- if the 2.5 kernel doesn't pass them
- (2.4 used to). */
+/* Requires pfn_valid(pfn) to be true */
#define pfn_to_page(pfn) ({ \
int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index f267e10c023..6f8a17d105a 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -16,7 +16,7 @@
/*
* A maximum of 255 APICs with the current APIC ID architecture.
*/
-#define MAX_APICS 128
+#define MAX_APICS 255
struct intel_mp_floating
{
@@ -157,7 +157,8 @@ struct mpc_config_lintsrc
*/
#define MAX_MP_BUSSES 256
-#define MAX_IRQ_SOURCES 256
+/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
enum mp_bustype {
MP_BUS_ISA = 1,
MP_BUS_EISA,
@@ -172,7 +173,7 @@ extern int smp_found_config;
extern void find_smp_config (void);
extern void get_smp_config (void);
extern int nr_ioapics;
-extern int apic_version [MAX_APICS];
+extern unsigned char apic_version [MAX_APICS];
extern int mp_irq_entries;
extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
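Worked value: with MAX_MP_BUSSES fixed at 256 and four IRQ pins per slot, the new definition raises MAX_IRQ_SOURCES from 256 to 256 * 4 = 1024 interrupt source entries.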
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 5a7fe3c6c3d..24dc39651bc 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -19,7 +19,7 @@
: "=a" (a__), "=d" (b__) \
: "c" (msr)); \
val = a__ | (b__<<32); \
-} while(0);
+} while(0)
#define wrmsr(msr,val1,val2) \
__asm__ __volatile__("wrmsr" \
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index bcf55c3f7f7..d51e56fdc3d 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -17,6 +17,8 @@ extern void numa_add_cpu(int cpu);
extern void numa_init_array(void);
extern int numa_off;
+extern void numa_set_node(int cpu, int node);
+
extern unsigned char apicid_to_node[256];
#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e5ab4d231f2..06e489f3247 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -11,7 +11,7 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
#define THREAD_ORDER 1
#ifdef __ASSEMBLY__
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index bbf89aa8a1a..8733ccfa442 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -15,6 +15,7 @@ struct x8664_pda {
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
+ int nodenumber; /* number of current node */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
struct mm_struct *active_mm;
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 7309fffeec9..ecf58c7c165 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -16,6 +16,7 @@ extern pud_t level3_physmem_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
+extern pgd_t boot_level4_pgt[];
extern unsigned long __supported_pte_mask;
#define swapper_pg_dir init_level4_pgt
@@ -247,7 +248,7 @@ static inline unsigned long pud_bad(pud_t pud)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
right? */
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
@@ -354,7 +355,7 @@ static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 03837d34fba..4861246548f 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -61,10 +61,12 @@ struct cpuinfo_x86 {
int x86_cache_alignment;
int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
__u8 x86_virt_bits, x86_phys_bits;
- __u8 x86_num_cores;
+ __u8 x86_max_cores; /* cpuid returned max cores value */
__u32 x86_power;
__u32 extended_cpuid_level; /* Max extended CPUID function supported */
unsigned long loops_per_jiffy;
+ __u8 apicid;
+ __u8 booted_cores; /* number of cores as seen by OS */
} ____cacheline_aligned;
#define X86_VENDOR_INTEL 0
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index dbb37b0adb4..34501086afe 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -11,6 +11,8 @@ struct pt_regs;
extern void start_kernel(void);
extern void pda_init(int);
+extern void zap_low_mappings(int cpu);
+
extern void early_idt_handler(void);
extern void mcheck_init(struct cpuinfo_x86 *c);
@@ -22,6 +24,8 @@ extern void mtrr_bp_init(void);
#define mtrr_bp_init() do {} while (0)
#endif
extern void init_memory_mapping(unsigned long start, unsigned long end);
+extern void size_zones(unsigned long *z, unsigned long *h,
+ unsigned long start_pfn, unsigned long end_pfn);
extern void system_call(void);
extern int kernel_syscall(void);
diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h
deleted file mode 100644
index 46077e9c191..00000000000
--- a/include/asm-x86_64/rwsem.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
- *
- * Written by David Howells (dhowells@redhat.com).
- * Ported by Andi Kleen <ak@suse.de> to x86-64.
- *
- * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers and (maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _X8664_RWSEM_H
-#define _X8664_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
- signed int count;
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
- spinlock_t wait_lock;
- struct list_head wait_list;
-#if RWSEM_DEBUG
- int debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT , 0
-#else
-#define __RWSEM_DEBUG_INIT /* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
- sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- __asm__ __volatile__(
- "# beginning down_read\n\t"
-LOCK_PREFIX " incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */
- " js 2f\n\t" /* jump if we weren't granted the lock */
- "1:\n\t"
- LOCK_SECTION_START("") \
- "2:\n\t"
- " call rwsem_down_read_failed_thunk\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END \
- "# ending down_read\n\t"
- : "+m"(sem->count)
- : "D"(sem)
- : "memory", "cc");
-}
-
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- __s32 result, tmp;
- __asm__ __volatile__(
- "# beginning __down_read_trylock\n\t"
- " movl %0,%1\n\t"
- "1:\n\t"
- " movl %1,%2\n\t"
- " addl %3,%2\n\t"
- " jle 2f\n\t"
-LOCK_PREFIX " cmpxchgl %2,%0\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- "# ending __down_read_trylock\n\t"
- : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
- : "i"(RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
- return result>=0 ? 1 : 0;
-}
-
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
- int tmp;
-
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
- __asm__ __volatile__(
- "# beginning down_write\n\t"
-LOCK_PREFIX " xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */
- " testl %0,%0\n\t" /* was the count 0 before? */
- " jnz 2f\n\t" /* jump if we weren't granted the lock */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " call rwsem_down_write_failed_thunk\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
- "# ending down_write"
- : "=&r" (tmp)
- : "0"(tmp), "D"(sem)
- : "memory", "cc");
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- signed long ret = cmpxchg(&sem->count,
- RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- if (ret == RWSEM_UNLOCKED_VALUE)
- return 1;
- return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
- __asm__ __volatile__(
- "# beginning __up_read\n\t"
-LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */
- " js 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */
- " jnz 1b\n\t"
- " call rwsem_wake_thunk\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
- "# ending __up_read\n"
- : "+m"(sem->count), [tmp] "+r" (tmp)
- : "D"(sem)
- : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- unsigned tmp;
- __asm__ __volatile__(
- "# beginning __up_write\n\t"
- " movl %[bias],%[tmp]\n\t"
-LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
- " jnz 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " decw %w[tmp]\n\t" /* did the active count reduce to 0? */
- " jnz 1b\n\t" /* jump back if not */
- " call rwsem_wake_thunk\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
- "# ending __up_write\n"
- : "+m"(sem->count), [tmp] "=r" (tmp)
- : "D"(sem), [bias] "i"(-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- __asm__ __volatile__(
- "# beginning __downgrade_write\n\t"
-LOCK_PREFIX " addl %[bias],(%%rdi)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
- " js 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " call rwsem_downgrade_thunk\n"
- " jmp 1b\n"
- LOCK_SECTION_END
- "# ending __downgrade_write\n"
- : "=m"(sem->count)
- : "D"(sem), [bias] "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
- : "memory", "cc");
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
- __asm__ __volatile__(
-LOCK_PREFIX "addl %1,%0"
- :"=m"(sem->count)
- :"ir"(delta), "m"(sem->count));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
- int tmp = delta;
-
- __asm__ __volatile__(
-LOCK_PREFIX "xaddl %0,(%2)"
- : "=r"(tmp), "=m"(sem->count)
- : "r"(sem), "m"(sem->count), "0" (tmp)
- : "memory");
-
- return tmp+delta;
-}
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->count != 0);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _X8664_RWSEM_H */
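The bias scheme described in the deleted header's comment can be followed with a few concrete count values; a small self-contained illustration in plain 32-bit arithmetic (a sketch of the generic algorithm, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t count = 0x00000000;		/* RWSEM_UNLOCKED_VALUE */
	const int32_t active  = 0x00000001;	/* RWSEM_ACTIVE_BIAS    */
	const int32_t waiting = -0x00010000;	/* RWSEM_WAITING_BIAS   */

	count += active;			/* one reader takes the lock */
	assert(count == 1);			/* positive: uncontended read */
	count -= active;			/* reader releases */

	count += waiting + active;		/* a writer takes the lock... */
	assert((uint32_t)count == 0xffff0001);	/* ...matching the comment above */

	count += active;			/* a reader now arrives */
	assert(count < 0);			/* negative: it must go to sleep */
	return 0;
}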
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index b9fb2173ef9..d030409a8fb 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -47,7 +47,6 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
-extern void zap_low_mappings(void);
void smp_stop_cpu(void);
extern int smp_call_function_single(int cpuid, void (*func) (void *info),
void *info, int retry, int wait);
@@ -82,6 +81,8 @@ extern int safe_smp_processor_id(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
+extern unsigned num_processors;
+extern unsigned disabled_cpus;
#endif /* !ASSEMBLY */
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 69636831ad2..fe484a699cc 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -18,22 +18,22 @@
*/
#define __raw_spin_is_locked(x) \
- (*(volatile signed char *)(&(x)->slock) <= 0)
+ (*(volatile signed int *)(&(x)->slock) <= 0)
#define __raw_spin_lock_string \
"\n1:\t" \
- "lock ; decb %0\n\t" \
+ "lock ; decl %0\n\t" \
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
"rep;nop\n\t" \
- "cmpb $0,%0\n\t" \
+ "cmpl $0,%0\n\t" \
"jle 2b\n\t" \
"jmp 1b\n" \
LOCK_SECTION_END
#define __raw_spin_unlock_string \
- "movb $1,%0" \
+ "movl $1,%0" \
:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
- char oldval;
+ int oldval;
__asm__ __volatile__(
- "xchgb %b0,%1"
+ "xchgl %0,%1"
:"=q" (oldval), "=m" (lock->slock)
:"0" (0) : "memory");
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 1c603cd7e4d..d39ebd5263e 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -28,6 +28,8 @@ extern int __node_distance(int, int);
#define pcibus_to_node(bus) ((long)(bus->sysdata))
#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
+#define numa_node_id() read_pda(nodenumber)
+
/* sched_domains SD_NODE_INIT for x86_64 machines */
#define SD_NODE_INIT (struct sched_domain) { \
.span = CPU_MASK_NONE, \
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 3c494b65d33..2c42150bce0 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -462,7 +462,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr)
#define __NR_tkill 200
__SYSCALL(__NR_tkill, sys_tkill)
#define __NR_time 201
-__SYSCALL(__NR_time, sys_time64)
+__SYSCALL(__NR_time, sys_time)
#define __NR_futex 202
__SYSCALL(__NR_futex, sys_futex)
#define __NR_sched_setaffinity 203
@@ -608,6 +608,7 @@ do { \
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_TIME
#endif
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 12b5732dc6e..3670cc7695d 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -223,6 +223,26 @@ static inline int atomic_sub_return(int i, atomic_t * v)
*/
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 93c5b3cdf95..9a66401073f 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -16,6 +16,8 @@
#define _LINUX_ACCT_H
#include <linux/types.h>
+#include <linux/jiffies.h>
+
#include <asm/param.h>
#include <asm/byteorder.h>
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 403d71dcb7c..49fd37629ee 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -124,7 +124,7 @@ struct kiocb {
(x)->ki_users = 1; \
(x)->ki_key = KIOCB_SYNC_KEY; \
(x)->ki_filp = (filp); \
- (x)->ki_ctx = &tsk->active_mm->default_kioctx; \
+ (x)->ki_ctx = NULL; \
(x)->ki_cancel = NULL; \
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
@@ -210,8 +210,15 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id);
int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb));
-#define get_ioctx(kioctx) do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0)
-#define put_ioctx(kioctx) do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0)
+#define get_ioctx(kioctx) do { \
+ BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \
+ atomic_inc(&(kioctx)->users); \
+} while (0)
+#define put_ioctx(kioctx) do { \
+ BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \
+ if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
+ __put_ioctx(kioctx); \
+} while (0)
#define in_aio() !is_sync_wait(current->io_wait)
/* may be used for debugging */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cb3c3ef50f5..38c2fb7ebe0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -84,6 +84,16 @@ static __inline__ int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
+static __inline__ int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
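get_count_order() returns the smallest order for which 1 << order covers count, rounding up for non-power-of-two values; a self-contained illustration with a simple stand-in for fls() (the stand-in exists only to keep the example runnable outside the kernel):

#include <assert.h>

/* Position of the highest set bit, 1-based, 0 for an input of 0 --
 * the same contract as the kernel's fls(). */
static int fls_example(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int get_count_order_example(unsigned int count)
{
	int order = fls_example(count) - 1;

	if (count & (count - 1))	/* not a power of two: round up */
		order++;
	return order;
}

int main(void)
{
	assert(get_count_order_example(4) == 2);	/* exact power of two */
	assert(get_count_order_example(5) == 3);	/* 5 needs 2^3 = 8    */
	assert(get_count_order_example(8) == 3);
	return 0;
}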
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 025a7f084db..a33a31e71bb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -406,6 +406,7 @@ struct request_queue
atomic_t refcnt;
+ unsigned int nr_sorted;
unsigned int in_flight;
/*
@@ -631,6 +632,7 @@ static inline void elv_dispatch_add_tail(struct request_queue *q,
{
if (q->last_merge == rq)
q->last_merge = NULL;
+ q->nr_sorted--;
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
new file mode 100644
index 00000000000..605ebe24bb2
--- /dev/null
+++ b/include/linux/cm4000_cs.h
@@ -0,0 +1,66 @@
+#ifndef _CM4000_H_
+#define _CM4000_H_
+
+#define MAX_ATR 33
+
+#define CM4000_MAX_DEV 4
+
+/* those two structures are passed via ioctl() from/to userspace. They are
+ * used by existing userspace programs, so I kept the awkward "bIFSD" naming
+ * not to break compilation of userspace apps. -HW */
+
+typedef struct atreq {
+ int32_t atr_len;
+ unsigned char atr[64];
+ int32_t power_act;
+ unsigned char bIFSD;
+ unsigned char bIFSC;
+} atreq_t;
+
+
+/* what is particularly stupid in the original driver is the arch-dependent
+ * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
+ * will lay out the structure members differently than the 64bit kernel.
+ *
+ * I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t".
+ * On 32bit this will make no difference. With 64bit kernels, it will make
+ * 32bit apps work, too.
+ */
+
+typedef struct ptsreq {
+ u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/
+ unsigned char flags;
+ unsigned char pts1;
+ unsigned char pts2;
+ unsigned char pts3;
+} ptsreq_t;
+
+#define CM_IOC_MAGIC 'c'
+#define CM_IOC_MAXNR 255
+
+#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *)
+#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *)
+#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *)
+#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3)
+#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4)
+
+#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*)
+
+/* card and device states */
+#define CM_CARD_INSERTED 0x01
+#define CM_CARD_POWERED 0x02
+#define CM_ATR_PRESENT 0x04
+#define CM_ATR_VALID 0x08
+#define CM_STATE_VALID 0x0f
+/* extra info only from CM4000 */
+#define CM_NO_READER 0x10
+#define CM_BAD_CARD 0x20
+
+
+#ifdef __KERNEL__
+
+#define DEVICE_NAME "cmm"
+#define MODULE_NAME "cm4000_cs"
+
+#endif /* __KERNEL__ */
+#endif /* _CM4000_H_ */
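A hedged sketch of how a userspace program might drive the new ioctls (the /dev/cmm0 path and the checks performed are assumptions for illustration, not something this header defines):

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/cm4000_cs.h>

int main(void)
{
	unsigned char status = 0;
	atreq_t atr;
	int fd = open("/dev/cmm0", O_RDWR);	/* device node name is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, CM_IOCGSTATUS, &status) == 0 &&
	    (status & CM_CARD_INSERTED) &&
	    ioctl(fd, CM_IOCGATR, &atr) == 0 && atr.atr_len > 0)
		printf("card inserted, ATR length %d\n", (int)atr.atr_len);
	close(fd);
	return 0;
}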
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 2209ad3499a..174f3379e5d 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -259,6 +259,14 @@ COMPATIBLE_IOCTL(RTC_RD_TIME)
COMPATIBLE_IOCTL(RTC_SET_TIME)
COMPATIBLE_IOCTL(RTC_WKALM_SET)
COMPATIBLE_IOCTL(RTC_WKALM_RD)
+/*
+ * These two are only for the sbus rtc driver, but
+ * hwclock tries them on every rtc device first when
+ * running on sparc. On other architectures the entries
+ * are useless but harmless.
+ */
+COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
+COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
/* Little m */
COMPATIBLE_IOCTL(MTIOCTOP)
/* Socket level stuff */
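
RTCGET and RTCSET are spelled out here as raw _IOR/_IOW encodings rather than symbolic names. A hedged user-space sketch of how hwclock-style code would issue the same request (device path, field layout, and error handling are illustrative only):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Same encoding as the compat table entry: 'p', number 20, seven ints */
#define RTCGET _IOR('p', 20, int[7])

int main(void)
{
	int tm[7];	/* seven time fields; exact ordering is driver-defined */
	int fd = open("/dev/rtc", O_RDONLY);	/* illustrative device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTCGET, tm) == 0)
		printf("first field: %d\n", tm[0]);
	close(fd);
	return 0;
}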
diff --git a/include/linux/file.h b/include/linux/file.h
index d3b1a15d5f2..418b6101b59 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -33,13 +33,13 @@ struct fdtable {
* Open file table structure
*/
struct files_struct {
- atomic_t count;
- spinlock_t file_lock; /* Protects all the below members. Nests inside tsk->alloc_lock */
+ atomic_t count;
struct fdtable *fdt;
struct fdtable fdtab;
- fd_set close_on_exec_init;
- fd_set open_fds_init;
- struct file * fd_array[NR_OPEN_DEFAULT];
+ fd_set close_on_exec_init;
+ fd_set open_fds_init;
+ struct file * fd_array[NR_OPEN_DEFAULT];
+ spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
};
#define files_fdtable(files) (rcu_dereference((files)->fdt))
diff --git a/include/linux/font.h b/include/linux/font.h
index 8aac48c37f3..53b129f07f6 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -31,7 +31,6 @@ struct font_desc {
#define SUN12x22_IDX 7
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
-#define RL_IDX 10
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -42,7 +41,6 @@ extern const struct font_desc font_vga_8x8,
font_sun_8x16,
font_sun_12x22,
font_acorn_8x8,
- font_rl,
font_mini_4x6;
/* Find a font with a specific name */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 114d5d59f69..934aa9bda48 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -4,7 +4,7 @@
* Definitions for any platform device related flags or structures for
* Freescale processor devices
*
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2004 Freescale Semiconductor, Inc
*
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8eeaa53a68c..eef5ccdcd73 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -78,7 +78,7 @@ struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
struct kobject kobj;
- unsigned ios[2], sectors[2];
+ unsigned ios[2], sectors[2]; /* READs and WRITEs */
int policy, partno;
};
@@ -89,7 +89,7 @@ struct hd_struct {
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
struct disk_stats {
- unsigned sectors[2];
+ unsigned sectors[2]; /* READs and WRITEs */
unsigned ios[2];
unsigned merges[2];
unsigned ticks[2];
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c3779432a72..313dfe9b443 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -14,6 +14,13 @@ struct vm_area_struct;
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
#define __GFP_DMA ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
+#ifdef CONFIG_DMA_IS_DMA32
+#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */
+#elif BITS_PER_LONG < 64
+#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */
+#else
+#define __GFP_DMA32 ((__force gfp_t)0x04) /* Has own ZONE_DMA32 */
+#endif
/*
* Action modifiers - doesn't change the zoning
@@ -39,8 +46,7 @@ struct vm_area_struct;
#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
-#define __GFP_NORECLAIM ((__force gfp_t)0x20000u) /* No realy zone reclaim during allocation */
-#define __GFP_HARDWALL ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -49,7 +55,7 @@ struct vm_area_struct;
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
- __GFP_NOMEMALLOC|__GFP_NORECLAIM|__GFP_HARDWALL)
+ __GFP_NOMEMALLOC|__GFP_HARDWALL)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
@@ -64,6 +70,10 @@ struct vm_area_struct;
#define GFP_DMA __GFP_DMA
+/* 4GB DMA on some platforms */
+#define GFP_DMA32 __GFP_DMA32
+
+
#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK))
/*
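
The three-way definition above means __GFP_DMA32 either aliases ZONE_DMA, falls back to ZONE_NORMAL, or selects a dedicated zone. A user-space sketch of the same conditional selection, with CONFIG_DMA_IS_DMA32 and BITS_PER_LONG supplied by hand (values mirror the diff, the program itself is illustrative):

#include <stdio.h>

/* Stand-ins for kernel config; flip these to exercise the other branches */
/* #define CONFIG_DMA_IS_DMA32 */
#define BITS_PER_LONG 64

#ifdef CONFIG_DMA_IS_DMA32
#define GFP_DMA32 0x01u		/* ZONE_DMA doubles as ZONE_DMA32 */
#elif BITS_PER_LONG < 64
#define GFP_DMA32 0x00u		/* ZONE_NORMAL serves <4GB allocations */
#else
#define GFP_DMA32 0x04u		/* dedicated ZONE_DMA32 */
#endif

int main(void)
{
	printf("GFP_DMA32 zone modifier: %#x\n", GFP_DMA32);
	return 0;
}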
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 5912874ca83..71d2b8a723b 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -90,6 +90,8 @@ extern void synchronize_irq(unsigned int irq);
#define nmi_enter() irq_enter()
#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
+struct task_struct;
+
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_user_vtime(struct task_struct *tsk)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0cea162b08c..1056717ee50 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -102,8 +102,8 @@ static inline unsigned long hugetlb_total_pages(void)
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
#ifndef HPAGE_MASK
-#define HPAGE_MASK 0 /* Keep the compiler happy */
-#define HPAGE_SIZE 0
+#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
+#define HPAGE_SIZE PAGE_SIZE
#endif
#endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 74abaecdb57..1543daaa9c5 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -107,6 +107,7 @@
#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
+#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
#define I2C_DRIVERID_EXP1 0xF1
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 68ab5f2ab9c..dcfd2ecccb5 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -51,7 +51,6 @@
.page_table_lock = SPIN_LOCK_UNLOCKED, \
.mmlist = LIST_HEAD_INIT(name.mmlist), \
.cpu_vm_mask = CPU_MASK_ALL, \
- .default_kioctx = INIT_KIOCTX(name.default_kioctx, name), \
}
#define INIT_SIGNALS(sig) { \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0a90205184b..41f150a3d2d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -9,6 +9,7 @@
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
+#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7b115feca4d..1013a42d10b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -206,12 +206,6 @@ struct vm_operations_struct {
struct mmu_gather;
struct inode;
-#ifdef ARCH_HAS_ATOMIC_UNSIGNED
-typedef unsigned page_flags_t;
-#else
-typedef unsigned long page_flags_t;
-#endif
-
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -219,7 +213,7 @@ typedef unsigned long page_flags_t;
* a page.
*/
struct page {
- page_flags_t flags; /* Atomic flags, some possibly
+ unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
atomic_t _count; /* Usage count, see below. */
atomic_t _mapcount; /* Count of ptes mapped in mms,
@@ -435,7 +429,7 @@ static inline void put_page(struct page *page)
#endif
/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH)
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
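
With page_flags_t gone, the field offsets above are computed straight from the word size: each region is packed downward from the top bit of an unsigned long. A sketch of that arithmetic with made-up widths (the real values come from SECTIONS_WIDTH, NODES_WIDTH, and ZONES_WIDTH):

#include <stdio.h>

/* Illustrative widths only, not the kernel's configured values */
#define SECTIONS_WIDTH 0
#define NODES_WIDTH    6
#define ZONES_WIDTH    3

#define BITS_IN_FLAGS  (sizeof(unsigned long) * 8)
#define SECTIONS_PGOFF (BITS_IN_FLAGS - SECTIONS_WIDTH)
#define NODES_PGOFF    (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF    (NODES_PGOFF - ZONES_WIDTH)

int main(void)
{
	/* On a 64-bit build: sections at 64, nodes at 58, zones at 55 */
	printf("sections@%zu nodes@%zu zones@%zu\n",
	       SECTIONS_PGOFF, NODES_PGOFF, ZONES_PGOFF);
	return 0;
}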
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f5fa3082fd6..2c8edad5dcc 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -71,10 +71,11 @@ struct per_cpu_pageset {
#endif
#define ZONE_DMA 0
-#define ZONE_NORMAL 1
-#define ZONE_HIGHMEM 2
+#define ZONE_DMA32 1
+#define ZONE_NORMAL 2
+#define ZONE_HIGHMEM 3
-#define MAX_NR_ZONES 3 /* Sync this with ZONES_SHIFT */
+#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */
@@ -108,9 +109,10 @@ struct per_cpu_pageset {
/*
* On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a PC we have 3 zones:
+ * into multiple physical zones. On a PC we have 4 zones:
*
* ZONE_DMA < 16 MB ISA DMA capable memory
+ * ZONE_DMA32 0 MB Empty
* ZONE_NORMAL 16-896 MB direct mapped by the kernel
* ZONE_HIGHMEM > 896 MB only page cache and user processes
*/
@@ -329,7 +331,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int alloc_type, int can_try_harder, gfp_t gfp_high);
+ int classzone_idx, int alloc_flags);
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
@@ -433,7 +435,9 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
#include <linux/topology.h>
/* Returns the number of the current Node. */
+#ifndef numa_node_id
#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
+#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -453,12 +457,12 @@ extern struct pglist_data contig_page_data;
#include <asm/sparsemem.h>
#endif
-#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
+#if BITS_PER_LONG == 32
/*
- * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
- * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
+ * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
+ * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
*/
-#define FLAGS_RESERVED 8
+#define FLAGS_RESERVED 9
#elif BITS_PER_LONG == 64
/*
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 72975fa8795..934a2479f16 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -112,7 +112,6 @@ struct nfnl_callback
{
int (*call)(struct sock *nl, struct sk_buff *skb,
struct nlmsghdr *nlh, struct nfattr *cda[], int *errp);
- kernel_cap_t cap_required; /* capabilities required for this msg */
u_int16_t attr_count; /* number of nfattr's */
};
@@ -154,11 +153,14 @@ extern void nfattr_parse(struct nfattr *tb[], int maxattr,
#define nfattr_bad_size(tb, max, cta_min) \
({ int __i, __res = 0; \
- for (__i=0; __i<max; __i++) \
+ for (__i=0; __i<max; __i++) { \
+ if (!cta_min[__i]) \
+ continue; \
if (tb[__i] && NFA_PAYLOAD(tb[__i]) < cta_min[__i]){ \
__res = 1; \
break; \
} \
+ } \
__res; \
})
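
The extra braces and the continue above make the minimum-size check skip attributes with no declared minimum (cta_min[__i] == 0) instead of comparing against them. A hedged user-space sketch of the same validation loop, with plain arrays standing in for struct nfattr and NFA_PAYLOAD():

#include <stdio.h>

/* Stand-in for NFA_PAYLOAD(tb[i]): payload length of attribute i, 0 = absent */
static int payload[4] = { 8, 0, 2, 4 };
/* Minimum payload per attribute type; 0 means "no minimum declared" */
static const int cta_min[4] = { 4, 4, 0, 8 };

static int nfattr_bad_size_sketch(int maxattr)
{
	int i;

	for (i = 0; i < maxattr; i++) {
		if (!cta_min[i])
			continue;	/* the new check: nothing to enforce */
		if (payload[i] && payload[i] < cta_min[i])
			return 1;	/* attribute present but too short */
	}
	return 0;
}

int main(void)
{
	/* attribute 3 is 4 bytes but needs 8, so the check trips */
	printf("bad size: %d\n", nfattr_bad_size_sketch(4));
	return 0;
}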
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ba6c310a055..ee700c6eb44 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -53,12 +53,12 @@ void release_pages(struct page **pages, int nr, int cold);
static inline struct page *page_cache_alloc(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
+ return alloc_pages(mapping_gfp_mask(x), 0);
}
static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
- return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
+ return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
typedef int filler_t(void *, struct page *);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d00f8ba7f22..d4c1c8fd292 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -805,6 +805,10 @@
#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
+#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
+#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
+#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
+#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b
#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
#define PCI_VENDOR_ID_YAMAHA 0x1073
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5451eb1e781..fb8d2d24e4b 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -38,7 +38,7 @@ extern void free_percpu(const void *);
#else /* CONFIG_SMP */
-#define per_cpu_ptr(ptr, cpu) (ptr)
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
static inline void *__alloc_percpu(size_t size, size_t align)
{
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1514098d156..5be87ba3b7a 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -94,55 +94,6 @@ struct pm_dev
struct list_head entry;
};
-#ifdef CONFIG_PM
-
-extern int pm_active;
-
-#define PM_IS_ACTIVE() (pm_active != 0)
-
-/*
- * Register a device with power management
- */
-struct pm_dev __deprecated *
-pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
-
-/*
- * Unregister a device with power management
- */
-void __deprecated pm_unregister(struct pm_dev *dev);
-
-/*
- * Unregister all devices with matching callback
- */
-void __deprecated pm_unregister_all(pm_callback callback);
-
-/*
- * Send a request to all devices
- */
-int __deprecated pm_send_all(pm_request_t rqst, void *data);
-
-#else /* CONFIG_PM */
-
-#define PM_IS_ACTIVE() 0
-
-static inline struct pm_dev *pm_register(pm_dev_t type,
- unsigned long id,
- pm_callback callback)
-{
- return NULL;
-}
-
-static inline void pm_unregister(struct pm_dev *dev) {}
-
-static inline void pm_unregister_all(pm_callback callback) {}
-
-static inline int pm_send_all(pm_request_t rqst, void *data)
-{
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
/* Functions above this comment are list-based old-style power
 * management. Please avoid using them. */
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
new file mode 100644
index 00000000000..1252b45face
--- /dev/null
+++ b/include/linux/pm_legacy.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_PM_LEGACY_H__
+#define __LINUX_PM_LEGACY_H__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_PM_LEGACY
+
+extern int pm_active;
+
+#define PM_IS_ACTIVE() (pm_active != 0)
+
+/*
+ * Register a device with power management
+ */
+struct pm_dev __deprecated *
+pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
+
+/*
+ * Unregister a device with power management
+ */
+void __deprecated pm_unregister(struct pm_dev *dev);
+
+/*
+ * Unregister all devices with matching callback
+ */
+void __deprecated pm_unregister_all(pm_callback callback);
+
+/*
+ * Send a request to all devices
+ */
+int __deprecated pm_send_all(pm_request_t rqst, void *data);
+
+#else /* CONFIG_PM_LEGACY */
+
+#define PM_IS_ACTIVE() 0
+
+static inline struct pm_dev *pm_register(pm_dev_t type,
+ unsigned long id,
+ pm_callback callback)
+{
+ return NULL;
+}
+
+static inline void pm_unregister(struct pm_dev *dev) {}
+
+static inline void pm_unregister_all(pm_callback callback) {}
+
+static inline int pm_send_all(pm_request_t rqst, void *data)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM_LEGACY */
+
+#endif /* __LINUX_PM_LEGACY_H__ */
+
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd98c54a23b..d9a2f5254a5 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -7,6 +7,7 @@
*/
#include <linux/config.h>
+#include <linux/thread_info.h>
#include <linux/linkage.h>
#ifdef CONFIG_DEBUG_PREEMPT
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2bbf968b23d..2038bd27b04 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -357,7 +357,6 @@ struct mm_struct {
/* aio bits */
rwlock_t ioctx_list_lock;
struct kioctx *ioctx_list;
- struct kioctx default_kioctx;
};
struct sighand_struct {
@@ -1233,32 +1232,49 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+#ifndef __HAVE_THREAD_FUNCTIONS
+
+#define task_thread_info(task) (task)->thread_info
+
+static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+ *task_thread_info(p) = *task_thread_info(org);
+ task_thread_info(p)->task = p;
+}
+
+static inline unsigned long *end_of_stack(struct task_struct *p)
+{
+ return (unsigned long *)(p->thread_info + 1);
+}
+
+#endif
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- set_ti_thread_flag(tsk->thread_info,flag);
+ set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- clear_ti_thread_flag(tsk->thread_info,flag);
+ clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_and_set_ti_thread_flag(tsk->thread_info,flag);
+ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_and_clear_ti_thread_flag(tsk->thread_info,flag);
+ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_ti_thread_flag(tsk->thread_info,flag);
+ return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
@@ -1329,12 +1345,12 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
static inline unsigned int task_cpu(const struct task_struct *p)
{
- return p->thread_info->cpu;
+ return task_thread_info(p)->cpu;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
- p->thread_info->cpu = cpu;
+ task_thread_info(p)->cpu = cpu;
}
#else
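
end_of_stack() above relies on thread_info sitting at the base of the task's stack allocation, so the first usable word comes right after it. A hedged user-space model of that arithmetic, using mock types rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE 8192		/* stand-in for THREAD_SIZE */

/* Mock of the layout: thread_info at the bottom of the stack area */
struct mock_thread_info {
	unsigned long flags;
	int cpu;
};

static unsigned long *end_of_stack_sketch(struct mock_thread_info *ti)
{
	/* first word past thread_info == lowest free stack word */
	return (unsigned long *)(ti + 1);
}

int main(void)
{
	void *stack = malloc(STACK_SIZE);
	struct mock_thread_info *ti = stack;

	printf("thread_info at %p, stack data starts at %p\n",
	       (void *)ti, (void *)end_of_stack_sketch(ti));
	free(stack);
	return 0;
}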
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index b63ce701409..fa1ff3b165f 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -2,11 +2,10 @@
#define __LINUX_SMPLOCK_H
#include <linux/config.h>
+#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_LOCK_KERNEL
-
#define kernel_locked() (current->lock_depth >= 0)
extern int __lockfunc __reacquire_kernel_lock(void);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index d252f45a0f9..1c4eb41dbd8 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -27,31 +27,6 @@ extern long do_no_restart_syscall(struct restart_block *parm);
* - pass TIF_xxxx constants to these functions
*/
-static inline void set_thread_flag(int flag)
-{
- set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline void clear_thread_flag(int flag)
-{
- clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_set_thread_flag(int flag)
-{
- return test_and_set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_clear_thread_flag(int flag)
-{
- return test_and_clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_thread_flag(int flag)
-{
- return test_bit(flag,&current_thread_info()->flags);
-}
-
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag,&ti->flags);
@@ -77,15 +52,19 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
return test_bit(flag,&ti->flags);
}
-static inline void set_need_resched(void)
-{
- set_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline void clear_need_resched(void)
-{
- clear_thread_flag(TIF_NEED_RESCHED);
-}
+#define set_thread_flag(flag) \
+ set_ti_thread_flag(current_thread_info(), flag)
+#define clear_thread_flag(flag) \
+ clear_ti_thread_flag(current_thread_info(), flag)
+#define test_and_set_thread_flag(flag) \
+ test_and_set_ti_thread_flag(current_thread_info(), flag)
+#define test_and_clear_thread_flag(flag) \
+ test_and_clear_ti_thread_flag(current_thread_info(), flag)
+#define test_thread_flag(flag) \
+ test_ti_thread_flag(current_thread_info(), flag)
+
+#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
+#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
#endif
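
Turning the current-task helpers into macros keeps a single implementation in the *_ti_* functions. A user-space model of the same layering, with a mock thread_info and a fake current_thread_info(), purely illustrative (the real helpers use atomic bitops):

#include <stdio.h>

#define TIF_NEED_RESCHED 3	/* illustrative flag bit */

struct mock_thread_info {
	unsigned long flags;
};

/* the one real implementation, parameterized by thread_info */
static void set_ti_thread_flag_sketch(struct mock_thread_info *ti, int flag)
{
	ti->flags |= 1UL << flag;	/* non-atomic stand-in for set_bit() */
}

static struct mock_thread_info current_ti;	/* stand-in for current_thread_info() */

/* current-task convenience wrappers, mirroring the new macros */
#define set_thread_flag_sketch(flag) \
	set_ti_thread_flag_sketch(&current_ti, flag)
#define set_need_resched_sketch() set_thread_flag_sketch(TIF_NEED_RESCHED)

int main(void)
{
	set_need_resched_sketch();
	printf("flags=%#lx\n", current_ti.flags);	/* bit 3 set -> 0x8 */
	return 0;
}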
diff --git a/include/linux/time.h b/include/linux/time.h
index 8e83f4e778b..bfbe92d0767 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -101,7 +101,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
static inline void
set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
{
- while (nsec > NSEC_PER_SEC) {
+ while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
++sec;
}
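
The change from > to >= matters exactly at nsec == NSEC_PER_SEC: the old test left {sec, 1000000000} unnormalized instead of carrying into the seconds field. A small user-space sketch of the corrected loop (the negative-carry branch is added here for completeness):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Normalize so that 0 <= tv_nsec < NSEC_PER_SEC */
static void set_normalized_timespec_sketch(struct timespec *ts,
					   time_t sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {	/* '>=' carries the exact-second case */
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

int main(void)
{
	struct timespec ts;

	set_normalized_timespec_sketch(&ts, 5, NSEC_PER_SEC);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);	/* 6.000000000 */
	return 0;
}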
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 748d0438525..856d232c756 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -819,7 +819,7 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *);
*/
struct urb
{
- /* private, usb core and host controller only fields in the urb */
+ /* private: usb core and host controller only fields in the urb */
struct kref kref; /* reference count of the URB */
spinlock_t lock; /* lock for the URB */
void *hcpriv; /* private data for host controller */
@@ -827,7 +827,7 @@ struct urb
atomic_t use_count; /* concurrent submissions counter */
u8 reject; /* submissions will fail */
- /* public, documented fields in the urb that can be used by drivers */
+ /* public: documented fields in the urb that can be used by drivers */
struct list_head urb_list; /* list head for use by the urb's
* current owner */
struct usb_device *dev; /* (in) pointer to associated device */
@@ -1045,7 +1045,7 @@ struct usb_sg_request {
size_t bytes;
/*
- * members below are private to usbcore,
+ * members below are private: to usbcore,
* and are not provided for driver access!
*/
spinlock_t lock;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index a114fff6568..1cded681eb6 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -636,6 +636,7 @@ typedef __u64 v4l2_std_id;
#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000)
#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000)
#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000)
+#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000)
/* ATSC/HDTV */
#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 0f1ba95ec8d..ad3e9bb670c 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -49,6 +49,7 @@ struct ir_input_state {
extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
+extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 00fa57eb9fd..730f21ed91d 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -19,4 +19,6 @@ struct IR_i2c {
char phys[32];
int (*get_key)(struct IR_i2c*, u32*, u32*);
};
+
+int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
#endif
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 9184e534b7e..faa0f8e3091 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -113,6 +113,7 @@
#define TUNER_PHILIPS_TD1316 67
#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
+#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
#define NOTUNER 0
#define PAL 1 /* PAL_BG */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
new file mode 100644
index 00000000000..d3fd48157eb
--- /dev/null
+++ b/include/media/v4l2-common.h
@@ -0,0 +1,110 @@
+/*
+ v4l2 common internal API header
+
+ This header contains internal shared ioctl definitions for use by the
+ internal low-level v4l2 drivers.
+ Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal
+ define.
+
+ Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef V4L2_COMMON_H_
+#define V4L2_COMMON_H_
+
+/* VIDIOC_INT_AUDIO_CLOCK_FREQ */
+enum v4l2_audio_clock_freq {
+ V4L2_AUDCLK_32_KHZ = 32000,
+ V4L2_AUDCLK_441_KHZ = 44100,
+ V4L2_AUDCLK_48_KHZ = 48000,
+};
+
+/* VIDIOC_INT_G_REGISTER and VIDIOC_INT_S_REGISTER */
+struct v4l2_register {
+ u32 i2c_id; /* I2C driver ID of the I2C chip. 0 for the I2C adapter. */
+ unsigned long reg;
+ u32 val;
+};
+
+/* VIDIOC_INT_DECODE_VBI_LINE */
+struct v4l2_decode_vbi_line {
+ u32 is_second_field; /* Set to 0 for the first (odd) field,
+ set to 1 for the second (even) field. */
+ u8 *p; /* Pointer to the sliced VBI data from the decoder.
+ On exit points to the start of the payload. */
+ u32 line; /* Line number of the sliced VBI data (1-23) */
+ u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */
+};
+
+/* VIDIOC_INT_G_CHIP_IDENT: identifies the actual chip installed on the board */
+enum v4l2_chip_ident {
+ /* general idents: reserved range 0-49 */
+ V4L2_IDENT_UNKNOWN = 0,
+
+ /* module saa7115: reserved range 100-149 */
+ V4L2_IDENT_SAA7114 = 104,
+ V4L2_IDENT_SAA7115 = 105,
+
+ /* module saa7127: reserved range 150-199 */
+ V4L2_IDENT_SAA7127 = 157,
+ V4L2_IDENT_SAA7129 = 159,
+
+ /* module cx25840: reserved range 200-249 */
+ V4L2_IDENT_CX25840 = 240,
+ V4L2_IDENT_CX25841 = 241,
+ V4L2_IDENT_CX25842 = 242,
+ V4L2_IDENT_CX25843 = 243,
+};
+
+/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
+#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register)
+#define VIDIOC_INT_G_REGISTER _IOWR('d', 101, struct v4l2_register)
+
+/* Reset the I2C chip */
+#define VIDIOC_INT_RESET _IO ('d', 102)
+
+/* Set the frequency of the audio clock output.
+ Used to slave an audio processor to the video decoder, ensuring that audio
+ and video remain synchronized. */
+#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOR ('d', 103, enum v4l2_audio_clock_freq)
+
+/* Video decoders that support sliced VBI need to implement this ioctl.
+ Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI
+ data that was generated by the decoder. The driver then parses the sliced
+ VBI data and sets the other fields in the struct accordingly. The pointer p
+ is updated to point to the start of the payload which can be copied
+ verbatim into the data field of the v4l2_sliced_vbi_data struct. If no
+ valid VBI data was found, then the type field is set to 0 on return. */
+#define VIDIOC_INT_DECODE_VBI_LINE _IOWR('d', 104, struct v4l2_decode_vbi_line)
+
+/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
+ filled with the data packets that should be output. Note that if you set
+ the line field to 0, then that VBI signal is disabled. */
+#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data)
+
+/* Used to obtain the sliced VBI packet from a readback register. Not all
+ video decoders support this. If no data is available because the readback
+ register contains invalid or erroneous data -EIO is returned. Note that
+ you must fill in the 'id' member and the 'field' member (to determine
+ whether CC data from the first or second field should be obtained). */
+#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data *)
+
+/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can
+ be made. */
+#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident *)
+
+#endif /* V4L2_COMMON_H_ */
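
The VIDIOC_INT_DECODE_VBI_LINE contract described above (p in: raw sliced data; p out: start of payload; type zeroed when nothing is recognized) can be modelled outside the kernel. A hedged mock decoder following that calling convention; the struct fields mirror the header with standard fixed-width types, and the parsing itself is invented:

#include <stdio.h>
#include <stdint.h>

/* Mirror of struct v4l2_decode_vbi_line, using standalone stand-in types */
struct decode_vbi_line {
	uint32_t is_second_field;
	uint8_t *p;		/* in: raw sliced data; out: start of payload */
	uint32_t line;
	uint32_t type;		/* out: service type, 0 if nothing decoded */
};

/* Invented decoder: treat a leading 0x55 byte as "service found" */
static void mock_decode_vbi_line(struct decode_vbi_line *vbi)
{
	if (vbi->p[0] == 0x55) {
		vbi->type = 1;		/* placeholder service id */
		vbi->p += 1;		/* skip the framing byte: payload starts here */
	} else {
		vbi->type = 0;		/* nothing recognized on this line */
	}
}

int main(void)
{
	uint8_t raw[4] = { 0x55, 0xaa, 0xbb, 0xcc };
	struct decode_vbi_line vbi = { .is_second_field = 0, .p = raw, .line = 21 };

	mock_decode_vbi_line(&vbi);
	printf("type=%u payload[0]=%#x\n", vbi.type, vbi.p[0]);
	return 0;
}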