Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/auxio.h                 2
-rw-r--r--  include/asm-sparc64/bitops.h               56
-rw-r--r--  include/asm-sparc64/compat.h                1
-rw-r--r--  include/asm-sparc64/emergency-restart.h     6
-rw-r--r--  include/asm-sparc64/floppy.h               16
-rw-r--r--  include/asm-sparc64/irq.h                  56
-rw-r--r--  include/asm-sparc64/kdebug.h                2
-rw-r--r--  include/asm-sparc64/param.h                 5
-rw-r--r--  include/asm-sparc64/parport.h               4
-rw-r--r--  include/asm-sparc64/pbm.h                   3
-rw-r--r--  include/asm-sparc64/pci.h                  21
-rw-r--r--  include/asm-sparc64/ptrace.h                5
-rw-r--r--  include/asm-sparc64/rwsem.h                47
-rw-r--r--  include/asm-sparc64/seccomp.h              21
-rw-r--r--  include/asm-sparc64/signal.h               15
-rw-r--r--  include/asm-sparc64/spinlock.h             29
-rw-r--r--  include/asm-sparc64/spitfire.h            131
-rw-r--r--  include/asm-sparc64/system.h               25
-rw-r--r--  include/asm-sparc64/termios.h              78
-rw-r--r--  include/asm-sparc64/thread_info.h          23
-rw-r--r--  include/asm-sparc64/timer.h                41
-rw-r--r--  include/asm-sparc64/unistd.h               10
22 files changed, 211 insertions, 386 deletions
diff --git a/include/asm-sparc64/auxio.h b/include/asm-sparc64/auxio.h
index 5eb01dd4715..81a590a50a1 100644
--- a/include/asm-sparc64/auxio.h
+++ b/include/asm-sparc64/auxio.h
@@ -75,6 +75,8 @@
#ifndef __ASSEMBLY__
+extern void __iomem *auxio_register;
+
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9d722dc8cca..9c5e7197028 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
/* "non-atomic" versions... */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m |= (1UL << (nr & 63));
}
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m &= ~(1UL << (nr & 63));
}
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m ^= (1UL << (nr & 63));
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old | mask);
return ((old & mask) != 0);
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old & ~mask);
return ((old & mask) != 0);
}
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old ^ mask);
return ((old & mask) != 0);
@@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
#define smp_mb__after_clear_bit() barrier()
#endif
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
+static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
{
- return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
+ return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
}
/* The easy/cheese version for now. */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
unsigned long result = 0;
@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
{
if (!x)
return 0;
@@ -158,7 +158,7 @@ static __inline__ int ffs(int x)
#ifdef ULTRA_HAS_POPULATION_COUNT
-static __inline__ unsigned int hweight64(unsigned long w)
+static inline unsigned int hweight64(unsigned long w)
{
unsigned int res;
@@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w)
return res;
}
-static __inline__ unsigned int hweight32(unsigned int w)
+static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
@@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w)
return res;
}
-static __inline__ unsigned int hweight16(unsigned int w)
+static inline unsigned int hweight16(unsigned int w)
{
unsigned int res;
@@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w)
return res;
}
-static __inline__ unsigned int hweight8(unsigned int w)
+static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;
@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
#define test_and_clear_le_bit(nr,addr) \
test_and_clear_bit((nr) ^ 0x38, (addr))
-static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
+static inline int test_le_bit(int nr, __const__ unsigned long * addr)
{
int mask;
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
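
Note (not part of the patch): the double-underscore routines above are the non-atomic bit operations; they are only safe when the caller already serializes access to the word, while the plain set_bit()/clear_bit() forms remain atomic. A minimal, hypothetical sketch:

	/* Hypothetical fragment; assumes <linux/spinlock.h> and <linux/bitops.h>. */
	static DEFINE_SPINLOCK(map_lock);
	static unsigned long map[BITS_TO_LONGS(128)];

	static void mark_slot(int nr)
	{
		spin_lock(&map_lock);
		__set_bit(nr, map);	/* non-atomic: map_lock provides exclusion */
		spin_unlock(&map_lock);
	}

	static void mark_slot_atomic(int nr)
	{
		set_bit(nr, map);	/* atomic: no external locking required */
	}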
diff --git a/include/asm-sparc64/compat.h b/include/asm-sparc64/compat.h
index 22f58055b8a..b59122dd176 100644
--- a/include/asm-sparc64/compat.h
+++ b/include/asm-sparc64/compat.h
@@ -25,6 +25,7 @@ typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
+typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
diff --git a/include/asm-sparc64/emergency-restart.h b/include/asm-sparc64/emergency-restart.h
new file mode 100644
index 00000000000..108d8c48e42
--- /dev/null
+++ b/include/asm-sparc64/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index e071b4b4edf..49d49a28594 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -159,7 +159,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
* underruns. If non-zero, doing_pdma encodes the direction of
* the transfer for debugging. 1=read 2=write
*/
-char *pdma_vaddr;
+unsigned char *pdma_vaddr;
unsigned long pdma_size;
volatile int doing_pdma = 0;
@@ -209,8 +209,7 @@ static void sun_fd_enable_dma(void)
pdma_areasize = pdma_size;
}
-/* Our low-level entry point in arch/sparc/kernel/entry.S */
-extern irqreturn_t floppy_hardint(int irq, void *unused, struct pt_regs *regs);
+extern irqreturn_t sparc_floppy_irq(int, void *, struct pt_regs *);
static int sun_fd_request_irq(void)
{
@@ -220,8 +219,8 @@ static int sun_fd_request_irq(void)
if(!once) {
once = 1;
- error = request_fast_irq(FLOPPY_IRQ, floppy_hardint,
- SA_INTERRUPT, "floppy", NULL);
+ error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
+ SA_INTERRUPT, "floppy", NULL);
return ((error == 0) ? 0 : -1);
}
@@ -615,7 +614,7 @@ static unsigned long __init sun_floppy_init(void)
struct linux_ebus *ebus;
struct linux_ebus_device *edev = NULL;
unsigned long config = 0;
- unsigned long auxio_reg;
+ void __iomem *auxio_reg;
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
@@ -642,7 +641,7 @@ static unsigned long __init sun_floppy_init(void)
/* Make sure the high density bit is set, some systems
* (most notably Ultra5/Ultra10) come up with it clear.
*/
- auxio_reg = edev->resource[2].start;
+ auxio_reg = (void __iomem *) edev->resource[2].start;
writel(readl(auxio_reg)|0x2, auxio_reg);
sun_pci_ebus_dev = ebus->self;
@@ -650,7 +649,8 @@ static unsigned long __init sun_floppy_init(void)
spin_lock_init(&sun_pci_fd_ebus_dma.lock);
/* XXX ioremap */
- sun_pci_fd_ebus_dma.regs = edev->resource[1].start;
+ sun_pci_fd_ebus_dma.regs = (void __iomem *)
+ edev->resource[1].start;
if (!sun_pci_fd_ebus_dma.regs)
return 0;
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 3aef0ca6775..8b70edcb80d 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -16,10 +16,22 @@
#include <asm/pil.h>
#include <asm/ptrace.h>
+struct ino_bucket;
+
+#define MAX_IRQ_DESC_ACTION 4
+
+struct irq_desc {
+ void (*pre_handler)(struct ino_bucket *, void *, void *);
+ void *pre_handler_arg1;
+ void *pre_handler_arg2;
+ u32 action_active_mask;
+ struct irqaction action[MAX_IRQ_DESC_ACTION];
+};
+
/* You should not mess with this directly. That's the job of irq.c.
*
* If you make changes here, please update hand coded assembler of
- * SBUS/floppy interrupt handler in entry.S -DaveM
+ * the vectored interrupt trap handler in entry.S -DaveM
*
* This is currently one DCACHE line, two buckets per L2 cache
* line. Keep this in mind please.
@@ -42,24 +54,11 @@ struct ino_bucket {
/* Miscellaneous flags. */
/*0x06*/unsigned char flags;
- /* This is used to deal with IBF_DMA_SYNC on
- * Sabre systems.
- */
-/*0x07*/unsigned char synctab_ent;
-
- /* Reference to handler for this IRQ. If this is
- * non-NULL this means it is active and should be
- * serviced. Else the pending member is set to one
- * and later registry of the interrupt checks for
- * this condition.
- *
- * Normally this is just an irq_action structure.
- * But, on PCI, if multiple interrupt sources behind
- * a bridge have multiple interrupt sources that share
- * the same INO bucket, this points to an array of
- * pointers to four IRQ action structures.
- */
-/*0x08*/void *irq_info;
+ /* Currently unused. */
+/*0x07*/unsigned char __pad;
+
+ /* Reference to IRQ descriptor for this bucket. */
+/*0x08*/struct irq_desc *irq_info;
/* Sun5 Interrupt Clear Register. */
/*0x10*/unsigned long iclr;
@@ -69,12 +68,6 @@ struct ino_bucket {
};
-#ifdef CONFIG_PCI
-extern unsigned long pci_dma_wsync;
-extern unsigned long dma_sync_reg_table[256];
-extern unsigned char dma_sync_reg_table_entry;
-#endif
-
/* IMAP/ICLR register defines */
#define IMAP_VALID 0x80000000 /* IRQ Enabled */
#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
@@ -90,11 +83,9 @@ extern unsigned char dma_sync_reg_table_entry;
#define ICLR_PENDING 0x00000003 /* Pending state */
/* Only 8-bits are available, be careful. -DaveM */
-#define IBF_DMA_SYNC 0x01 /* DMA synchronization behind PCI bridge needed. */
-#define IBF_PCI 0x02 /* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt. */
-#define IBF_ACTIVE 0x04 /* This interrupt is active and has a handler. */
-#define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
-#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
+#define IBF_PCI 0x02 /* PSYCHO/SABRE/SCHIZO PCI interrupt. */
+#define IBF_ACTIVE 0x04 /* Interrupt is active and has a handler.*/
+#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
#define NUM_IVECS (IMAP_INR + 1)
extern struct ino_bucket ivector_table[NUM_IVECS];
@@ -122,11 +113,6 @@ extern void enable_irq(unsigned int);
extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap);
extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
-extern int request_fast_irq(unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long flags, __const__ char *devname,
- void *dev_id);
-
static __inline__ void set_softint(unsigned long bits)
{
__asm__ __volatile__("wr %0, 0x0, %%set_softint"
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index f70d3dad01f..6321f5a0198 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -16,7 +16,7 @@ struct die_args {
};
/* Note - you should never unregister because that can race with NMIs.
- * If you really want to do it first unregister - then synchronize_kernel
+ * If you really want to do it first unregister - then synchronize_sched
* - then free.
*/
int register_die_notifier(struct notifier_block *nb);
diff --git a/include/asm-sparc64/param.h b/include/asm-sparc64/param.h
index 6a12f3ac035..a1cd4974630 100644
--- a/include/asm-sparc64/param.h
+++ b/include/asm-sparc64/param.h
@@ -1,9 +1,10 @@
-/* $Id: param.h,v 1.2 2000/10/30 21:01:41 davem Exp $ */
#ifndef _ASMSPARC64_PARAM_H
#define _ASMSPARC64_PARAM_H
+#include <linux/config.h>
+
#ifdef __KERNEL__
-# define HZ 1000 /* Internal kernel timer frequency */
+# define HZ CONFIG_HZ /* Internal kernel timer frequency */
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ)
#endif
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h
index b7e635544ce..56b5197d789 100644
--- a/include/asm-sparc64/parport.h
+++ b/include/asm-sparc64/parport.h
@@ -27,12 +27,12 @@ static struct sparc_ebus_info {
static __inline__ void enable_dma(unsigned int dmanr)
{
+ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
+
if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
sparc_ebus_dmas[dmanr].addr,
sparc_ebus_dmas[dmanr].count))
BUG();
-
- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
}
static __inline__ void disable_dma(unsigned int dmanr)
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index 4c15610a2ba..38bbbccb406 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -145,6 +145,9 @@ struct pci_pbm_info {
/* Physical address base of PBM registers. */
unsigned long pbm_regs;
+ /* Physical address of DMA sync register, if any. */
+ unsigned long sync_reg;
+
/* Opaque 32-bit system bus Port ID. */
u32 portid;
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index 2a0c85cd1c1..a4ab0ec7143 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -23,7 +23,7 @@ static inline void pcibios_set_master(struct pci_dev *dev)
/* No special bus mastering setup handling */
}
-static inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
{
/* We don't do dynamic PCI IRQ allocation */
}
@@ -220,6 +220,25 @@ static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
return (dma_addr == PCI_DMA_ERROR_CODE);
}
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ unsigned long cacheline_size;
+ u8 byte;
+
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ *strat = PCI_DMA_BURST_BOUNDARY;
+ *strategy_parameter = cacheline_size;
+}
+#endif
+
/* Return the index of the PCI controller for device PDEV. */
extern int pci_domain_nr(struct pci_bus *bus);
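
Note (not part of the patch): a driver consumes the new pci_dma_burst_advice() helper roughly as below; 'pdev' and setup_dma_boundary() are hypothetical stand-ins for the driver's own device pointer and tuning code.

	enum pci_dma_burst_strategy strat;
	unsigned long param;

	pci_dma_burst_advice(pdev, &strat, &param);
	if (strat == PCI_DMA_BURST_BOUNDARY) {
		/* On sparc64 this reports the cache line size: DMA bursts
		 * should not cross 'param'-byte boundaries. */
		setup_dma_boundary(pdev, param);
	}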
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index 2d2b5a113d2..6194f771e9f 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -94,8 +94,9 @@ struct sparc_trapf {
#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
#ifdef __KERNEL__
-#define force_successful_syscall_return() \
- set_thread_flag(TIF_SYSCALL_SUCCESS)
+#define force_successful_syscall_return() \
+do { current_thread_info()->syscall_noerror = 1; \
+} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
#ifdef CONFIG_SMP
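
Note (not part of the patch): force_successful_syscall_return() exists for system calls whose legitimate return values can look like error codes; with this change it simply sets the new per-thread syscall_noerror byte so the trap return path skips raising the error condition. A hedged sketch of a caller:

	/* Hypothetical syscall; compute_result() stands in for a value
	 * that may collide with the negative errno range. */
	asmlinkage long sys_example(void)
	{
		long ret = compute_result();

		force_successful_syscall_return();	/* syscall_noerror = 1 */
		return ret;
	}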
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index bf2ae90ed3d..4568ee4022d 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -46,53 +46,14 @@ extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
- int tmp = delta;
-
- __asm__ __volatile__(
- "1:\tlduw [%2], %%g1\n\t"
- "add %%g1, %1, %%g7\n\t"
- "cas [%2], %%g1, %%g7\n\t"
- "cmp %%g1, %%g7\n\t"
- "bne,pn %%icc, 1b\n\t"
- " membar #StoreLoad | #StoreStore\n\t"
- "mov %%g7, %0\n\t"
- : "=&r" (tmp)
- : "0" (tmp), "r" (sem)
- : "g1", "g7", "memory", "cc");
-
- return tmp + delta;
-}
-
-#define rwsem_atomic_add rwsem_atomic_update
-
-static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
-{
- u32 old = (sem->count & 0xffff0000) | (u32) __old;
- u32 new = (old & 0xffff0000) | (u32) __new;
- u32 prev;
-
-again:
- __asm__ __volatile__("cas [%2], %3, %0\n\t"
- "membar #StoreLoad | #StoreStore"
- : "=&r" (prev)
- : "0" (new), "r" (sem), "r" (old)
- : "memory");
-
- /* To give the same semantics as x86 cmpxchgw, keep trying
- * if only the upper 16-bits changed.
- */
- if (prev != old &&
- ((prev & 0xffff) == (old & 0xffff)))
- goto again;
-
- return prev & 0xffff;
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
- return cmpxchg(&sem->count,old,new);
+ atomic_add(delta, (atomic_t *)(&sem->count));
}
#endif /* __KERNEL__ */
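
Note (not part of the patch): the hand-rolled cas loops are replaced by the generic sparc64 atomics operating on sem->count. Semantically the two helpers now behave as in this illustrative fragment (a delta of 1 chosen arbitrarily):

	static int example_adjust(struct rw_semaphore *sem)
	{
		/* add a delta and observe the resulting count */
		int newcount = rwsem_atomic_update(1, sem);

		/* adjust the count when the result is not needed */
		rwsem_atomic_add(-1, sem);

		return newcount;
	}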
diff --git a/include/asm-sparc64/seccomp.h b/include/asm-sparc64/seccomp.h
new file mode 100644
index 00000000000..7fcd9968192
--- /dev/null
+++ b/include/asm-sparc64/seccomp.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_SECCOMP_H
+
+#include <linux/thread_info.h> /* already defines TIF_32BIT */
+
+#ifndef TIF_32BIT
+#error "unexpected TIF_32BIT on sparc64"
+#endif
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_SECCOMP_H */
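
Note (not part of the patch): these constants feed the generic secure_computing() check; because sparc64 runs both 64-bit and 32-bit (TIF_32BIT) tasks, each permitted call is listed twice. Conceptually the mode-1 policy reduces to something like:

	/* Simplified sketch of the seccomp mode-1 check, not the real
	 * kernel/seccomp.c code. */
	static int seccomp_syscall_allowed(int nr, int is_32bit_task)
	{
		if (is_32bit_task)
			return nr == __NR_seccomp_read_32 ||
			       nr == __NR_seccomp_write_32 ||
			       nr == __NR_seccomp_exit_32 ||
			       nr == __NR_seccomp_sigreturn_32;

		return nr == __NR_seccomp_read ||
		       nr == __NR_seccomp_write ||
		       nr == __NR_seccomp_exit ||
		       nr == __NR_seccomp_sigreturn;
	}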
diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h
index becdf1bc592..e3059bb4a46 100644
--- a/include/asm-sparc64/signal.h
+++ b/include/asm-sparc64/signal.h
@@ -162,21 +162,6 @@ struct sigstack {
#define MINSIGSTKSZ 4096
#define SIGSTKSZ 16384
-#ifdef __KERNEL__
-/*
- * DJHR
- * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
- * interrupt handler's irq structure should be statically allocated
- * by the request_irq routine.
- * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
- * of interrupt usage and that sucks. Also without a flag like this
- * it may be possible for the free_irq routine to attempt to free
- * statically allocated data.. which is NOT GOOD.
- *
- */
-#define SA_STATIC_ALLOC 0x80
-#endif
-
#include <asm-generic/signal.h>
struct __new_sigaction {
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index db7581bdb53..9cb93a5c2b4 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -52,12 +52,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
__asm__ __volatile__(
"1: ldstub [%1], %0\n"
+" membar #StoreLoad | #StoreStore\n"
" brnz,pn %0, 2f\n"
-" membar #StoreLoad | #StoreStore\n"
+" nop\n"
" .subsection 2\n"
"2: ldub [%1], %0\n"
+" membar #LoadLoad\n"
" brnz,pt %0, 2b\n"
-" membar #LoadLoad\n"
+" nop\n"
" ba,a,pt %%xcc, 1b\n"
" .previous"
: "=&r" (tmp)
@@ -95,16 +97,18 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
__asm__ __volatile__(
"1: ldstub [%2], %0\n"
-" brnz,pn %0, 2f\n"
" membar #StoreLoad | #StoreStore\n"
+" brnz,pn %0, 2f\n"
+" nop\n"
" .subsection 2\n"
"2: rdpr %%pil, %1\n"
" wrpr %3, %%pil\n"
"3: ldub [%2], %0\n"
-" brnz,pt %0, 3b\n"
" membar #LoadLoad\n"
+" brnz,pt %0, 3b\n"
+" nop\n"
" ba,pt %%xcc, 1b\n"
-" wrpr %1, %%pil\n"
+" wrpr %1, %%pil\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r"(lock), "r"(flags)
@@ -162,12 +166,14 @@ static void inline __read_lock(rwlock_t *lock)
"4: add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
+" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
-" membar #StoreLoad | #StoreStore\n"
+" nop\n"
" .subsection 2\n"
"2: ldsw [%2], %0\n"
+" membar #LoadLoad\n"
" brlz,pt %0, 2b\n"
-" membar #LoadLoad\n"
+" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
@@ -204,12 +210,14 @@ static void inline __write_lock(rwlock_t *lock)
"4: or %0, %3, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
+" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
-" membar #StoreLoad | #StoreStore\n"
+" nop\n"
" .subsection 2\n"
"2: lduw [%2], %0\n"
+" membar #LoadLoad\n"
" brnz,pt %0, 2b\n"
-" membar #LoadLoad\n"
+" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
@@ -240,8 +248,9 @@ static int inline __write_trylock(rwlock_t *lock)
" or %0, %4, %1\n"
" cas [%3], %0, %1\n"
" cmp %0, %1\n"
+" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
-" membar #StoreLoad | #StoreStore\n"
+" nop\n"
" mov 1, %2\n"
"2:"
: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 9d7613eea81..962638c9d12 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void);
SPITFIRE_HIGHEST_LOCKED_TLBENT : \
CHEETAH_HIGHEST_LOCKED_TLBENT)
-static __inline__ unsigned long spitfire_get_isfsr(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (TLB_SFSR), "i" (ASI_IMMU));
- return ret;
-}
-
-static __inline__ unsigned long spitfire_get_dsfsr(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (TLB_SFSR), "i" (ASI_DMMU));
- return ret;
-}
-
-static __inline__ unsigned long spitfire_get_sfar(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (DMMU_SFAR), "i" (ASI_DMMU));
- return ret;
-}
-
-static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
-{
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
-}
-
-static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
-{
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
-}
-
/* The data cache is write through, so this just invalidates the
* specified line.
*/
@@ -111,7 +65,6 @@ static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long
"membar #Sync"
: /* No outputs */
: "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
- __asm__ __volatile__ ("membar #Sync" : : : "memory");
}
/* The instruction cache lines are flushed with this, but note that
@@ -194,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
"i" (ASI_ITLB_DATA_ACCESS));
}
-/* Spitfire hardware assisted TLB flushes. */
-
-/* Context level flushes. */
-static __inline__ void spitfire_flush_dtlb_primary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x40), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_primary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x40), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_secondary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x50), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_secondary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x50), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x60), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_nucleus_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x60), "i" (ASI_IMMU_DEMAP));
-}
-
-/* Page level flushes. */
-static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
-}
-
static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index fd12ca386f4..ee4bdfc6b88 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -139,19 +139,13 @@ extern void __flushw_user(void);
#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
-#define prepare_arch_switch(rq, next) \
-do { spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
+/* Don't hold the runqueue lock over context switch */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define prepare_arch_switch(next) \
+do { \
flushw_all(); \
} while (0)
-#define finish_arch_switch(rq, prev) \
-do { spin_unlock_irq(&(prev)->switch_lock); \
-} while (0)
-
-#define task_running(rq, p) \
- ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-
/* See what happens when you design the chip correctly?
*
* We tell gcc we clobber all non-fixed-usage registers except
@@ -196,24 +190,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"wrpr %%g1, %%cwp\n\t" \
"ldx [%%g6 + %3], %%o6\n\t" \
"ldub [%%g6 + %2], %%o5\n\t" \
- "ldx [%%g6 + %4], %%o7\n\t" \
+ "ldub [%%g6 + %4], %%o7\n\t" \
"mov %%g6, %%l2\n\t" \
"wrpr %%o5, 0x0, %%wstate\n\t" \
"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
"ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
"wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \
- "ldx [%%g6 + %7], %%g4\n\t" \
+ "ldx [%%g6 + %6], %%g4\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \
- "andcc %%o7, %6, %%g0\n\t" \
- "beq,pt %%icc, 1f\n\t" \
+ "brz,pt %%o7, 1f\n\t" \
" mov %%g7, %0\n\t" \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
: "0" (next->thread_info), \
- "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \
- "i" (_TIF_NEWCHILD), "i" (TI_TASK) \
+ "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
+ "i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
"g1", "g2", "g3", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \
diff --git a/include/asm-sparc64/termios.h b/include/asm-sparc64/termios.h
index 8effce0da08..9777a9cca88 100644
--- a/include/asm-sparc64/termios.h
+++ b/include/asm-sparc64/termios.h
@@ -100,16 +100,17 @@ struct winsize {
#define user_termio_to_kernel_termios(termios, termio) \
({ \
unsigned short tmp; \
- get_user(tmp, &(termio)->c_iflag); \
+ int err; \
+ err = get_user(tmp, &(termio)->c_iflag); \
(termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
- get_user(tmp, &(termio)->c_oflag); \
+ err |= get_user(tmp, &(termio)->c_oflag); \
(termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
- get_user(tmp, &(termio)->c_cflag); \
+ err |= get_user(tmp, &(termio)->c_cflag); \
(termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
- get_user(tmp, &(termio)->c_lflag); \
+ err |= get_user(tmp, &(termio)->c_lflag); \
(termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
- 0; \
+ err |= copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+ err; \
})
/*
@@ -119,53 +120,56 @@ struct winsize {
*/
#define kernel_termios_to_user_termio(termio, termios) \
({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+ int err; \
+ err = put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ err |= put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ err |= put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ err |= put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ err |= put_user((termios)->c_line, &(termio)->c_line); \
+ err |= copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
if (!((termios)->c_lflag & ICANON)) { \
- put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \
- put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \
+ err |= put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \
+ err |= put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \
} \
- 0; \
+ err; \
})
#define user_termios_to_kernel_termios(k, u) \
({ \
- get_user((k)->c_iflag, &(u)->c_iflag); \
- get_user((k)->c_oflag, &(u)->c_oflag); \
- get_user((k)->c_cflag, &(u)->c_cflag); \
- get_user((k)->c_lflag, &(u)->c_lflag); \
- get_user((k)->c_line, &(u)->c_line); \
- copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \
+ int err; \
+ err = get_user((k)->c_iflag, &(u)->c_iflag); \
+ err |= get_user((k)->c_oflag, &(u)->c_oflag); \
+ err |= get_user((k)->c_cflag, &(u)->c_cflag); \
+ err |= get_user((k)->c_lflag, &(u)->c_lflag); \
+ err |= get_user((k)->c_line, &(u)->c_line); \
+ err |= copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \
if((k)->c_lflag & ICANON) { \
- get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
- get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
+ err |= get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
+ err |= get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
} else { \
- get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
- get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
+ err |= get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
+ err |= get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
} \
- 0; \
+ err; \
})
#define kernel_termios_to_user_termios(u, k) \
({ \
- put_user((k)->c_iflag, &(u)->c_iflag); \
- put_user((k)->c_oflag, &(u)->c_oflag); \
- put_user((k)->c_cflag, &(u)->c_cflag); \
- put_user((k)->c_lflag, &(u)->c_lflag); \
- put_user((k)->c_line, &(u)->c_line); \
- copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \
+ int err; \
+ err = put_user((k)->c_iflag, &(u)->c_iflag); \
+ err |= put_user((k)->c_oflag, &(u)->c_oflag); \
+ err |= put_user((k)->c_cflag, &(u)->c_cflag); \
+ err |= put_user((k)->c_lflag, &(u)->c_lflag); \
+ err |= put_user((k)->c_line, &(u)->c_line); \
+ err |= copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \
if(!((k)->c_lflag & ICANON)) { \
- put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
- put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
+ err |= put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
+ err |= put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
} else { \
- put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
- put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
+ err |= put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
+ err |= put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
} \
- 0; \
+ err; \
})
#endif /* __KERNEL__ */
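
Note (not part of the patch): with the conversion macros now accumulating and returning err, callers can detect faulting user pointers instead of silently getting a hard-coded 0. A hedged usage sketch:

	/* Hypothetical wrapper; 'struct termio' and 'struct termios' come
	 * from this header. */
	static int example_copy_in(struct termios *k, struct termio __user *u)
	{
		if (user_termio_to_kernel_termios(k, u))
			return -EFAULT;	/* some get_user/copy_from_user failed */
		return 0;
	}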
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index 517caaba1c8..352d9943661 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -46,8 +46,10 @@ struct thread_info {
unsigned long fault_address;
struct pt_regs *kregs;
struct exec_domain *exec_domain;
- int preempt_count;
- int __pad;
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ __u8 new_child;
+ __u8 syscall_noerror;
+ __u16 __pad;
unsigned long *utraps;
@@ -87,6 +89,8 @@ struct thread_info {
#define TI_KREGS 0x00000028
#define TI_EXEC_DOMAIN 0x00000030
#define TI_PRE_COUNT 0x00000038
+#define TI_NEW_CHILD 0x0000003c
+#define TI_SYS_NOERROR 0x0000003d
#define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048
#define TI_RWIN_SPTRS 0x000003c8
@@ -219,16 +223,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
#define TIF_NEWSIGNALS 6 /* wants new-style signals */
#define TIF_32BIT 7 /* 32-bit binary */
-#define TIF_NEWCHILD 8 /* just-spawned child process */
-/* TIF_* value 9 is available */
-#define TIF_POLLING_NRFLAG 10
-#define TIF_SYSCALL_SUCCESS 11
+/* flag bit 8 is available */
+#define TIF_SECCOMP 9 /* secure computing */
+#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
*/
#define TIF_ABI_PENDING 12
#define TIF_MEMDIE 13
+#define TIF_POLLING_NRFLAG 14
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -238,10 +243,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS)
#define _TIF_32BIT (1<<TIF_32BIT)
-#define _TIF_NEWCHILD (1<<TIF_NEWCHILD)
-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
-#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h
index ba33a2b6b7b..edc8e08c3a3 100644
--- a/include/asm-sparc64/timer.h
+++ b/include/asm-sparc64/timer.h
@@ -9,49 +9,8 @@
#include <linux/types.h>
-/* How timers work:
- *
- * On uniprocessors we just use counter zero for the system wide
- * ticker, this performs thread scheduling, clock book keeping,
- * and runs timer based events. Previously we used the Ultra
- * %tick interrupt for this purpose.
- *
- * On multiprocessors we pick one cpu as the master level 10 tick
- * processor. Here this counter zero tick handles clock book
- * keeping and timer events only. Each Ultra has it's level
- * 14 %tick interrupt set to fire off as well, even the master
- * tick cpu runs this locally. This ticker performs thread
- * scheduling, system/user tick counting for the current thread,
- * and also profiling if enabled.
- */
-
#include <linux/config.h>
-/* Two timers, traditionally steered to PIL's 10 and 14 respectively.
- * But since INO packets are used on sun5, we could use any PIL level
- * we like, however for now we use the normal ones.
- *
- * The 'reg' and 'interrupts' properties for these live in nodes named
- * 'counter-timer'. The first of three 'reg' properties describe where
- * the sun5_timer registers are. The other two I have no idea. (XXX)
- */
-struct sun5_timer {
- u64 count0;
- u64 limit0;
- u64 count1;
- u64 limit1;
-};
-
-#define SUN5_LIMIT_ENABLE 0x80000000
-#define SUN5_LIMIT_TOZERO 0x40000000
-#define SUN5_LIMIT_ZRESTART 0x20000000
-#define SUN5_LIMIT_CMASK 0x1fffffff
-
-/* Given a HZ value, set the limit register to so that the timer IRQ
- * gets delivered that often.
- */
-#define SUN5_HZ_TO_LIMIT(__hz) (1000000/(__hz))
-
struct sparc64_tick_ops {
void (*init_tick)(unsigned long);
unsigned long (*get_tick)(void);
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 5b8dcf5786a..51ec2879b88 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -167,12 +167,12 @@
#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
#define __NR_getsockname 150 /* Common */
-/* #define __NR_getmsg 151 SunOS Specific */
-/* #define __NR_putmsg 152 SunOS Specific */
+#define __NR_inotify_init 151 /* Linux specific */
+#define __NR_inotify_add_watch 152 /* Linux specific */
#define __NR_poll 153 /* Common */
#define __NR_getdents64 154 /* Linux specific */
/* #define __NR_fcntl64 155 Linux sparc32 Specific */
-/* #define __NR_getdirentries 156 SunOS Specific */
+#define __NR_inotify_rm_watch 156 /* Linux specific */
#define __NR_statfs 157 /* Common */
#define __NR_fstatfs 158 /* Common */
#define __NR_umount 159 /* Common */
@@ -212,7 +212,7 @@
#define __NR_epoll_create 193 /* Linux Specific */
#define __NR_epoll_ctl 194 /* Linux Specific */
#define __NR_epoll_wait 195 /* Linux Specific */
-/* #define __NR_ulimit 196 Linux Specific */
+#define __NR_ioprio_set 196 /* Linux Specific */
#define __NR_getppid 197 /* Linux Specific */
#define __NR_sigaction 198 /* Linux Specific */
#define __NR_sgetmask 199 /* Linux Specific */
@@ -234,7 +234,7 @@
#define __NR_ipc 215 /* Linux Specific */
#define __NR_sigreturn 216 /* Linux Specific */
#define __NR_clone 217 /* Linux Specific */
-/* #define __NR_modify_ldt 218 Linux Specific - i386 specific, unused */
+#define __NR_ioprio_get 218 /* Linux Specific */
#define __NR_adjtimex 219 /* Linux Specific */
#define __NR_sigprocmask 220 /* Linux Specific */
#define __NR_create_module 221 /* Linux Specific */