Diffstat (limited to 'include')
-rw-r--r--   include/asm-ia64/sn/xpc.h      6
-rw-r--r--   include/asm-powerpc/spu.h      4
-rw-r--r--   include/asm-x86/byteorder.h    4
-rw-r--r--   include/asm-x86/msr.h         74
-rw-r--r--   include/linux/cpu.h            4
-rw-r--r--   include/linux/cpumask.h        2
-rw-r--r--   include/linux/key.h            2
-rw-r--r--   include/linux/netdevice.h     18
-rw-r--r--   include/linux/pci_ids.h        2
-rw-r--r--   include/linux/pm.h             9
-rw-r--r--   include/linux/pm_legacy.h      6
-rw-r--r--   include/linux/ptrace.h         1
-rw-r--r--   include/linux/slab.h           5
-rw-r--r--   include/linux/slab_def.h       3
-rw-r--r--   include/linux/tty.h            1
-rw-r--r--   include/net/sch_generic.h      1
-rw-r--r--   include/net/sctp/user.h        2
-rw-r--r--   include/net/sock.h             2
-rw-r--r--   include/net/xfrm.h             7
19 files changed, 91 insertions, 62 deletions
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 8e5d7de9c63..3c0900ab800 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1211,11 +1211,13 @@ xpc_IPI_init(int index)
static inline enum xpc_retval
xpc_map_bte_errors(bte_result_t error)
{
+ if (error == BTE_SUCCESS)
+ return xpcSuccess;
+
if (is_shub2()) {
if (BTE_VALID_SH2_ERROR(error))
return xpcBteSh2Start + error;
- else
- return xpcBteUnmappedError;
+ return xpcBteUnmappedError;
}
switch (error) {
case BTE_SUCCESS: return xpcSuccess;
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index b1accce77bb..34b7807f068 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -246,6 +246,7 @@ struct spufs_calls {
__u32 __user *ustatus);
int (*coredump_extra_notes_size)(void);
int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
+ void (*notify_spus_active)(void);
struct module *owner;
};
@@ -298,6 +299,9 @@ struct notifier_block;
int spu_switch_event_register(struct notifier_block * n);
int spu_switch_event_unregister(struct notifier_block * n);
+extern void notify_spus_active(void);
+extern void do_notify_spus_active(void);
+
/*
* This defines the Local Store, Problem Area and Privlege Area of an SPU.
*/
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index 1f2d6d5bf20..fe2f2e5d51b 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -30,13 +30,13 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
} v;
v.u = val;
#ifdef CONFIG_X86_BSWAP
- asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+ __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
#else
v.s.a = ___arch__swab32(v.s.a);
v.s.b = ___arch__swab32(v.s.b);
- asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+ __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
#endif
return v.u;
}
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index ba4b3143212..664a2fa7adc 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -191,38 +191,6 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
- asm volatile("2: wrmsr ; xorl %0,%0\n" \
- "1:\n\t" \
- ".section .fixup,\"ax\"\n\t" \
- "3: movl %4,%0 ; jmp 1b\n\t" \
- ".previous\n\t" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n\t" \
- " .quad 2b,3b\n\t" \
- ".previous" \
- : "=a" (ret__) \
- : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
- ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
- ({ int ret__; \
- asm volatile ("1: rdmsr\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl %4,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
- :"c"(msr), "i"(-EIO), "0"(0)); \
- ret__; })
-
#define rdtsc(low,high) \
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
@@ -230,17 +198,17 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
#define rdtscp(low,high,aux) \
- asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+ __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
#define rdtscll(val) do { \
unsigned int __a,__d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)
#define rdtscpll(val, aux) do { \
unsigned long __a, __d; \
- asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+ __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
(val) = (__d << 32) | __a; \
} while (0)
@@ -253,6 +221,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
: "=a" (low), "=d" (high) \
: "c" (counter))
+
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
@@ -320,6 +289,40 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}
+#ifdef __KERNEL__
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__; \
+ asm volatile("2: wrmsr ; xorl %0,%0\n" \
+ "1:\n\t" \
+ ".section .fixup,\"ax\"\n\t" \
+ "3: movl %4,%0 ; jmp 1b\n\t" \
+ ".previous\n\t" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n\t" \
+ " .quad 2b,3b\n\t" \
+ ".previous" \
+ : "=a" (ret__) \
+ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+ ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+ ({ int ret__; \
+ asm volatile ("1: rdmsr\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl %4,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 1b,3b\n" \
+ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+ :"c"(msr), "i"(-EIO), "0"(0)); \
+ ret__; })
+
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
@@ -343,6 +346,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */
+#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* !__i386__ */
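
[The msr.h hunks above only relocate wrmsr_safe()/rdmsr_safe() under #ifdef __KERNEL__; their behaviour is unchanged. As a minimal sketch, not part of the diff, this is how a kernel caller might use the fault-tolerant variant; MSR_IA32_PLATFORM_ID is only an assumed example MSR and the function name is made up.]

/* Minimal sketch: read an MSR that may fault on some CPUs. */
static int example_read_platform_id(u64 *out)
{
	u32 lo, hi;

	/* rdmsr_safe() returns non-zero if the rdmsr faulted and was fixed up. */
	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
		return -EIO;

	*out = ((u64)hi << 32) | lo;
	return 0;
}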
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b79c5756936..92f2029a34f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -107,7 +107,6 @@ extern void unlock_cpu_hotplug(void);
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
-#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#else /* CONFIG_HOTPLUG_CPU */
@@ -122,9 +121,6 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
-
-/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
-static inline int cpu_is_offline(int cpu) { return 0; }
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 23f55140ccd..85bd790c201 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -397,6 +397,8 @@ extern cpumask_t cpu_present_map;
#define cpu_present(cpu) ((cpu) == 0)
#endif
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
#ifdef CONFIG_SMP
extern int nr_cpu_ids;
#define any_online_cpu(mask) __any_online_cpu(&(mask))
diff --git a/include/linux/key.h b/include/linux/key.h
index fcdbd5ed227..a70b8a8f200 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -290,7 +290,7 @@ extern void key_init(void);
#define key_get(k) ({ NULL; })
#define key_put(k) do { } while(0)
#define key_ref_put(k) do { } while(0)
-#define make_key_ref(k) ({ NULL; })
+#define make_key_ref(k, p) ({ NULL; })
#define key_ref_to_ptr(k) ({ NULL; })
#define is_key_possessed(k) 0
#define alloc_uid_keyring(u,c) 0
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1e6af4f174b..b0813c3286b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -319,21 +319,29 @@ struct napi_struct {
enum
{
NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_DISABLE, /* Disable pending */
};
extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+static inline int napi_disable_pending(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
+
/**
* napi_schedule_prep - check if napi can be scheduled
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable
- * insure only one NAPI poll instance runs
+ * insure only one NAPI poll instance runs. We also make
+ * sure there is no pending NAPI disable.
*/
static inline int napi_schedule_prep(struct napi_struct *n)
{
- return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+ return !napi_disable_pending(n) &&
+ !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
@@ -389,8 +397,10 @@ static inline void napi_complete(struct napi_struct *n)
*/
static inline void napi_disable(struct napi_struct *n)
{
+ set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
+ clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
@@ -1268,7 +1278,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline int netif_rx_schedule_prep(struct net_device *dev,
struct napi_struct *napi)
{
- return netif_running(dev) && napi_schedule_prep(napi);
+ return napi_schedule_prep(napi);
}
/* Add interface to tail of rx poll list. This assumes that _prep has
@@ -1277,7 +1287,6 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
static inline void __netif_rx_schedule(struct net_device *dev,
struct napi_struct *napi)
{
- dev_hold(dev);
__napi_schedule(napi);
}
@@ -1308,7 +1317,6 @@ static inline void __netif_rx_complete(struct net_device *dev,
struct napi_struct *napi)
{
__napi_complete(napi);
- dev_put(dev);
}
/* Remove interface from poll list: it must be in the poll list
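
[Taken together, the netdevice.h hunks above close a shutdown race: napi_disable() spins on NAPI_STATE_SCHED, and without the new NAPI_STATE_DISABLE bit a concurrent interrupt could keep re-arming the poll via napi_schedule_prep(). A minimal sketch of the two sides, not from this diff and using hypothetical driver names:]

/* Hypothetical driver built on the updated NAPI helpers. */
struct example_priv {
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* ... receive at most 'budget' packets, counting them in 'work' ... */

	if (work < budget)
		napi_complete(napi);	/* clears NAPI_STATE_SCHED */
	return work;
}

static void example_down(struct example_priv *priv)
{
	/*
	 * napi_disable() now raises NAPI_STATE_DISABLE before spinning on
	 * NAPI_STATE_SCHED, so a concurrent napi_schedule_prep() from the
	 * interrupt handler sees the pending disable and declines to
	 * reschedule the poll.
	 */
	napi_disable(&priv->napi);
}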
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 023656d2f1d..7f2215139e9 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2322,6 +2322,8 @@
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 09a309b7b5d..b78e0295adf 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -246,6 +246,15 @@ static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
device_set_wakeup_enable(dev,val); \
} while(0)
+/*
+ * Global Power Management flags
+ * Used to keep APM and ACPI from both being active
+ */
+extern unsigned int pm_flags;
+
+#define PM_APM 1
+#define PM_ACPI 2
+
#endif /* __KERNEL__ */
#endif /* _LINUX_PM_H */
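
[The new pm_flags word records which of APM or ACPI has claimed power management, replacing the pm_active/PM_IS_ACTIVE() test removed from pm_legacy.h below. A sketch of the intended check; the function name is made up and this is illustrative only:]

static int example_apm_init(void)
{
	if (pm_flags & PM_ACPI)		/* ACPI already owns power management */
		return -EINVAL;

	pm_flags |= PM_APM;
	return 0;
}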
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
index 514729a4468..446f4f42b95 100644
--- a/include/linux/pm_legacy.h
+++ b/include/linux/pm_legacy.h
@@ -4,10 +4,6 @@
#ifdef CONFIG_PM_LEGACY
-extern int pm_active;
-
-#define PM_IS_ACTIVE() (pm_active != 0)
-
/*
* Register a device with power management
*/
@@ -21,8 +17,6 @@ int __deprecated pm_send_all(pm_request_t rqst, void *data);
#else /* CONFIG_PM_LEGACY */
-#define PM_IS_ACTIVE() 0
-
static inline struct pm_dev *pm_register(pm_dev_t type,
unsigned long id,
pm_callback callback)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ae8146abd74..3ea5750a0f7 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -97,6 +97,7 @@ extern void __ptrace_link(struct task_struct *child,
extern void __ptrace_unlink(struct task_struct *child);
extern void ptrace_untrace(struct task_struct *child);
extern int ptrace_may_attach(struct task_struct *task);
+extern int __ptrace_may_attach(struct task_struct *task);
static inline void ptrace_link(struct task_struct *child,
struct task_struct *new_parent)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index f3a8eecd99f..f62caaad94e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -271,5 +271,10 @@ static inline void *kzalloc(size_t size, gfp_t flags)
return kmalloc(size, flags | __GFP_ZERO);
}
+#ifdef CONFIG_SLABINFO
+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 32bdc2ffd71..fcc48096ee6 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -95,7 +95,4 @@ found:
#endif /* CONFIG_NUMA */
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index c555f5442bd..defd2ab7244 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -319,6 +319,7 @@ extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
extern void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud);
extern void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud);
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
+extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c9265518a37..4c3b35153c3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -325,7 +325,6 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
- n->iif = skb->iif;
}
return n;
}
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 00848b641f5..954090b1e35 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -450,7 +450,7 @@ enum sctp_sn_type {
SCTP_SHUTDOWN_EVENT,
SCTP_PARTIAL_DELIVERY_EVENT,
SCTP_ADAPTATION_INDICATION,
- SCTP_AUTHENTICATION_EVENT,
+ SCTP_AUTHENTICATION_INDICATION,
};
/* Notification error codes used to fill up the error fields in some
diff --git a/include/net/sock.h b/include/net/sock.h
index 67e35c7e230..6e1542da33a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -944,7 +944,7 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
return err;
rcu_read_lock_bh();
- filter = sk->sk_filter;
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
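
[The sk_filter() hunk swaps a plain pointer load for rcu_dereference() inside the existing rcu_read_lock_bh() section, adding the dependency barrier RCU readers need. A generic sketch of that reader pattern, with hypothetical names and plain rcu_read_lock() rather than the _bh variant used above:]

struct example_cfg {
	int value;
};

static struct example_cfg *example_active;	/* published with rcu_assign_pointer() */

static int example_read(void)
{
	struct example_cfg *c;
	int v = 0;

	rcu_read_lock();
	c = rcu_dereference(example_active);	/* as sk->sk_filter above */
	if (c)
		v = c->value;
	rcu_read_unlock();
	return v;
}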
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 58dfa82889a..1dd20cf1798 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1188,10 +1188,15 @@ static inline int xfrm_aevent_is_on(void)
return ret;
}
+static inline int xfrm_alg_len(struct xfrm_algo *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
#ifdef CONFIG_XFRM_MIGRATE
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
- return (struct xfrm_algo *)kmemdup(orig, sizeof(*orig) + orig->alg_key_len, GFP_KERNEL);
+ return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
}
static inline void xfrm_states_put(struct xfrm_state **states, int n)
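
[The xfrm_algo_clone() fix matters because alg_key_len counts bits: the old kmemdup() size added the bit count as if it were bytes, so it copied and read far past the original allocation. A standalone userspace sketch of the corrected arithmetic, using a trimmed-down stand-in struct rather than the kernel's struct xfrm_algo:]

#include <stdio.h>

struct example_algo {
	char alg_name[64];
	unsigned int alg_key_len;	/* in bits */
	char alg_key[];
};

static size_t example_alg_len(const struct example_algo *alg)
{
	/* Round the bit count up to whole key bytes, as xfrm_alg_len() does. */
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

int main(void)
{
	struct example_algo a = { .alg_name = "hmac(sha256)", .alg_key_len = 256 };

	/* 256 bits -> 32 key bytes, not the 256 the old sizeof()+alg_key_len gave. */
	printf("copy %zu bytes\n", example_alg_len(&a));
	return 0;
}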