Diffstat (limited to 'include')
-rw-r--r--  include/linux/decompress/mm.h     4
-rw-r--r--  include/linux/elf.h               2
-rw-r--r--  include/linux/kfifo.h           555
-rw-r--r--  include/linux/mm.h                3
-rw-r--r--  include/linux/perf_counter.h    444
-rw-r--r--  include/linux/rcutiny.h           5
-rw-r--r--  include/linux/rcutree.h          11
-rw-r--r--  include/linux/sched.h            13
-rw-r--r--  include/scsi/libiscsi.h           3
-rw-r--r--  include/scsi/libiscsi_tcp.h       2
-rw-r--r--  include/scsi/libsrp.h             2
11 files changed, 542 insertions, 502 deletions
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d0..5032b9a31ae 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
void *p;
if (size < 0)
- error("Malloc error");
+ return NULL;
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
@@ -35,7 +35,7 @@ static void *malloc(int size)
malloc_ptr += size;
if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
- error("Out of memory");
+ return NULL;
malloc_count++;
return p;
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 90a4ed0ea0e..0cc4d55151b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -361,7 +361,7 @@ typedef struct elf64_shdr {
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
-#define NT_PRXSTATUS 0x300 /* s390 upper register halves */
+#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
/* Note header in a PT_NOTE section */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ad6bdf5a597..486e8ad3bb5 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,6 +1,7 @@
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,6 +19,25 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+
+/*
+ * How to port drivers to the new generic fifo API:
+ *
+ * - Modify the declaration of the "struct kfifo *" object into an
+ *   in-place "struct kfifo" object
+ * - Init the in-place object with kfifo_alloc() or kfifo_init()
+ *   Note: The address of the in-place "struct kfifo" object must be
+ *   passed as the first argument to these functions
+ * - Replace the use of __kfifo_put with kfifo_in and __kfifo_get
+ *   with kfifo_out
+ * - Replace the use of kfifo_put with kfifo_in_locked and kfifo_get
+ *   with kfifo_out_locked
+ *   Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
+ *   must now be passed to kfifo_in_locked and kfifo_out_locked
+ *   as the last parameter
+ * - All functions formerly named __kfifo_* have been renamed to kfifo_*
+ */
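For orientation, a rough before/after sketch of those porting steps follows; the my_dev structure, my_lock, and MY_FIFO_SIZE are invented for illustration and are not part of this patch:

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

#define MY_FIFO_SIZE    128     /* hypothetical size; kfifo sizes are powers of two */

struct my_dev {
        struct kfifo    fifo;           /* was: struct kfifo *fifo; */
        spinlock_t      my_lock;        /* formerly handed to kfifo_alloc() */
};

static int my_dev_setup(struct my_dev *dev)
{
        spin_lock_init(&dev->my_lock);
        /* was: dev->fifo = kfifo_alloc(MY_FIFO_SIZE, GFP_KERNEL, &dev->my_lock); */
        return kfifo_alloc(&dev->fifo, MY_FIFO_SIZE, GFP_KERNEL);
}

static void my_dev_rx(struct my_dev *dev, const unsigned char *buf,
                      unsigned int len)
{
        /* was: kfifo_put(dev->fifo, buf, len); the lock is now passed explicitly */
        kfifo_in_locked(&dev->fifo, buf, len, &dev->my_lock);
}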
+
#ifndef _LINUX_KFIFO_H
#define _LINUX_KFIFO_H
@@ -29,124 +49,563 @@ struct kfifo {
unsigned int size; /* the size of the allocated buffer */
unsigned int in; /* data is added at offset (in % size) */
unsigned int out; /* data is extracted from off. (out % size) */
- spinlock_t *lock; /* protects concurrent modifications */
};
-extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
- spinlock_t *lock);
+/*
+ * Macros for declaration and initialization of the kfifo datatype
+ */
+
+/* helper macro */
+#define __kfifo_initializer(s, b) \
+ (struct kfifo) { \
+ .size = s, \
+ .in = 0, \
+ .out = 0, \
+ .buffer = b \
+ }
+
+/**
+ * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer
+ *
+ * Note: the macro can be used inside a struct or union declaration
+ * Note: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DECLARE_KFIFO(name, size) \
+union { \
+ struct kfifo name; \
+ unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \
+}
+
+/**
+ * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
+ * @name: name of the declared kfifo datatype
+ */
+#define INIT_KFIFO(name) \
+ name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
+ sizeof(struct kfifo), name##kfifo_buffer)
+
+/**
+ * DEFINE_KFIFO - macro to define and initialize a kfifo
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer
+ *
+ * Note: the macro can be used for global and local kfifo data type variables
+ * Note: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DEFINE_KFIFO(name, size) \
+ unsigned char name##kfifo_buffer[size]; \
+ struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
+
+#undef __kfifo_initializer
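A minimal usage sketch for the declaration macros above; the names log_fifo, serial_port, and rx_fifo are invented for illustration:

#include <linux/kfifo.h>

/* File-scope fifo plus its buffer, ready to use without further setup. */
DEFINE_KFIFO(log_fifo, 64);

/* Fifo embedded in a structure; must be set up with INIT_KFIFO() at runtime. */
struct serial_port {
        DECLARE_KFIFO(rx_fifo, 64);
};

static void serial_port_setup(struct serial_port *port)
{
        INIT_KFIFO(port->rx_fifo);
}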
+
+extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+ unsigned int size);
+extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
+ gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
-extern unsigned int __kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len);
-extern unsigned int __kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len);
+extern unsigned int kfifo_in(struct kfifo *fifo,
+ const unsigned char *from, unsigned int len);
+extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
+ unsigned char *to, unsigned int len);
/**
- * __kfifo_reset - removes the entire FIFO contents, no locking version
+ * kfifo_reset - removes the entire FIFO contents
* @fifo: the fifo to be emptied.
*/
-static inline void __kfifo_reset(struct kfifo *fifo)
+static inline void kfifo_reset(struct kfifo *fifo)
{
fifo->in = fifo->out = 0;
}
/**
- * kfifo_reset - removes the entire FIFO contents
+ * kfifo_reset_out - skip FIFO contents
* @fifo: the fifo to be emptied.
*/
-static inline void kfifo_reset(struct kfifo *fifo)
+static inline void kfifo_reset_out(struct kfifo *fifo)
{
- unsigned long flags;
+ smp_mb();
+ fifo->out = fifo->in;
+}
- spin_lock_irqsave(fifo->lock, flags);
+/**
+ * kfifo_size - returns the size of the fifo in bytes
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_size(struct kfifo *fifo)
+{
+ return fifo->size;
+}
- __kfifo_reset(fifo);
+/**
+ * kfifo_len - returns the number of used bytes in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int kfifo_len(struct kfifo *fifo)
+{
+ register unsigned int out;
- spin_unlock_irqrestore(fifo->lock, flags);
+ out = fifo->out;
+ smp_rmb();
+ return fifo->in - out;
}
/**
- * kfifo_put - puts some data into the FIFO
+ * kfifo_is_empty - returns true if the fifo is empty
* @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
+ */
+static inline __must_check int kfifo_is_empty(struct kfifo *fifo)
+{
+ return fifo->in == fifo->out;
+}
+
+/**
+ * kfifo_is_full - returns true if the fifo is full
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check int kfifo_is_full(struct kfifo *fifo)
+{
+ return kfifo_len(fifo) == kfifo_size(fifo);
+}
+
+/**
+ * kfifo_avail - returns the number of bytes available in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
+{
+ return kfifo_size(fifo) - kfifo_len(fifo);
+}
+
+/**
+ * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @lock: pointer to the spinlock to use for locking.
*
- * This function copies at most @len bytes from the @buffer into
+ * This function copies at most @n bytes from the @from buffer into
* the FIFO depending on the free space, and returns the number of
* bytes copied.
*/
-static inline unsigned int kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len)
+static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
+ const unsigned char *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
- ret = __kfifo_put(fifo, buffer, len);
+ ret = kfifo_in(fifo, from, n);
- spin_unlock_irqrestore(fifo->lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return ret;
}
/**
- * kfifo_get - gets some data from the FIFO
+ * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking
* @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
- * @len: the size of the destination buffer.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @lock: pointer to the spinlock to use for locking.
*
* This function copies at most @n bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
*/
-static inline unsigned int kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
+ unsigned char *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
- ret = __kfifo_get(fifo, buffer, len);
+ ret = kfifo_out(fifo, to, n);
/*
* optimization: if the FIFO is empty, set the indices to 0
* so we don't wrap the next time
*/
- if (fifo->in == fifo->out)
- fifo->in = fifo->out = 0;
+ if (kfifo_is_empty(fifo))
+ kfifo_reset(fifo);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return ret;
+}
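As a usage sketch, a producer in interrupt context and a consumer in process context can share one fifo with one external spinlock; rx_fifo and rx_lock are illustrative names, and the fifo is assumed to have been set up with kfifo_alloc() elsewhere:

static struct kfifo rx_fifo;            /* assumed: kfifo_alloc()'d during probe */
static DEFINE_SPINLOCK(rx_lock);

/* Producer, e.g. an interrupt handler: bytes that do not fit are dropped. */
static void rx_push(const unsigned char *data, unsigned int len)
{
        kfifo_in_locked(&rx_fifo, data, len, &rx_lock);
}

/* Consumer, e.g. process context: returns how many bytes were copied. */
static unsigned int rx_pop(unsigned char *buf, unsigned int len)
{
        return kfifo_out_locked(&rx_fifo, buf, len, &rx_lock);
}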
+
+extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
+
+extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int n);
+
+extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int n);
+
+/**
+ * __kfifo_add_out internal helper function for updating the out offset
+ */
+static inline void __kfifo_add_out(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_mb();
+ fifo->out += off;
+}
+
+/**
+ * __kfifo_add_in internal helper function for updating the in offset
+ */
+static inline void __kfifo_add_in(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_wmb();
+ fifo->in += off;
+}
+
+/**
+ * __kfifo_off internal helper function for calculating the index of a
+ * given offset
+ */
+static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off)
+{
+ return off & (fifo->size - 1);
+}
+
+/**
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static inline unsigned int __kfifo_peek_n(struct kfifo *fifo,
+ unsigned int recsize)
+{
+#define __KFIFO_GET(fifo, off, shift) \
+ ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift))
+
+ unsigned int l;
+
+ l = __KFIFO_GET(fifo, 0, 0);
+
+ if (--recsize)
+ l |= __KFIFO_GET(fifo, 1, 8);
+
+ return l;
+#undef __KFIFO_GET
+}
+
+/**
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the next record into the fifo
+ */
+static inline void __kfifo_poke_n(struct kfifo *fifo,
+ unsigned int recsize, unsigned int n)
+{
+#define __KFIFO_PUT(fifo, off, val, shift) \
+ ( \
+ (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \
+ (unsigned char)((val) >> (shift)) \
+ )
- spin_unlock_irqrestore(fifo->lock, flags);
+ __KFIFO_PUT(fifo, 0, n, 0);
+ if (--recsize)
+ __KFIFO_PUT(fifo, 1, n, 8);
+#undef __KFIFO_PUT
+}
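To make the record header concrete: with recsize == 1 the length of the next record is stored in a single byte in front of the payload, and with recsize == 2 in two bytes, low byte first. A standalone sketch of just that encoding in plain C, ignoring the ring-buffer wrap that __kfifo_off handles:

#include <stdio.h>

static void poke_n(unsigned char *hdr, unsigned int recsize, unsigned int n)
{
        hdr[0] = (unsigned char)n;                 /* bits 0..7 of the length */
        if (recsize > 1)
                hdr[1] = (unsigned char)(n >> 8);  /* bits 8..15 */
}

static unsigned int peek_n(const unsigned char *hdr, unsigned int recsize)
{
        unsigned int l = hdr[0];

        if (recsize > 1)
                l |= hdr[1] << 8;
        return l;
}

int main(void)
{
        unsigned char hdr[2];

        poke_n(hdr, 2, 300);
        printf("%u\n", peek_n(hdr, 2));            /* prints 300 */
        return 0;
}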
+
+/**
+ * __kfifo_in_... internal functions for putting data into the fifo
+ * do not call them directly, use kfifo_in_rec() instead
+ */
+extern unsigned int __kfifo_in_n(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
+
+extern unsigned int __kfifo_in_generic(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
+
+static inline unsigned int __kfifo_in_rec(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
+
+ ret = __kfifo_in_n(fifo, from, n, recsize);
+
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
/**
- * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
+ * kfifo_in_rec - puts some record data into the FIFO
* @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from @from into the FIFO and returns
+ * the number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
*/
-static inline unsigned int __kfifo_len(struct kfifo *fifo)
+static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo,
+ void *from, unsigned int n, unsigned int recsize)
{
- return fifo->in - fifo->out;
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_in_generic(fifo, from, n, recsize);
+ return __kfifo_in_rec(fifo, from, n, recsize);
}
/**
- * kfifo_len - returns the number of bytes available in the FIFO
+ * __kfifo_out_... internal functions for getting data from the fifo
+ * do not call them directly, use kfifo_out_rec() instead
+ */
+extern unsigned int __kfifo_out_n(struct kfifo *fifo,
+ void *to, unsigned int reclen, unsigned int recsize);
+
+extern unsigned int __kfifo_out_generic(struct kfifo *fifo,
+ void *to, unsigned int n,
+ unsigned int recsize, unsigned int *total);
+
+static inline unsigned int __kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+{
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_out_n(fifo, to, l, recsize);
+}
+
+/**
+ * kfifo_out_rec - gets some record data from the FIFO
* @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to @to and returns the
+ * number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
*/
-static inline unsigned int kfifo_len(struct kfifo *fifo)
+static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+
{
- unsigned long flags;
- unsigned int ret;
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_out_generic(fifo, to, n, recsize, total);
+ return __kfifo_out_rec(fifo, to, n, recsize, total);
+}
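A sketch of how the record interface above might be used for variable-length messages; msg_fifo and the error codes are illustrative only, and the fifo is assumed to be allocated elsewhere:

static struct kfifo msg_fifo;           /* assumed: allocated with kfifo_alloc() */

static int msg_send(void *msg, unsigned int len)
{
        /* recsize == 2: each record carries a 2-byte length header. */
        if (kfifo_in_rec(&msg_fifo, msg, len, 2))
                return -ENOSPC;         /* nonzero return: record did not fit */
        return 0;
}

static int msg_recv(void *buf, unsigned int buflen, unsigned int *len)
{
        /* nonzero return: record larger than buflen or nothing could be copied */
        if (kfifo_out_rec(&msg_fifo, buf, buflen, 2, len))
                return -EAGAIN;
        return 0;
}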
+
+/**
+ * __kfifo_from_user_... internal functions for transferring data from user
+ * space into the fifo; do not call them directly, use kfifo_from_user_rec()
+ * instead
+ */
+extern unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
- spin_lock_irqsave(fifo->lock, flags);
+extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
- ret = __kfifo_len(fifo);
+static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
- spin_unlock_irqrestore(fifo->lock, flags);
+ ret = __kfifo_from_user_n(fifo, from, n, recsize);
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
+/**
+ * kfifo_from_user_rec - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from @from into the
+ * FIFO and returns the number of bytes which cannot be copied.
+ *
+ * If the returned value is less than or equal to @n, the copy_from_user()
+ * function has failed. Otherwise the record doesn't fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_from_user_generic(fifo, from, n, recsize);
+ return __kfifo_from_user_rec(fifo, from, n, recsize);
+}
+
+/**
+ * __kfifo_to_user_... internal functions for transferring fifo data to user
+ * space; do not call them directly, use kfifo_to_user_rec() instead
+ */
+extern unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int reclen,
+ unsigned int recsize);
+
+extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total);
+
+static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n,
+ unsigned int recsize, unsigned int *total)
+{
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_to_user_n(fifo, to, n, l, recsize);
+}
+
+/**
+ * kfifo_to_user_rec - gets data from the FIFO and write it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to @to.
+ * In case of an error, the function returns the number of bytes which cannot
+ * be copied.
+ * If the returned value is less than or equal to @n, the copy_to_user()
+ * function has failed. Otherwise the record doesn't fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_to_user_generic(fifo, to, n, recsize, total);
+ return __kfifo_to_user_rec(fifo, to, n, recsize, total);
+}
+
+/**
+ * __kfifo_peek_... internal functions for peeking into the next fifo record
+ * do not call them directly, use kfifo_peek_rec() instead
+ */
+extern unsigned int __kfifo_peek_generic(struct kfifo *fifo,
+ unsigned int recsize);
+
+/**
+ * kfifo_peek_rec - gets the size of the next FIFO record data
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function returns the size of the next FIFO record in bytes
+ */
+static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_peek_generic(fifo, recsize);
+ if (!recsize)
+ return kfifo_len(fifo);
+ return __kfifo_peek_n(fifo, recsize);
+}
+
+/**
+ * __kfifo_skip_... internal functions for skipping the next fifo record
+ * do not call them directly, use kfifo_skip_rec() instead
+ */
+extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize);
+
+static inline void __kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l;
+
+ if (recsize) {
+ l = __kfifo_peek_n(fifo, recsize);
+
+ if (l + recsize <= kfifo_len(fifo)) {
+ __kfifo_add_out(fifo, l + recsize);
+ return;
+ }
+ }
+ kfifo_reset_out(fifo);
+}
+
+/**
+ * kfifo_skip_rec - skip the next fifo out record
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function skips the next FIFO record
+ */
+static inline void kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ __kfifo_skip_generic(fifo, recsize);
+ else
+ __kfifo_skip_rec(fifo, recsize);
+}
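Continuing the earlier message-fifo sketch, kfifo_peek_rec() and kfifo_skip_rec() let a consumer inspect the size of the next record and drop it when it is too large for the caller's buffer; the helper below is illustrative, not part of this patch:

static int msg_recv_or_drop(struct kfifo *fifo, void *buf, unsigned int buflen)
{
        unsigned int len;

        if (kfifo_is_empty(fifo))
                return -EAGAIN;

        len = kfifo_peek_rec(fifo, 2);          /* size of the next record */
        if (len > buflen) {
                kfifo_skip_rec(fifo, 2);        /* discard the oversized record */
                return -EMSGSIZE;
        }
        return kfifo_out_rec(fifo, buf, buflen, 2, &len) ? -EAGAIN : (int)len;
}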
+
+/**
+ * kfifo_avail_rec - returns the number of bytes available in a record FIFO
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ */
+static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l = kfifo_size(fifo) - kfifo_len(fifo);
+
+ return (l > recsize) ? l - recsize : 0;
+}
+
#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 849b4a61bd8..2265f28eb47 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1037,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
deleted file mode 100644
index e3fb2560670..00000000000
--- a/include/linux/perf_counter.h
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * NOTE: this file will be removed in a future kernel release, it is
- * provided as a courtesy copy of user-space code that relies on the
- * old (pre-rename) symbols and constants.
- *
- * Performance events:
- *
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
- *
- * Data type definitions, declarations, prototypes.
- *
- * Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
- */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
-
-/*
- * User-space ABI bits:
- */
-
-/*
- * attr.type
- */
-enum perf_type_id {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
- PERF_TYPE_HW_CACHE = 3,
- PERF_TYPE_RAW = 4,
-
- PERF_TYPE_MAX, /* non-ABI */
-};
-
-/*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
- * syscall:
- */
-enum perf_hw_id {
- /*
- * Common hardware events, generalized by the kernel:
- */
- PERF_COUNT_HW_CPU_CYCLES = 0,
- PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
- PERF_COUNT_HW_CACHE_MISSES = 3,
- PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
- PERF_COUNT_HW_BUS_CYCLES = 6,
-
- PERF_COUNT_HW_MAX, /* non-ABI */
-};
-
-/*
- * Generalized hardware cache counters:
- *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
- * { read, write, prefetch } x
- * { accesses, misses }
- */
-enum perf_hw_cache_id {
- PERF_COUNT_HW_CACHE_L1D = 0,
- PERF_COUNT_HW_CACHE_L1I = 1,
- PERF_COUNT_HW_CACHE_LL = 2,
- PERF_COUNT_HW_CACHE_DTLB = 3,
- PERF_COUNT_HW_CACHE_ITLB = 4,
- PERF_COUNT_HW_CACHE_BPU = 5,
-
- PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_id {
- PERF_COUNT_HW_CACHE_OP_READ = 0,
- PERF_COUNT_HW_CACHE_OP_WRITE = 1,
- PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-
- PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_result_id {
- PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
- PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-
- PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
-};
-
-/*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
- */
-enum perf_sw_ids {
- PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
- PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
- PERF_COUNT_SW_CPU_MIGRATIONS = 4,
- PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
- PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
- PERF_COUNT_SW_EMULATION_FAULTS = 8,
-
- PERF_COUNT_SW_MAX, /* non-ABI */
-};
-
-/*
- * Bits that can be set in attr.sample_type to request information
- * in the overflow packets.
- */
-enum perf_counter_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_READ = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_ID = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
- PERF_SAMPLE_PERIOD = 1U << 8,
- PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_RAW = 1U << 10,
-
- PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
-};
-
-/*
- * The format of the data returned by read() on a perf counter fd,
- * as specified by attr.read_format:
- *
- * struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
- *
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
- * };
- */
-enum perf_counter_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
- PERF_FORMAT_GROUP = 1U << 3,
-
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
-};
-
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-
-/*
- * Hardware event to monitor via a performance monitoring counter:
- */
-struct perf_counter_attr {
-
- /*
- * Major type: hardware/software/tracepoint/etc.
- */
- __u32 type;
-
- /*
- * Size of the attr structure, for fwd/bwd compat.
- */
- __u32 size;
-
- /*
- * Type specific configuration information.
- */
- __u64 config;
-
- union {
- __u64 sample_period;
- __u64 sample_freq;
- };
-
- __u64 sample_type;
- __u64 read_format;
-
- __u64 disabled : 1, /* off by default */
- inherit : 1, /* children inherit it */
- pinned : 1, /* must always be on PMU */
- exclusive : 1, /* only group on PMU */
- exclude_user : 1, /* don't count user */
- exclude_kernel : 1, /* ditto kernel */
- exclude_hv : 1, /* ditto hypervisor */
- exclude_idle : 1, /* don't count when idle */
- mmap : 1, /* include mmap data */
- comm : 1, /* include comm data */
- freq : 1, /* use freq, not period */
- inherit_stat : 1, /* per task counts */
- enable_on_exec : 1, /* next exec enables */
- task : 1, /* trace fork/exit */
- watermark : 1, /* wakeup_watermark */
-
- __reserved_1 : 49;
-
- union {
- __u32 wakeup_events; /* wakeup every n events */
- __u32 wakeup_watermark; /* bytes before wakeup */
- };
- __u32 __reserved_2;
-
- __u64 __reserved_3;
-};
-
-/*
- * Ioctls that can be done on a perf counter fd:
- */
-#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
-
-enum perf_counter_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
-};
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_counter_mmap_page {
- __u32 version; /* version number of this structure */
- __u32 compat_version; /* lowest version this is compat with */
-
- /*
- * Bits needed to read the hw counters in user-space.
- *
- * u32 seq;
- * s64 count;
- *
- * do {
- * seq = pc->lock;
- *
- * barrier()
- * if (pc->index) {
- * count = pmc_read(pc->index - 1);
- * count += pc->offset;
- * } else
- * goto regular_read;
- *
- * barrier();
- * } while (pc->lock != seq);
- *
- * NOTE: for obvious reason this only works on self-monitoring
- * processes.
- */
- __u32 lock; /* seqlock for synchronization */
- __u32 index; /* hardware counter identifier */
- __s64 offset; /* add to hardware counter value */
- __u64 time_enabled; /* time counter active */
- __u64 time_running; /* time counter on cpu */
-
- /*
- * Hole for extension of the self monitor capabilities
- */
-
- __u64 __reserved[123]; /* align to 1k */
-
- /*
- * Control data for the mmap() data buffer.
- *
- * User-space reading the @data_head value should issue an rmb(), on
- * SMP capable platforms, after reading this value -- see
- * perf_counter_wakeup().
- *
- * When the mapping is PROT_WRITE the @data_tail value should be
- * written by userspace to reflect the last read data. In this case
- * the kernel will not over-write unread data.
- */
- __u64 data_head; /* head in the data section */
- __u64 data_tail; /* user-space written tail */
-};
-
-#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-
-struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
-};
-
-enum perf_event_type {
-
- /*
- * The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * char filename[];
- * };
- */
- PERF_EVENT_MMAP = 1,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
- * };
- */
- PERF_EVENT_LOST = 2,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * char comm[];
- * };
- */
- PERF_EVENT_COMM = 3,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * };
- */
- PERF_EVENT_EXIT = 4,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 time;
- * u64 id;
- * u64 stream_id;
- * };
- */