Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile                      |    1
-rw-r--r--  lib/genalloc.c                    |  263
-rw-r--r--  lib/iomap_copy.c                  |   28
-rw-r--r--  lib/kobject.c                     |    6
-rw-r--r--  lib/percpu_counter.c              |   46
-rw-r--r--  lib/radix-tree.c                  |  197
-rw-r--r--  lib/string.c                      |   30
-rw-r--r--  lib/zlib_deflate/deflate.c        |   25
-rw-r--r--  lib/zlib_deflate/deflate_syms.c   |    3
-rw-r--r--  lib/zlib_inflate/Makefile         |    4
-rw-r--r--  lib/zlib_inflate/infblock.c       |  365
-rw-r--r--  lib/zlib_inflate/infblock.h       |   48
-rw-r--r--  lib/zlib_inflate/infcodes.c       |  202
-rw-r--r--  lib/zlib_inflate/infcodes.h       |   33
-rw-r--r--  lib/zlib_inflate/inffast.c        |  462
-rw-r--r--  lib/zlib_inflate/inffast.h        |   12
-rw-r--r--  lib/zlib_inflate/inffixed.h       |   94
-rw-r--r--  lib/zlib_inflate/inflate.c        | 1086
-rw-r--r--  lib/zlib_inflate/inflate.h        |  107
-rw-r--r--  lib/zlib_inflate/inflate_syms.c   |    3
-rw-r--r--  lib/zlib_inflate/inflate_sync.c   |  152
-rw-r--r--  lib/zlib_inflate/inftrees.c       |  683
-rw-r--r--  lib/zlib_inflate/inftrees.h       |   99
-rw-r--r--  lib/zlib_inflate/infutil.c        |   88
-rw-r--r--  lib/zlib_inflate/infutil.h        |  176
25 files changed, 2074 insertions, 2139 deletions
diff --git a/lib/Makefile b/lib/Makefile
index b830c9a1554..79358ad1f11 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85..71338b48e88 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
*/
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
#include <linux/genalloc.h>
-#include <asm/page.h>
-
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
- struct gen_pool *poolp;
- unsigned long tmp;
- int i;
-
- /*
- * This is really an arbitrary limit, +10 is enough for
- * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
- * this can be increased without problems.
- */
- if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
- ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
- return NULL;
-
- if (!max_chunk_shift)
- max_chunk_shift = PAGE_SHIFT;
-
- poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
- if (!poolp)
- return NULL;
- memset(poolp, 0, sizeof(struct gen_pool));
- poolp->h = kmalloc(sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
- GFP_KERNEL);
- if (!poolp->h) {
- printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
- kfree(poolp);
- return NULL;
- }
- memset(poolp->h, 0, sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
- spin_lock_init(&poolp->lock);
- poolp->get_new_chunk = fp;
- poolp->max_chunk_shift = max_chunk_shift;
- poolp->private = data;
-
- for (i = 0; i < nr_chunks; i++) {
- tmp = poolp->get_new_chunk(poolp);
- printk(KERN_INFO "allocated %lx\n", tmp);
- if (!tmp)
- break;
- gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
- }
+ struct gen_pool *pool;
- return poolp;
+ pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+ if (pool != NULL) {
+ rwlock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->chunks);
+ pool->min_alloc_order = min_alloc_order;
+ }
+ return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
*/
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+ int nid)
{
- int j, i, s, max_chunk_size;
- unsigned long a, flags;
- struct gen_pool_link *h = poolp->h;
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+ (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ if (unlikely(chunk == NULL))
+ return -1;
- if (size > max_chunk_size)
- return 0;
+ memset(chunk, 0, nbytes);
+ spin_lock_init(&chunk->lock);
+ chunk->start_addr = addr;
+ chunk->end_addr = addr + size;
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- j = i -= ALLOC_MIN_SHIFT;
-
- spin_lock_irqsave(&poolp->lock, flags);
- while (!h[j].next) {
- if (s == max_chunk_size) {
- struct gen_pool_link *ptr;
- spin_unlock_irqrestore(&poolp->lock, flags);
- ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
- spin_lock_irqsave(&poolp->lock, flags);
- h[j].next = ptr;
- if (h[j].next)
- h[j].next->next = NULL;
- break;
- }
- j++;
- s <<= 1;
- }
- a = (unsigned long) h[j].next;
- if (a) {
- h[j].next = h[j].next->next;
- /*
- * This should be split into a seperate function doing
- * the chunk split in order to support custom
- * handling memory not physically accessible by host
- */
- while (j > i) {
- j -= 1;
- s >>= 1;
- h[j].next = (struct gen_pool_link *) (a + s);
- h[j].next->next = NULL;
- }
- }
- spin_unlock_irqrestore(&poolp->lock, flags);
- return a;
+ write_lock(&pool->lock);
+ list_add(&chunk->next_chunk, &pool->chunks);
+ write_unlock(&pool->lock);
+
+ return 0;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
/*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
*/
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct gen_pool_link *q;
- struct gen_pool_link *h = poolp->h;
- unsigned long a, b, flags;
- int i, s, max_chunk_size;
-
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long addr, flags;
+ int order = pool->min_alloc_order;
+ int nbits, bit, start_bit, end_bit;
- if (size > max_chunk_size)
- return;
-
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- i -= ALLOC_MIN_SHIFT;
-
- a = ptr;
+ if (size == 0)
+ return 0;
- spin_lock_irqsave(&poolp->lock, flags);
- while (1) {
- if (s == max_chunk_size) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
- break;
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit -= nbits + 1;
+
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = -1;
+ while (bit + 1 < end_bit) {
+ bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+ if (bit >= end_bit)
+ break;
+
+ start_bit = bit;
+ if (nbits > 1) {
+ bit = find_next_bit(chunk->bits, bit + nbits,
+ bit + 1);
+ if (bit - start_bit < nbits)
+ continue;
+ }
+
+ addr = chunk->start_addr +
+ ((unsigned long)start_bit << order);
+ while (nbits--)
+ __set_bit(start_bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
- b = a ^ s;
- q = &h[i];
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ }
+ read_unlock(&pool->lock);
+ return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
- while (q->next && q->next != (struct gen_pool_link *)b)
- q = q->next;
- if (!q->next) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long flags;
+ int order = pool->min_alloc_order;
+ int bit, nbits;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ BUG_ON(addr + size > chunk->end_addr);
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = (addr - chunk->start_addr) >> order;
+ while (nbits--)
+ __clear_bit(bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
break;
}
- q->next = q->next->next;
- a = a & b;
- s <<= 1;
- i++;
}
- spin_unlock_irqrestore(&poolp->lock, flags);
+ BUG_ON(nbits > 0);
+ read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
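
The rewrite reduces the allocator's public surface to the four calls above. A
minimal usage sketch, not part of this commit: dev_mem_base and DEV_MEM_SIZE
are hypothetical device-memory parameters, and min_alloc_order = 8 makes each
bitmap bit cover 2^8 = 256 bytes.

	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(8, -1);	/* 256-byte granularity, any node */
	if (!pool)
		return -ENOMEM;
	/* note: this version returns -1 on failure, not -ENOMEM */
	if (gen_pool_add(pool, dev_mem_base, DEV_MEM_SIZE, -1) < 0)
		return -ENOMEM;

	addr = gen_pool_alloc(pool, 512);	/* first-fit over chunk bitmaps */
	if (addr)
		gen_pool_free(pool, addr, 512);

There is no gen_pool_destroy() at this point; a pool is assumed to live as
long as the driver that created it.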
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 351045f4f63..864fc5ea398 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -40,3 +40,31 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
+
+/**
+ * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 64-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + count;
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+#else
+ __iowrite32_copy(to, from, count * 2);
+#endif
+}
+
+EXPORT_SYMBOL_GPL(__iowrite64_copy);
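
Callers pass the count in 64-bit quantities, not bytes; on 32-bit kernels each
quantity is emitted as two 32-bit writes through __iowrite32_copy(). A short
sketch, assuming a hypothetical regs pointer obtained from ioremap() and
64-bit-aligned buffers:

	u64 desc[16];			/* hypothetical descriptor block */

	memset(desc, 0, sizeof(desc));
	__iowrite64_copy(regs, desc, ARRAY_SIZE(desc));	/* 16 u64s = 128 bytes */

Because neither write ordering nor a trailing barrier is guaranteed, a caller
that needs the device to see the whole block before ringing a doorbell must
add its own wmb().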
diff --git a/lib/kobject.c b/lib/kobject.c
index 687ab418d29..8e7c7199348 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -198,14 +198,14 @@ int kobject_add(struct kobject * kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- pr_debug("kobject_add failed for %s with -EEXIST, "
+ printk("kobject_add failed for %s with -EEXIST, "
"don't try to register things with the "
"same name in the same directory.\n",
kobject_name(kobj));
else
- pr_debug("kobject_add failed for %s (%d)\n",
+ printk("kobject_add failed for %s (%d)\n",
kobject_name(kobj), error);
- /* dump_stack(); */
+ dump_stack();
}
return error;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 00000000000..850449080e1
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+{
+ long count;
+ s32 *pcount;
+ int cpu = get_cpu();
+
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count = *pcount + amount;
+ if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+ spin_lock(&fbc->lock);
+ fbc->count += count;
+ *pcount = 0;
+ spin_unlock(&fbc->lock);
+ } else {
+ *pcount = count;
+ }
+ put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ s64 ret;
+ int cpu;
+
+ spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ spin_unlock(&fbc->lock);
+ return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
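
The intended split: hot paths call percpu_counter_mod(), which touches only
the CPU-local s32 until the delta reaches FBC_BATCH and only then takes
fbc->lock to fold into the shared s64, while code that needs an exact,
non-negative total pays for percpu_counter_sum(). A minimal sketch, where
nr_requests is a hypothetical counter and percpu_counter_init() is the
one-argument form of this era:

	static struct percpu_counter nr_requests;

	percpu_counter_init(&nr_requests);

	/* hot path: usually a per-cpu add, no lock taken */
	percpu_counter_mod(&nr_requests, 1);
	/* ... request completes ... */
	percpu_counter_mod(&nr_requests, -1);

	/* slow path: lock once, fold in every CPU's pending delta */
	printk(KERN_INFO "in flight: %lld\n",
	       (long long)percpu_counter_sum(&nr_requests));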
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7097bb239e4..b32efae7688 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT 6
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
@@ -74,6 +74,11 @@ struct radix_tree_preload {
};
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+{
+ return root->gfp_mask & __GFP_BITS_MASK;
+}
+
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -82,9 +87,10 @@ static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
struct radix_tree_node *ret;
+ gfp_t gfp_mask = root_gfp_mask(root);
- ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask);
- if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) {
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
rtp = &__get_cpu_var(radix_tree_preloads);
@@ -152,6 +158,27 @@ static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
return test_bit(offset, node->tags[tag]);
}
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= ~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
@@ -182,7 +209,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
unsigned int height;
- char tags[RADIX_TREE_MAX_TAGS];
int tag;
/* Figure out what the height should be. */
@@ -195,16 +221,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
goto out;
}
- /*
- * Prepare the tag status of the top-level node for propagation
- * into the newly-pushed top-level node(s)
- */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 0;
- if (any_tag_set(root->rnode, tag))
- tags[tag] = 1;
- }
-
do {
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
@@ -214,7 +230,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
+ if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
@@ -243,8 +259,7 @@ int radix_tree_insert(struct radix_tree_root *root,
int error;
/* Make sure the tree is high enough. */
- if ((!index && !root->rnode) ||
- index > radix_tree_maxindex(root->height)) {
+ if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
@@ -255,7 +270,7 @@ int radix_tree_insert(struct radix_tree_root *root,
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
- do {
+ while (height > 0) {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
@@ -273,16 +288,21 @@ int radix_tree_insert(struct radix_tree_root *root,
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
- } while (height > 0);
+ }
if (slot != NULL)
return -EEXIST;
- BUG_ON(!node);
- node->count++;
- node->slots[offset] = item;
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ if (node) {
+ node->count++;
+ node->slots[offset] = item;
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
+ } else {
+ root->rnode = item;
+ BUG_ON(root_tag_get(root, 0));
+ BUG_ON(root_tag_get(root, 1));
+ }
return 0;
}
@@ -295,9 +315,13 @@ static inline void **__lookup_slot(struct radix_tree_root *root,
struct radix_tree_node **slot;
height = root->height;
+
if (index > radix_tree_maxindex(height))
return NULL;
+ if (height == 0 && root->rnode)
+ return (void **)&root->rnode;
+
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = &root->rnode;
@@ -365,11 +389,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
struct radix_tree_node *slot;
height = root->height;
- if (index > radix_tree_maxindex(height))
- return NULL;
+ BUG_ON(index > radix_tree_maxindex(height));
- shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
int offset;
@@ -383,6 +406,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
height--;
}
+ /* set the root's tag bit */
+ if (slot && !root_tag_get(root, tag))
+ root_tag_set(root, tag);
+
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
@@ -405,9 +432,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
height = root->height;
if (index > radix_tree_maxindex(height))
@@ -432,20 +458,24 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
height--;
}
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- do {
+ while (pathp->node) {
if (!tag_get(pathp->node, tag, pathp->offset))
goto out;
tag_clear(pathp->node, tag, pathp->offset);
if (any_tag_set(pathp->node, tag))
goto out;
pathp--;
- } while (pathp->node);
+ }
+
+ /* clear the root's tag bit */
+ if (root_tag_get(root, tag))
+ root_tag_clear(root, tag);
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
@@ -458,9 +488,8 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
*
* Return values:
*
- * 0: tag not present
- * 1: tag present, set
- * -1: tag present, unset
+ * 0: tag not present or not set
+ * 1: tag set
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
@@ -473,6 +502,13 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (index > radix_tree_maxindex(height))
return 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ if (height == 0)
+ return 1;
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -494,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret ? 1 : -1;
+ return ret;
}
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
@@ -514,8 +550,11 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
unsigned long i;
height = root->height;
- if (height == 0)
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
goto out;
+ }
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -603,10 +642,16 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
unsigned int height = root->height;
struct radix_tree_node *slot;
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
- while (height > 0) {
+ do {
unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
@@ -637,7 +682,7 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
slot = slot->slots[i];
- }
+ } while (height > 0);
out:
*next_index = index;
return nr_found;
@@ -665,6 +710,10 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret = 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
while (ret < max_items) {
unsigned int nr_found;
unsigned long next_index; /* Index of next search */
@@ -689,7 +738,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
static inline void radix_tree_shrink(struct radix_tree_root *root)
{
/* try to shrink tree height */
- while (root->height > 1 &&
+ while (root->height > 0 &&
root->rnode->count == 1 &&
root->rnode->slots[0]) {
struct radix_tree_node *to_free = root->rnode;
@@ -717,12 +766,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_path *orig_pathp;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
- char tags[RADIX_TREE_MAX_TAGS];
- int nr_cleared_tags;
int tag;
int offset;
@@ -730,11 +775,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (index > radix_tree_maxindex(height))
goto out;
+ slot = root->rnode;
+ if (height == 0 && root->rnode) {
+ root_tag_clear_all(root);
+ root->rnode = NULL;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- slot = root->rnode;
- for ( ; height > 0; height--) {
+ do {
if (slot == NULL)
goto out;
@@ -744,44 +795,22 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
pathp->node = slot;
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
- }
+ height--;
+ } while (height > 0);
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- orig_pathp = pathp;
-
/*
* Clear all tags associated with the just-deleted item
*/
- nr_cleared_tags = 0;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 1;
- if (tag_get(pathp->node, tag, pathp->offset)) {
- tag_clear(pathp->node, tag, pathp->offset);
- if (!any_tag_set(pathp->node, tag)) {
- tags[tag] = 0;
- nr_cleared_tags++;
- }
- }
- }
-
- for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
- continue;
-
- tag_clear(pathp->node, tag, pathp->offset);
- if (any_tag_set(pathp->node, tag)) {
- tags[tag] = 1;
- nr_cleared_tags--;
- }
- }
+ if (tag_get(pathp->node, tag, pathp->offset))
+ radix_tree_tag_clear(root, index, tag);
}
/* Now free the nodes we do not need anymore */
- for (pathp = orig_pathp; pathp->node; pathp--) {
+ while (pathp->node) {
pathp->node->slots[pathp->offset] = NULL;
pathp->node->count--;
@@ -793,11 +822,15 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
/* Node with zero slots in use so free it */
radix_tree_node_free(pathp->node);
+
+ pathp--;
}
- root->rnode = NULL;
+ root_tag_clear_all(root);
root->height = 0;
+ root->rnode = NULL;
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -808,11 +841,7 @@ EXPORT_SYMBOL(radix_tree_delete);
*/
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
- struct radix_tree_node *rnode;
- rnode = root->rnode;
- if (!rnode)
- return 0;
- return any_tag_set(rnode, tag);
+ return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
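
The net effect of mirroring tags into the root's gfp_mask: radix_tree_tagged()
and the early exit in radix_tree_gang_lookup_tag() become O(1) bit tests, a
height-0 tree keeps a single item directly in root->rnode, and
radix_tree_tag_get() returns a plain 0/1 instead of the old 0/1/-1 tri-state.
A sketch of the new semantics, where item and do_something() are hypothetical
and tag 0 stands in for a user such as PAGECACHE_TAG_DIRTY:

	static RADIX_TREE(tree, GFP_KERNEL);	/* upper gfp_mask bits hold root tags */
	static int obj;
	void *item = &obj;

	radix_tree_insert(&tree, 0, item);	/* height 0: stored in root->rnode */
	radix_tree_tag_set(&tree, 0, 0);	/* sets the root's tag bit */

	BUG_ON(!radix_tree_tagged(&tree, 0));	/* O(1): no tree walk */

	if (radix_tree_tag_get(&tree, 0, 0))	/* now 1 if set, 0 otherwise */
		do_something(item);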
diff --git a/lib/string.c b/lib/string.c
index 064f6315b1c..63077267367 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -301,6 +301,36 @@ char *strnchr(const char *s, size_t count, int c)
EXPORT_SYMBOL(strnchr);
#endif
+/**
+ * strstrip - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strstrip(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end != s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ while (*s && isspace(*s))
+ s++;
+
+ return s;
+}
+EXPORT_SYMBOL(strstrip);
+
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
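
strstrip() modifies the buffer in place (it writes a NUL over the first
trailing whitespace character) and returns a pointer that may land in the
middle of the buffer, so callers must pass writable storage and keep the
original pointer around for any later kfree(). A small sketch:

	char buf[] = "  echo 1 > /proc/foo \n";
	char *cmd;

	cmd = strstrip(buf);			/* buf is now "  echo 1 > /proc/foo" */
	printk(KERN_INFO "'%s'\n", cmd);	/* prints 'echo 1 > /proc/foo' */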
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 1653dd9bb01..c3e4a2baf83 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -164,34 +164,17 @@ static const config configuration_table[10] = {
memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
/* ========================================================================= */
-int zlib_deflateInit_(
- z_streamp strm,
- int level,
- const char *version,
- int stream_size
-)
-{
- return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS,
- DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY, version, stream_size);
- /* To do: ignore strm->next_in if we use it as window */
-}
-
-/* ========================================================================= */
-int zlib_deflateInit2_(
+int zlib_deflateInit2(
z_streamp strm,
int level,
int method,
int windowBits,
int memLevel,
- int strategy,
- const char *version,
- int stream_size
+ int strategy
)
{
deflate_state *s;
int noheader = 0;
- static char* my_version = ZLIB_VERSION;
deflate_workspace *mem;
ush *overlay;
@@ -199,10 +182,6 @@ int zlib_deflateInit2_(
* output size for (length,distance) codes is <= 24 bits.
*/
- if (version == NULL || version[0] != my_version[0] ||
- stream_size != sizeof(z_stream)) {
- return Z_VERSION_ERROR;
- }
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL;
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 767b573d1ef..ccfe25f3920 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -12,8 +12,7 @@
EXPORT_SYMBOL(zlib_deflate_workspacesize);
EXPORT_SYMBOL(zlib_deflate);
-EXPORT_SYMBOL(zlib_deflateInit_);
-EXPORT_SYMBOL(zlib_deflateInit2_);
+EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/Makefile b/lib/zlib_inflate/Makefile
index 221c139e0df..bf065482fa6 100644
--- a/lib/zlib_inflate/Makefile
+++ b/