From 34814545890db603b7648ea2ea477d1f83b61297 Mon Sep 17 00:00:00 2001 From: Eric Sesterhenn Date: Fri, 24 Mar 2006 18:47:11 +0100 Subject: BUG_ON() Conversion in lib/swiotlb.c This changes if() BUG(); constructs to BUG_ON(), which is cleaner, contains unlikely() and can be better optimized away. Signed-off-by: Eric Sesterhenn Signed-off-by: Adrian Bunk --- lib/swiotlb.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 0af497b6b9a..10625785eef 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) else stride = 1; - if (!nslots) - BUG(); + BUG_ON(!nslots); /* * Find suitable number of IO TLB entries size that will fit this @@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, case SYNC_FOR_CPU: if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) memcpy(buffer, dma_addr, size); - else if (dir != DMA_TO_DEVICE) - BUG(); + else + BUG_ON(dir != DMA_TO_DEVICE); break; case SYNC_FOR_DEVICE: if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) memcpy(dma_addr, buffer, size); - else if (dir != DMA_FROM_DEVICE) - BUG(); + else + BUG_ON(dir != DMA_FROM_DEVICE); break; default: BUG(); @@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) unsigned long dev_addr = virt_to_phys(ptr); void *map; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); /* * If the pointer passed in happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce @@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, { char *dma_addr = phys_to_virt(dev_addr); - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) unmap_single(hwdev, dma_addr, size, dir); else if (dir == DMA_FROM_DEVICE) @@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, { char *dma_addr = phys_to_virt(dev_addr); - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) sync_single(hwdev, dma_addr, size, dir, target); else if (dir == DMA_FROM_DEVICE) @@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, { char *dma_addr = phys_to_virt(dev_addr) + offset; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) sync_single(hwdev, dma_addr, size, dir, target); else if (dir == DMA_FROM_DEVICE) @@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, unsigned long dev_addr; int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) { addr = SG_ENT_VIRT_ADDRESS(sg); @@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems, { int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) @@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, { int i; - if (dir == DMA_NONE) - BUG(); + BUG_ON(dir == DMA_NONE); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) -- cgit v1.2.3-70-g09d2 From 871751e25d956ad24f129ca972b7851feaa61d53 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2006 03:06:39 -0800 Subject:
[PATCH] slab: implement /proc/slab_allocators Implement /proc/slab_allocators. It produces output like: idr_layer_cache: 80 idr_pre_get+0x33/0x4e buffer_head: 2555 alloc_buffer_head+0x20/0x75 mm_struct: 9 mm_alloc+0x1e/0x42 mm_struct: 20 dup_mm+0x36/0x370 vm_area_struct: 384 dup_mm+0x18f/0x370 vm_area_struct: 151 do_mmap_pgoff+0x2e0/0x7c3 vm_area_struct: 1 split_vma+0x5a/0x10e vm_area_struct: 11 do_brk+0x206/0x2e2 vm_area_struct: 2 copy_vma+0xda/0x142 vm_area_struct: 9 setup_arg_pages+0x99/0x214 fs_cache: 8 copy_fs_struct+0x21/0x133 fs_cache: 29 copy_process+0xf38/0x10e3 files_cache: 30 alloc_files+0x1b/0xcf signal_cache: 81 copy_process+0xbaa/0x10e3 sighand_cache: 77 copy_process+0xe65/0x10e3 sighand_cache: 1 de_thread+0x4d/0x5f8 anon_vma: 241 anon_vma_prepare+0xd9/0xf3 size-2048: 1 add_sect_attrs+0x5f/0x145 size-2048: 2 journal_init_revoke+0x99/0x302 size-2048: 2 journal_init_revoke+0x137/0x302 size-2048: 2 journal_init_inode+0xf9/0x1c4 Cc: Manfred Spraul Cc: Alexander Nyberg Cc: Pekka Enberg Cc: Christoph Lameter Cc: Ravikiran Thirumalai Signed-off-by: Al Viro DESC slab-leaks3-locking-fix EDESC From: Andrew Morton Update for slab-remove-cachep-spinlock.patch Cc: Al Viro Cc: Manfred Spraul Cc: Alexander Nyberg Cc: Pekka Enberg Cc: Christoph Lameter Cc: Ravikiran Thirumalai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/proc_misc.c | 37 +++++++++++ include/linux/slab.h | 6 +- lib/Kconfig.debug | 4 ++ mm/slab.c | 180 +++++++++++++++++++++++++++++++++++++++++++++++++-- mm/util.c | 4 +- net/core/skbuff.c | 2 +- 6 files changed, 222 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 826c131994c..1e9ea37d457 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -485,6 +485,40 @@ static struct file_operations proc_slabinfo_operations = { .llseek = seq_lseek, .release = seq_release, }; + +#ifdef CONFIG_DEBUG_SLAB_LEAK +extern struct seq_operations slabstats_op; +static int slabstats_open(struct inode *inode, struct file *file) +{ + unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); + int ret = -ENOMEM; + if (n) { + ret = seq_open(file, &slabstats_op); + if (!ret) { + struct seq_file *m = file->private_data; + *n = PAGE_SIZE / (2 * sizeof(unsigned long)); + m->private = n; + n = NULL; + } + kfree(n); + } + return ret; +} + +static int slabstats_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + kfree(m->private); + return seq_release(inode, file); +} + +static struct file_operations proc_slabstats_operations = { + .open = slabstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = slabstats_release, +}; +#endif #endif static int show_stat(struct seq_file *p, void *v) @@ -744,6 +778,9 @@ void __init proc_misc_init(void) create_seq_entry("interrupts", 0, &proc_interrupts_operations); #ifdef CONFIG_SLAB create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations); +#ifdef CONFIG_DEBUG_SLAB_LEAK + create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations); +#endif #endif create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations); create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations); diff --git a/include/linux/slab.h b/include/linux/slab.h index e2ee5b26879..f88e08a5802 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -77,11 +77,12 @@ struct cache_sizes { }; extern struct cache_sizes malloc_sizes[]; -#ifndef CONFIG_DEBUG_SLAB extern void *__kmalloc(size_t, gfp_t); +#ifndef CONFIG_DEBUG_SLAB +#define 
____kmalloc(size, flags) __kmalloc(size, flags) #else extern void *__kmalloc_track_caller(size_t, gfp_t, void*); -#define __kmalloc(size, flags) \ +#define ____kmalloc(size, flags) \ __kmalloc_track_caller(size, flags, __builtin_return_address(0)) #endif @@ -173,6 +174,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) #define kmem_ptr_validate(a, b) (0) #define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f) #define kmalloc_node(s, f, n) kmalloc(s, f) +#define ____kmalloc kmalloc #endif /* CONFIG_SLOB */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f2618e1c2b9..1fe3f897145 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -85,6 +85,10 @@ config DEBUG_SLAB allocation as well as poisoning memory on free to catch use of freed memory. This can make kmalloc/kfree-intensive workloads much slower. +config DEBUG_SLAB_LEAK + bool "Memory leak debugging" + depends on DEBUG_SLAB + config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT diff --git a/mm/slab.c b/mm/slab.c index 26138c9f8f0..a5047161084 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -204,7 +204,8 @@ typedef unsigned int kmem_bufctl_t; #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) #define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) -#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2) +#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) +#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) /* Max number of objs-per-slab for caches which use off-slab slabs. * Needed to avoid a possible looping condition in cache_grow(). @@ -2399,7 +2400,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, /* Verify that the slab belongs to the intended node */ WARN_ON(slabp->nodeid != nodeid); - if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { + if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { printk(KERN_ERR "slab: double free detected in cache " "'%s', objp %p\n", cachep->name, objp); BUG(); @@ -2605,6 +2606,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, */ cachep->dtor(objp + obj_offset(cachep), cachep, 0); } +#ifdef CONFIG_DEBUG_SLAB_LEAK + slab_bufctl(slabp)[objnr] = BUFCTL_FREE; +#endif if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { @@ -2788,6 +2792,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, *dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE; } +#ifdef CONFIG_DEBUG_SLAB_LEAK + { + struct slab *slabp; + unsigned objnr; + + slabp = page_get_slab(virt_to_page(objp)); + objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; + slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; + } +#endif objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) { unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; @@ -3220,22 +3234,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, return __cache_alloc(cachep, flags, caller); } -#ifndef CONFIG_DEBUG_SLAB void *__kmalloc(size_t size, gfp_t flags) { +#ifndef CONFIG_DEBUG_SLAB return __do_kmalloc(size, flags, NULL); +#else + return __do_kmalloc(size, flags, __builtin_return_address(0)); +#endif } EXPORT_SYMBOL(__kmalloc); -#else - +#ifdef CONFIG_DEBUG_SLAB void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) { return __do_kmalloc(size, flags, caller); } EXPORT_SYMBOL(__kmalloc_track_caller); - #endif #ifdef CONFIG_SMP @@ -3899,6 +3914,159 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer, res = 
count; return res; } + +#ifdef CONFIG_DEBUG_SLAB_LEAK + +static void *leaks_start(struct seq_file *m, loff_t *pos) +{ + loff_t n = *pos; + struct list_head *p; + + mutex_lock(&cache_chain_mutex); + p = cache_chain.next; + while (n--) { + p = p->next; + if (p == &cache_chain) + return NULL; + } + return list_entry(p, struct kmem_cache, next); +} + +static inline int add_caller(unsigned long *n, unsigned long v) +{ + unsigned long *p; + int l; + if (!v) + return 1; + l = n[1]; + p = n + 2; + while (l) { + int i = l/2; + unsigned long *q = p + 2 * i; + if (*q == v) { + q[1]++; + return 1; + } + if (*q > v) { + l = i; + } else { + p = q + 2; + l -= i + 1; + } + } + if (++n[1] == n[0]) + return 0; + memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); + p[0] = v; + p[1] = 1; + return 1; +} + +static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) +{ + void *p; + int i; + if (n[0] == n[1]) + return; + for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { + if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) + continue; + if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) + return; + } +} + +static void show_symbol(struct seq_file *m, unsigned long address) +{ +#ifdef CONFIG_KALLSYMS + char *modname; + const char *name; + unsigned long offset, size; + char namebuf[KSYM_NAME_LEN+1]; + + name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); + + if (name) { + seq_printf(m, "%s+%#lx/%#lx", name, offset, size); + if (modname) + seq_printf(m, " [%s]", modname); + return; + } +#endif + seq_printf(m, "%p", (void *)address); +} + +static int leaks_show(struct seq_file *m, void *p) +{ + struct kmem_cache *cachep = p; + struct list_head *q; + struct slab *slabp; + struct kmem_list3 *l3; + const char *name; + unsigned long *n = m->private; + int node; + int i; + + if (!(cachep->flags & SLAB_STORE_USER)) + return 0; + if (!(cachep->flags & SLAB_RED_ZONE)) + return 0; + + /* OK, we can do it */ + + n[1] = 0; + + for_each_online_node(node) { + l3 = cachep->nodelists[node]; + if (!l3) + continue; + + check_irq_on(); + spin_lock_irq(&l3->list_lock); + + list_for_each(q, &l3->slabs_full) { + slabp = list_entry(q, struct slab, list); + handle_slab(n, cachep, slabp); + } + list_for_each(q, &l3->slabs_partial) { + slabp = list_entry(q, struct slab, list); + handle_slab(n, cachep, slabp); + } + spin_unlock_irq(&l3->list_lock); + } + name = cachep->name; + if (n[0] == n[1]) { + /* Increase the buffer size */ + mutex_unlock(&cache_chain_mutex); + m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); + if (!m->private) { + /* Too bad, we are really out */ + m->private = n; + mutex_lock(&cache_chain_mutex); + return -ENOMEM; + } + *(unsigned long *)m->private = n[0] * 2; + kfree(n); + mutex_lock(&cache_chain_mutex); + /* Now make sure this entry will be retried */ + m->count = m->size; + return 0; + } + for (i = 0; i < n[1]; i++) { + seq_printf(m, "%s: %lu ", name, n[2*i+3]); + show_symbol(m, n[2*i+2]); + seq_putc(m, '\n'); + } + return 0; +} + +struct seq_operations slabstats_op = { + .start = leaks_start, + .next = s_next, + .stop = s_stop, + .show = leaks_show, +}; +#endif #endif /** diff --git a/mm/util.c b/mm/util.c index 49e29f751b5..b68d3d7d035 100644 --- a/mm/util.c +++ b/mm/util.c @@ -11,7 +11,7 @@ */ void *kzalloc(size_t size, gfp_t flags) { - void *ret = kmalloc(size, flags); + void *ret = ____kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; @@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp) return NULL; 
len = strlen(s) + 1; - buf = kmalloc(len, gfp); + buf = ____kmalloc(len, gfp); if (buf) memcpy(buf, s, len); return buf; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c9f87845453..09464fa8d72 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -149,7 +149,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, /* Get the DATA. Size must match skb_add_mtu(). */ size = SKB_DATA_ALIGN(size); - data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); + data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); if (!data) goto nodata; -- cgit v1.2.3-70-g09d2 From 4a2f0acf0f951599fd9e4af95cf9483449970c26 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:07:22 -0800 Subject: [PATCH] kconfig: clarify memory debug options The Kconfig text for CONFIG_DEBUG_SLAB and CONFIG_DEBUG_PAGEALLOC has always seemed a bit confusing. Change them to: CONFIG_DEBUG_SLAB: "Debug slab memory allocations" CONFIG_DEBUG_PAGEALLOC: "Debug page memory allocations" Cc: "David S. Miller" Cc: Hirokazu Takata Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/Kconfig.debug | 2 +- arch/m32r/Kconfig.debug | 2 +- arch/sparc64/Kconfig.debug | 2 +- lib/Kconfig.debug | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug index c23da8896f3..6e97df6979e 100644 --- a/arch/i386/Kconfig.debug +++ b/arch/i386/Kconfig.debug @@ -44,7 +44,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386" depends on DEBUG_KERNEL && SOFTWARE_SUSPEND config DEBUG_PAGEALLOC - bool "Page alloc debugging" + bool "Debug page memory allocations" depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && !HUGETLBFS help Unmap pages from the kernel linear mapping after free_pages(). diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug index bbf711bab69..2e1019ddbb2 100644 --- a/arch/m32r/Kconfig.debug +++ b/arch/m32r/Kconfig.debug @@ -19,7 +19,7 @@ config DEBUG_STACK_USAGE This option will slow down process creation somewhat. config DEBUG_PAGEALLOC - bool "Page alloc debugging" + bool "Debug page memory allocations" depends on DEBUG_KERNEL && BROKEN help Unmap pages from the kernel linear mapping after free_pages(). diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug index 3e31be494e5..afe0a7720a2 100644 --- a/arch/sparc64/Kconfig.debug +++ b/arch/sparc64/Kconfig.debug @@ -24,7 +24,7 @@ config DEBUG_BOOTMEM bool "Debug BOOTMEM initialization" config DEBUG_PAGEALLOC - bool "Page alloc debugging" + bool "Debug page memory allocations" depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND help Unmap pages from the kernel linear mapping after free_pages(). diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1fe3f897145..0bda3c5259f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -78,7 +78,7 @@ config SCHEDSTATS this adds. config DEBUG_SLAB - bool "Debug memory allocations" + bool "Debug slab memory allocations" depends on DEBUG_KERNEL && SLAB help Say Y here to have the kernel do limited verification on memory -- cgit v1.2.3-70-g09d2 From daff89f324755f87a060d5125a205c0755811ea9 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Sat, 25 Mar 2006 03:08:05 -0800 Subject: [PATCH] radix-tree documentation cleanups Documentation changes to help radix tree users avoid overrunning the tags array. RADIX_TREE_TAGS moves to linux/radix-tree.h and is now known as RADIX_TREE_MAX_TAGS (Nick Piggin's idea).
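For orientation, typical use of the tag API being documented looks like the sketch below (illustrative only, not part of this patch; the tree, tag index and helper are hypothetical):

	/* Track "dirty" entries with radix tree tag 0. */
	#include <linux/radix-tree.h>
	#include <linux/gfp.h>

	#define TAG_DIRTY 0	/* hypothetical tag index; must be < RADIX_TREE_MAX_TAGS */

	static RADIX_TREE(my_tree, GFP_KERNEL);

	static int insert_and_mark_dirty(unsigned long index, void *item)
	{
		int err = radix_tree_insert(&my_tree, index, item);
		if (err)
			return err;
		/* Setting a tag on a not-present item is a bug, so insert first. */
		radix_tree_tag_set(&my_tree, index, TAG_DIRTY);
		return 0;
	}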
Tag parameters are changed to unsigned, and some comments are updated. Signed-off-by: Jonathan Corbet Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 13 +++++++----- lib/radix-tree.c | 49 +++++++++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'lib') diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index c57ff2fcb30..dd83cca2800 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -45,6 +45,8 @@ do { \ (root)->rnode = NULL; \ } while (0) +#define RADIX_TREE_MAX_TAGS 2 + int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); @@ -55,15 +57,16 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, int radix_tree_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, int tag); + unsigned long index, unsigned int tag); void *radix_tree_tag_clear(struct radix_tree_root *root, - unsigned long index, int tag); + unsigned long index, unsigned int tag); int radix_tree_tag_get(struct radix_tree_root *root, - unsigned long index, int tag); + unsigned long index, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, int tag); -int radix_tree_tagged(struct radix_tree_root *root, int tag); + unsigned long first_index, unsigned int max_items, + unsigned int tag); +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) { diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1e5b17dc7e3..7097bb239e4 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -37,7 +37,6 @@ #else #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ #endif -#define RADIX_TREE_TAGS 2 #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) @@ -48,7 +47,7 @@ struct radix_tree_node { unsigned int count; void *slots[RADIX_TREE_MAP_SIZE]; - unsigned long tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS]; + unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; struct radix_tree_path { @@ -135,17 +134,20 @@ out: return ret; } -static inline void tag_set(struct radix_tree_node *node, int tag, int offset) +static inline void tag_set(struct radix_tree_node *node, unsigned int tag, + int offset) { __set_bit(offset, node->tags[tag]); } -static inline void tag_clear(struct radix_tree_node *node, int tag, int offset) +static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, + int offset) { __clear_bit(offset, node->tags[tag]); } -static inline int tag_get(struct radix_tree_node *node, int tag, int offset) +static inline int tag_get(struct radix_tree_node *node, unsigned int tag, + int offset) { return test_bit(offset, node->tags[tag]); } @@ -154,7 +156,7 @@ static inline int tag_get(struct radix_tree_node *node, int tag, int offset) * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. 
*/ -static inline int any_tag_set(struct radix_tree_node *node, int tag) +static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) { int idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { @@ -180,7 +182,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) { struct radix_tree_node *node; unsigned int height; - char tags[RADIX_TREE_TAGS]; + char tags[RADIX_TREE_MAX_TAGS]; int tag; /* Figure out what the height should be. */ @@ -197,7 +199,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) * Prepare the tag status of the top-level node for propagation * into the newly-pushed top-level node(s) */ - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { tags[tag] = 0; if (any_tag_set(root->rnode, tag)) tags[tag] = 1; @@ -211,7 +213,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) node->slots[0] = root->rnode; /* Propagate the aggregated tag info into the new root */ - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (tags[tag]) tag_set(node, tag, 0); } @@ -349,14 +351,15 @@ EXPORT_SYMBOL(radix_tree_lookup); * @index: index key * @tag: tag index * - * Set the search tag corresponging to @index in the radix tree. From + * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. From * the root all the way down to the leaf node. * * Returns the address of the tagged item. Setting a tag on a not-present * item is a bug. */ void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { unsigned int height, shift; struct radix_tree_node *slot; @@ -390,7 +393,8 @@ EXPORT_SYMBOL(radix_tree_tag_set); * @index: index key * @tag: tag index * - * Clear the search tag corresponging to @index in the radix tree. If + * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) + * corresponding to @index in the radix tree. If * this causes the leaf node to have no tags set then clear the tag in the * next-to-leaf node, etc. * @@ -398,7 +402,7 @@ EXPORT_SYMBOL(radix_tree_tag_set); * has the same return value and semantics as radix_tree_lookup(). 
*/ void *radix_tree_tag_clear(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; struct radix_tree_node *slot; @@ -450,7 +454,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key - * @tag: tag index + * @tag: tag index (< RADIX_TREE_MAX_TAGS) * * Return values: * @@ -459,7 +463,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * -1: tag present, unset */ int radix_tree_tag_get(struct radix_tree_root *root, - unsigned long index, int tag) + unsigned long index, unsigned int tag) { unsigned int height, shift; struct radix_tree_node *slot; @@ -592,7 +596,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); */ static unsigned int __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index, - unsigned int max_items, unsigned long *next_index, int tag) + unsigned int max_items, unsigned long *next_index, unsigned int tag) { unsigned int nr_found = 0; unsigned int shift; @@ -646,7 +650,7 @@ out: * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results - * @tag: the tag index + * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. Places the items at *@results and @@ -654,7 +658,8 @@ out: */ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, int tag) + unsigned long first_index, unsigned int max_items, + unsigned int tag) { const unsigned long max_index = radix_tree_maxindex(root->height); unsigned long cur_index = first_index; @@ -716,7 +721,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) struct radix_tree_node *slot; unsigned int height, shift; void *ret = NULL; - char tags[RADIX_TREE_TAGS]; + char tags[RADIX_TREE_MAX_TAGS]; int nr_cleared_tags; int tag; int offset; @@ -751,7 +756,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) * Clear all tags associated with the just-deleted item */ nr_cleared_tags = 0; - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { tags[tag] = 1; if (tag_get(pathp->node, tag, pathp->offset)) { tag_clear(pathp->node, tag, pathp->offset); @@ -763,7 +768,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) } for (pathp--; nr_cleared_tags && pathp->node; pathp--) { - for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (tags[tag]) continue; @@ -801,7 +806,7 @@ EXPORT_SYMBOL(radix_tree_delete); * @root: radix tree root * @tag: tag to test */ -int radix_tree_tagged(struct radix_tree_root *root, int tag) +int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) { struct radix_tree_node *rnode; rnode = root->rnode; -- cgit v1.2.3-70-g09d2 From ccb46000f4bb459777686611157ac0eac928704e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:08 -0800 Subject: [PATCH] cpumask: uninline first_cpu() text data bss dec hex filename before: 3490577 1322408 360000 5172985 4eeef9 vmlinux after: 3488027 1322496 360128 5170651 4ee5db vmlinux Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpumask.h | 11 ++++++----- lib/Makefile | 2 ++ 
lib/cpumask.c | 11 +++++++++++ 3 files changed, 19 insertions(+), 5 deletions(-) create mode 100644 lib/cpumask.c (limited to 'lib') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 60e56c6e03d..9b702fd24a7 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -212,11 +212,12 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } -#define first_cpu(src) __first_cpu(&(src), NR_CPUS) -static inline int __first_cpu(const cpumask_t *srcp, int nbits) -{ - return min_t(int, nbits, find_first_bit(srcp->bits, nbits)); -} +#ifdef CONFIG_SMP +int __first_cpu(const cpumask_t *srcp); +#define first_cpu(src) __first_cpu(&(src)) +#else +#define first_cpu(src) 0 +#endif #define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS) static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits) diff --git a/lib/Makefile b/lib/Makefile index 648b2c1242f..f827e3c24ec 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -7,6 +7,8 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o +lib-$(CONFIG_SMP) += cpumask.o + lib-y += kobject.o kref.o kobject_uevent.o klist.o obj-y += sort.o parser.o halfmd4.o iomap_copy.o diff --git a/lib/cpumask.c b/lib/cpumask.c new file mode 100644 index 00000000000..1560d97390d --- /dev/null +++ b/lib/cpumask.c @@ -0,0 +1,11 @@ +#include +#include +#include +#include + +int __first_cpu(const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS)); +} +EXPORT_SYMBOL(__first_cpu); + -- cgit v1.2.3-70-g09d2 From 3d18bd74a22d0bed3bc81fc64c4ba6344a10f155 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:09 -0800 Subject: [PATCH] cpumask: uninline next_cpu() text data bss dec hex filename before: 3488027 1322496 360128 5170651 4ee5db vmlinux after: 3485112 1322480 359968 5167560 4ed9c8 vmlinux 2931 bytes saved Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpumask.h | 11 ++++------- lib/cpumask.c | 5 +++++ 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9b702fd24a7..4b29e508a0b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -215,16 +215,13 @@ static inline void __cpus_shift_left(cpumask_t *dstp, #ifdef CONFIG_SMP int __first_cpu(const cpumask_t *srcp); #define first_cpu(src) __first_cpu(&(src)) +int __next_cpu(int n, const cpumask_t *srcp); +#define next_cpu(n, src) __next_cpu((n), &(src)) #else -#define first_cpu(src) 0 +#define first_cpu(src) 0 +#define next_cpu(n, src) 1 #endif -#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS) -static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits) -{ - return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1)); -} - #define cpumask_of_cpu(cpu) \ ({ \ typeof(_unused_cpumask_arg_) m; \ diff --git a/lib/cpumask.c b/lib/cpumask.c index 1560d97390d..ba2f8543052 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -9,3 +9,8 @@ int __first_cpu(const cpumask_t *srcp) } EXPORT_SYMBOL(__first_cpu); +int __next_cpu(int n, const cpumask_t *srcp) +{ + return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); +} +EXPORT_SYMBOL(__next_cpu); -- cgit v1.2.3-70-g09d2 From 8630282070b4a52b12cfa514ba8558e2f3d56360 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:09 -0800 Subject: [PATCH] cpumask: uninline highest_possible_processor_id() 
Shrinks the only caller (net/bridge/netfilter/ebtables.c) by 174 bytes. Also, optimise highest_possible_processor_id() out of existence on CONFIG_SMP=n. Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpumask.h | 15 ++++++--------- lib/cpumask.c | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 4b29e508a0b..f770039344c 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -396,6 +396,12 @@ extern cpumask_t cpu_present_map; #define cpu_present(cpu) ((cpu) == 0) #endif +#ifdef CONFIG_SMP +int highest_possible_processor_id(void); +#else +#define highest_possible_processor_id() 0 +#endif + #define any_online_cpu(mask) \ ({ \ int cpu; \ @@ -409,14 +415,5 @@ extern cpumask_t cpu_present_map; #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) -/* Find the highest possible smp_processor_id() */ -#define highest_possible_processor_id() \ -({ \ - unsigned int cpu, highest = 0; \ - for_each_cpu_mask(cpu, cpu_possible_map) \ - highest = cpu; \ - highest; \ -}) - #endif /* __LINUX_CPUMASK_H */ diff --git a/lib/cpumask.c b/lib/cpumask.c index ba2f8543052..ea25a034276 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -14,3 +14,20 @@ int __next_cpu(int n, const cpumask_t *srcp) return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1)); } EXPORT_SYMBOL(__next_cpu); + +/* + * Find the highest possible smp_processor_id() + * + * Note: if we're prepared to assume that cpu_possible_map never changes + * (reasonable) then this function should cache its return value. + */ +int highest_possible_processor_id(void) +{ + unsigned int cpu; + unsigned highest = 0; + + for_each_cpu_mask(cpu, cpu_possible_map) + highest = cpu; + return highest; +} +EXPORT_SYMBOL(highest_possible_processor_id); -- cgit v1.2.3-70-g09d2 From 96a9b4d31eba4722ba7aad2cc15118a7799f499f Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 25 Mar 2006 03:08:10 -0800 Subject: [PATCH] cpumask: uninline any_online_cpu() text data bss dec hex filename before: 3605597 1363528 363328 5332453 515de5 vmlinux after: 3605295 1363612 363200 5332107 515c8b vmlinux 218 bytes saved. Also, optimise any_online_cpu() out of existence on CONFIG_SMP=n. This function seems inefficient. Can't we simply AND the two masks, then use find_first_bit()? 
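That alternative might look like the following minimal sketch (hypothetical, not what this patch implements; note it costs a cpumask_t on the stack):

	#include <linux/cpumask.h>

	/* Hypothetical AND-then-scan variant of __any_online_cpu(). */
	int __any_online_cpu(const cpumask_t *mask)
	{
		cpumask_t tmp;

		cpus_and(tmp, *mask, cpu_online_map);
		return first_cpu(tmp);	/* NR_CPUS if no online CPU is in *mask */
	}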
Cc: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpumask.h | 13 +++---------- lib/cpumask.c | 12 ++++++++++++ 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f770039344c..99e6115d8e5 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -398,22 +398,15 @@ extern cpumask_t cpu_present_map; #ifdef CONFIG_SMP int highest_possible_processor_id(void); +#define any_online_cpu(mask) __any_online_cpu(&(mask)) +int __any_online_cpu(const cpumask_t *mask); #else #define highest_possible_processor_id() 0 +#define any_online_cpu(mask) 0 #endif -#define any_online_cpu(mask) \ -({ \ - int cpu; \ - for_each_cpu_mask(cpu, (mask)) \ - if (cpu_online(cpu)) \ - break; \ - cpu; \ -}) - #define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) - #endif /* __LINUX_CPUMASK_H */ diff --git a/lib/cpumask.c b/lib/cpumask.c index ea25a034276..3a67dc5ada7 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -31,3 +31,15 @@ int highest_possible_processor_id(void) return highest; } EXPORT_SYMBOL(highest_possible_processor_id); + +int __any_online_cpu(const cpumask_t *mask) +{ + int cpu; + + for_each_cpu_mask(cpu, *mask) { + if (cpu_online(cpu)) + break; + } + return cpu; +} +EXPORT_SYMBOL(__any_online_cpu); -- cgit v1.2.3-70-g09d2 From 6a0f03e0d35c10e07f1160ca75fc9a367931e38b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 25 Mar 2006 16:32:01 +0100 Subject: [PATCH] x86_64: Don't enable CONFIG_UNWIND_INFO by default for DEBUG_KERNEL DEBUG_KERNEL is often enabled just for sysrq, but this doesn't mean the user wants more heavyweight debugging information. Cc: jbeulich@novell.com Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 1 - 1 file changed, 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0bda3c5259f..7e70ab13e19 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -203,7 +203,6 @@ config UNWIND_INFO bool "Compile the kernel with frame unwind information" depends on !IA64 depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850) - default DEBUG_KERNEL help If you say Y here the resulting kernel image will be slightly larger but not slower, and it will give very useful debugging information. 
-- cgit v1.2.3-70-g09d2 From c7f612cdf091def01454e7e132c7d7a3f419fbc4 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:11 -0800 Subject: [PATCH] bitops: generic find_{next,first}{,_zero}_bit() This patch introduces the C-language equivalents of the functions below: unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); unsigned long find_first_bit(const unsigned long *addr, unsigned long size); In include/asm-generic/bitops/find.h This code largely copied from: arch/powerpc/lib/bitops.c Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bitops/find.h | 13 +++++ lib/find_next_bit.c | 112 +++++++++++++++++++++++++++----------- 2 files changed, 94 insertions(+), 31 deletions(-) create mode 100644 include/asm-generic/bitops/find.h (limited to 'lib') diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 00000000000..72a51e5a12e --- /dev/null +++ b/include/asm-generic/bitops/find.h @@ -0,0 +1,13 @@ +#ifndef _ASM_GENERIC_BITOPS_FIND_H_ +#define _ASM_GENERIC_BITOPS_FIND_H_ + +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); + +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) + +#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index c05b4b19cf6..9c90853b447 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -11,48 +11,98 @@ #include #include +#include -int find_next_bit(const unsigned long *addr, int size, int offset) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) { - const unsigned long *base; - const int NBITS = sizeof(*addr) * 8; + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; - base = addr; + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; if (offset) { - int suboffset; - - addr += offset / NBITS; - - suboffset = offset % NBITS; - if (suboffset) { - tmp = *addr; - tmp >>= suboffset; - if (tmp) - goto finish; - } - - addr++; + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = *p; - while ((tmp = *addr) == 0) - addr++; +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope.
*/ +found_middle: + return result + __ffs(tmp); +} - offset = (addr - base) * NBITS; +EXPORT_SYMBOL(find_next_bit); - finish: - /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ - while (!(tmp & 0xff)) { - offset += 8; - tmp >>= 8; - } +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. + */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; - while (!(tmp & 1)) { - offset++; - tmp >>= 1; + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; } + if (!size) + return result; + tmp = *p; - return offset; +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); } -EXPORT_SYMBOL(find_next_bit); +EXPORT_SYMBOL(find_next_zero_bit); -- cgit v1.2.3-70-g09d2 From 3b9ed1a5d2d121f32d2cb4f2b05f1fc57c99c946 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:13 -0800 Subject: [PATCH] bitops: generic hweight{64,32,16,8}() This patch introduces the C-language equivalents of the functions below: unsigned int hweight32(unsigned int w); unsigned int hweight16(unsigned int w); unsigned int hweight8(unsigned int w); unsigned long hweight64(__u64 w); In include/asm-generic/bitops/hweight.h This code largely copied from: include/linux/bitops.h Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bitops/hweight.h | 9 ++++++ lib/Makefile | 1 + lib/hweight.c | 54 ++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 include/asm-generic/bitops/hweight.h create mode 100644 lib/hweight.c (limited to 'lib') diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 00000000000..8be6f17d6a5 --- /dev/null +++ b/include/asm-generic/bitops/hweight.h @@ -0,0 +1,9 @@ +#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ + +extern unsigned int hweight32(unsigned int w); +extern unsigned int hweight16(unsigned int w); +extern unsigned int hweight8(unsigned int w); +extern unsigned long hweight64(__u64 w); + +#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/lib/Makefile b/lib/Makefile index f827e3c24ec..b830c9a1554 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o +lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o diff --git a/lib/hweight.c b/lib/hweight.c new file mode 100644 index 00000000000..721a4b8b4fb --- /dev/null +++ b/lib/hweight.c @@ -0,0 +1,54 @@ +#include +#include + +/** + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The 
Hamming Weight of a number is the total number of bits set in it. + */ + +unsigned int hweight32(unsigned int w) +{ + unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); + res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); + return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); +} +EXPORT_SYMBOL(hweight32); + +unsigned int hweight16(unsigned int w) +{ + unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); + return (res & 0x00FF) + ((res >> 8) & 0x00FF); +} +EXPORT_SYMBOL(hweight16); + +unsigned int hweight8(unsigned int w) +{ + unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res & 0x0F) + ((res >> 4) & 0x0F); +} +EXPORT_SYMBOL(hweight8); + +unsigned long hweight64(__u64 w) +{ +#if BITS_PER_LONG == 32 + return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); +#elif BITS_PER_LONG == 64 + u64 res; + res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); + res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); + res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); + return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); +#else +#error BITS_PER_LONG not defined +#endif +} +EXPORT_SYMBOL(hweight64); -- cgit v1.2.3-70-g09d2 From 930ae745f50088279fdc06057a429f16495b53a2 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:15 -0800 Subject: [PATCH] bitops: generic ext2_{set,clear,test,find_first_zero,find_next_zero}_bit() This patch introduces the C-language equivalents of the functions below: int ext2_set_bit(int nr, volatile unsigned long *addr); int ext2_clear_bit(int nr, volatile unsigned long *addr); int ext2_test_bit(int nr, const volatile unsigned long *addr); unsigned long ext2_find_first_zero_bit(const unsigned long *addr, unsigned long size); unsigned long ext2_find_next_zero_bit(const unsigned long *addr, unsigned long size); In include/asm-generic/bitops/ext2-non-atomic.h This code largely copied from: include/asm-powerpc/bitops.h include/asm-parisc/bitops.h Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/bitops/ext2-non-atomic.h | 18 +++++++ include/asm-generic/bitops/le.h | 53 ++++++++++++++++++++ lib/find_next_bit.c | 73 ++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+) create mode 100644 include/asm-generic/bitops/ext2-non-atomic.h create mode 100644 include/asm-generic/bitops/le.h (limited to 'lib') diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h new file mode 100644 index 00000000000..1697404afa0 --- /dev/null +++ b/include/asm-generic/bitops/ext2-non-atomic.h @@ -0,0 +1,18 @@ +#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ +#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ + +#include + +#define ext2_set_bit(nr,addr) \ + generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) +#define ext2_clear_bit(nr,addr) \ + generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) + +#define ext2_test_bit(nr,addr) \ + generic_test_le_bit((nr),(unsigned long *)(addr)) +#define ext2_find_first_zero_bit(addr, size) \ +
generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) +#define ext2_find_next_zero_bit(addr, size, off) \ + generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) + +#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 00000000000..b9c7e5d2d2a --- /dev/null +++ b/include/asm-generic/bitops/le.h @@ -0,0 +1,53 @@ +#ifndef _ASM_GENERIC_BITOPS_LE_H_ +#define _ASM_GENERIC_BITOPS_LE_H_ + +#include +#include + +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#if defined(__LITTLE_ENDIAN) + +#define generic_test_le_bit(nr, addr) test_bit(nr, addr) +#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) +#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) + +#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) +#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) + +#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) +#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) + +#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) + +#elif defined(__BIG_ENDIAN) + +#define generic_test_le_bit(nr, addr) \ + test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___set_le_bit(nr, addr) \ + __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___clear_le_bit(nr, addr) \ + __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define generic_test_and_set_le_bit(nr, addr) \ + test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic_test_and_clear_le_bit(nr, addr) \ + test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define generic___test_and_set_le_bit(nr, addr) \ + __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define generic___test_and_clear_le_bit(nr, addr) \ + __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +#else +#error "Please fix " +#endif + +#define generic_find_first_zero_le_bit(addr, size) \ + generic_find_next_zero_le_bit((addr), (size), 0) + +#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 9c90853b447..bda0d71a251 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -12,6 +12,7 @@ #include #include #include +#include #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) @@ -106,3 +107,75 @@ found_middle: } EXPORT_SYMBOL(find_next_zero_bit); + +#ifdef __BIG_ENDIAN + +/* include/linux/byteorder does not support "unsigned long" type */ +static inline unsigned long ext2_swabp(const unsigned long * x) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64p((u64 *) x); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32p((u32 *) x); +#else +#error BITS_PER_LONG not defined +#endif +} + +/* include/linux/byteorder doesn't support "unsigned long" type */ +static inline unsigned long ext2_swab(const unsigned long y) +{ +#if BITS_PER_LONG == 64 + return (unsigned long) __swab64((u64) y); +#elif BITS_PER_LONG == 32 + return (unsigned long) __swab32((u32) y); +#else +#error BITS_PER_LONG not defined +#endif +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long 
tmp; + + if (offset >= size) + return size; + size -= result; + offset &= (BITS_PER_LONG - 1UL); + if (offset) { + tmp = ext2_swabp(p++); + tmp |= (~0UL >> (BITS_PER_LONG - offset)); + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + + while (size & ~(BITS_PER_LONG - 1)) { + if (~(tmp = *(p++))) + goto found_middle_swap; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = ext2_swabp(p); +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. Skip ffz */ +found_middle: + return result + ffz(tmp); + +found_middle_swap: + return result + ffz(ext2_swab(tmp)); +} + +EXPORT_SYMBOL(generic_find_next_zero_le_bit); + +#endif /* __BIG_ENDIAN */ -- cgit v1.2.3-70-g09d2 From 37d54111c133bea05fbae9dfe6d3d61a1b19c09b Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:39:56 -0800 Subject: [PATCH] bitops: hweight() related cleanup By defining generic hweight*() routines, hweight64() becomes available on all architectures, and hweight_long() uses the architecture-optimized hweight32() or hweight64(). Those two facts make the two cleanups below possible. Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/ieee1394/highlevel.c | 3 +-- lib/bitmap.c | 19 ++----------------- 2 files changed, 3 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 734b121a055..491e6032bde 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c @@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, u64 align_mask = ~(alignment - 1); if ((alignment & 3) || (alignment > 0x800000000000ULL) || - ((hweight32(alignment >> 32) + - hweight32(alignment & 0xffffffff) != 1))) { + (hweight64(alignment) != 1)) { HPSB_ERR("%s called with invalid alignment: 0x%048llx", __FUNCTION__, (unsigned long long)alignment); return retval; diff --git a/lib/bitmap.c b/lib/bitmap.c index 8acab0e176e..ed2ae3b0cd0 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1, } EXPORT_SYMBOL(__bitmap_subset); -#if BITS_PER_LONG == 32 int __bitmap_weight(const unsigned long *bitmap, int bits) { int k, w = 0, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; k++) - w += hweight32(bitmap[k]); + w += hweight_long(bitmap[k]); if (bits % BITS_PER_LONG) - w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); return w; } -#else -int __bitmap_weight(const unsigned long *bitmap, int bits) -{ - int k, w = 0, lim = bits/BITS_PER_LONG; - - for (k = 0; k < lim; k++) - w += hweight64(bitmap[k]); - - if (bits % BITS_PER_LONG) - w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); - - return w; -} -#endif EXPORT_SYMBOL(__bitmap_weight); /* -- cgit v1.2.3-70-g09d2 From f9b4192923fa6e38331e88214b1fe5fc21583fcc Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 26 Mar 2006 01:40:00 -0800 Subject: [PATCH] bitops: hweight() speedup wrote: This is an extremely well-known technique.
You can see a similar version that uses a multiply for the last few steps at http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel which refers to "Software Optimization Guide for AMD Athlon 64 and Opteron Processors" http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25112.PDF It's section 8.6, "Efficient Implementation of Population-Count Function in 32-bit Mode", pages 179-180. It uses the name that I am more familiar with, "popcount" (population count), although "Hamming weight" also makes sense. Anyway, the proof of correctness proceeds as follows: b = a - ((a >> 1) & 0x55555555); c = (b & 0x33333333) + ((b >> 2) & 0x33333333); d = (c + (c >> 4)) & 0x0f0f0f0f; #if SLOW_MULTIPLY e = d + (d >> 8); f = e + (e >> 16); return f & 63; #else /* Useful if multiply takes at most 4 cycles */ return (d * 0x01010101) >> 24; #endif The input value a can be thought of as 32 1-bit fields each holding their own hamming weight. Now look at it as 16 2-bit fields. Each 2-bit field a1..a0 has the value 2*a1 + a0. This can be converted into the hamming weight of the 2-bit field a1+a0 by subtracting a1. That's what the (a >> 1) & mask subtraction does. Since there can be no borrows, you can just do it all at once. Enumerating the 4 possible cases: 0b00 = 0 -> 0 - 0 = 0 0b01 = 1 -> 1 - 0 = 1 0b10 = 2 -> 2 - 1 = 1 0b11 = 3 -> 3 - 1 = 2 The next step consists of breaking up b (made of 16 2-bit fields) into even and odd halves and adding them into 4-bit fields. Since the largest possible sum is 2+2 = 4, which will not fit into a 2-bit field, the 2-bit fields have to be masked before they are added. After this point, the masking can be delayed. Each 4-bit field holds a population count from 0..4, taking at most 3 bits. These numbers can be added without overflowing a 4-bit field, so we can compute c + (c >> 4), and only then mask off the unwanted bits. This produces d, a number made of 4 8-bit fields, each in the range 0..8. From this point, we can shift and add d multiple times without overflowing an 8-bit field, and only do a final mask at the end. The number to mask with has to be at least 63 (so that 32 won't be truncated), but can also be 128 or 255. The x86 has a special encoding for signed immediate byte values -128..127, so the value of 255 is slower. On other processors, a special "sign extend byte" instruction might be faster. On a processor with fast integer multiplies (Athlon but not P4), you can reduce the final few serially dependent instructions to a single integer multiply. Consider d to be 4 8-bit values d3, d2, d1 and d0, each in the range 0..8. The multiply forms the partial products: d3 d2 d1 d0 d3 d2 d1 d0 d3 d2 d1 d0 + d3 d2 d1 d0 ---------------------- e3 e2 e1 e0 Where e3 = d3 + d2 + d1 + d0. e2, e1 and e0 obviously cannot generate any carries.
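Folding that multiply variant into the 32-bit routine gives roughly the following (an illustrative sketch of the technique quoted above, not the code this patch commits; the patch keeps the portable shift-and-add form):

	/* popcount via a final multiply; assumes a fast 32-bit multiply */
	static inline unsigned int hweight32_mul(unsigned int w)
	{
		unsigned int res = w - ((w >> 1) & 0x55555555);		/* 16 x 2-bit sums */
		res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 8 x 4-bit sums */
		res = (res + (res >> 4)) & 0x0F0F0F0F;			/* 4 x 8-bit sums */
		return (res * 0x01010101) >> 24;	/* top byte sums all four bytes */
	}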
Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/hweight.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'lib') diff --git a/lib/hweight.c b/lib/hweight.c index 721a4b8b4fb..43825767170 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -10,28 +10,28 @@ unsigned int hweight32(unsigned int w) { - unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + unsigned int res = w - ((w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); - res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); - res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); - return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; } EXPORT_SYMBOL(hweight32); unsigned int hweight16(unsigned int w) { - unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); + unsigned int res = w - ((w >> 1) & 0x5555); res = (res & 0x3333) + ((res >> 2) & 0x3333); - res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); - return (res & 0x00FF) + ((res >> 8) & 0x00FF); + res = (res + (res >> 4)) & 0x0F0F; + return (res + (res >> 8)) & 0x00FF; } EXPORT_SYMBOL(hweight16); unsigned int hweight8(unsigned int w) { - unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); + unsigned int res = w - ((w >> 1) & 0x55); res = (res & 0x33) + ((res >> 2) & 0x33); - return (res & 0x0F) + ((res >> 4) & 0x0F); + return (res + (res >> 4)) & 0x0F; } EXPORT_SYMBOL(hweight8); @@ -40,13 +40,12 @@ unsigned long hweight64(__u64 w) #if BITS_PER_LONG == 32 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); #elif BITS_PER_LONG == 64 - u64 res; - res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); + __u64 res = w - ((w >> 1) & 0x5555555555555555ul); res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); - res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); - res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); - res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); - return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); + res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; + res = res + (res >> 8); + res = res + (res >> 16); + return (res + (res >> 32)) & 0x00000000000000FFul; #else #error BITS_PER_LONG not defined #endif -- cgit v1.2.3-70-g09d2 From ae36b883d29e53b6083ed3d1d44f254cee7507d3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 26 Mar 2006 14:38:54 +0200 Subject: [PATCH] Don't make debugfs depend on DEBUG_KERNEL We use it generally now, at least blktrace isn't a specific debug kernel feature. Signed-off-by: Jens Axboe --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7e70ab13e19..6e8a60f67c7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -172,7 +172,7 @@ config DEBUG_IOREMAP config DEBUG_FS bool "Debug Filesystem" - depends on DEBUG_KERNEL && SYSFS + depends on SYSFS help debugfs is a virtual file system that kernel developers use to put debugging files into. Enable this option to be able to read and -- cgit v1.2.3-70-g09d2
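As a closing illustration of what DEBUG_FS exposes once it no longer depends on DEBUG_KERNEL, a minimal debugfs user of this era might look like the sketch below (all names are hypothetical; only debugfs calls that existed at the time are used):

	#include <linux/debugfs.h>
	#include <linux/module.h>

	static struct dentry *my_dir, *my_file;
	static u32 my_counter;	/* hypothetical value to expose */

	static int __init my_init(void)
	{
		/* appears under the debugfs mount, e.g. /sys/kernel/debug/mydrv/ */
		my_dir = debugfs_create_dir("mydrv", NULL);
		if (!my_dir)
			return -ENOMEM;
		my_file = debugfs_create_u32("counter", 0644, my_dir, &my_counter);
		return 0;
	}

	static void __exit my_exit(void)
	{
		debugfs_remove(my_file);	/* no recursive remove yet; drop files, then the dir */
		debugfs_remove(my_dir);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");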