| Field | Value | Date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2008-10-12 12:39:30 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-10-12 12:39:50 +0200 |
| commit | 4c7145a1ec1bb789d5f07e47510e8bda546a7c4a (patch) | |
| tree | e2767b77e5413473a3bba302237f4669a203f183 /lib | |
| parent | 74e91604b2452c15bbe72d77b37cf47ed0310d13 (diff) | |
| parent | fd048088306656824958e7783ffcee27e241b361 (diff) | |
Merge branch 'linus' into x86/spinlocks
Done to prevent this failure of an Octopus merge:

```
Added arch/arm/include/asm/byteorder.h in both, but differently.
ERROR: Merge conflict in arch/arm/include/asm/byteorder.h
Auto-merging include/asm-x86/spinlock.h
ERROR: Merge conflict in include/asm-x86/spinlock.h
fatal: merge program failed
```
Diffstat (limited to 'lib')

| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | lib/Kconfig.debug | 48 |
| -rw-r--r-- | lib/Makefile | 3 |
| -rw-r--r-- | lib/debugobjects.c | 31 |
| -rw-r--r-- | lib/iommu-helper.c | 5 |
| -rw-r--r-- | lib/klist.c | 96 |
| -rw-r--r-- | lib/percpu_counter.c | 8 |
| -rw-r--r-- | lib/scatterlist.c | 4 |
| -rw-r--r-- | lib/string_helpers.c | 64 |
| -rw-r--r-- | lib/swiotlb.c | 49 |
| -rw-r--r-- | lib/vsprintf.c | 11 |
10 files changed, 236 insertions, 83 deletions
```diff
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e37..ce697e0b319 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE
           Say N here if you want the RCU torture tests to start only
           after being manually enabled via /proc.
 
+config RCU_CPU_STALL_DETECTOR
+        bool "Check for stalled CPUs delaying RCU grace periods"
+        depends on CLASSIC_RCU
+        default n
+        help
+          This option causes RCU to printk information on which
+          CPUs are delaying the current grace period, but only when
+          the grace period extends for excessive time periods.
+
+          Say Y if you want RCU to perform such checks.
+
+          Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
         bool "Kprobes sanity tests"
         depends on DEBUG_KERNEL
@@ -624,6 +637,28 @@ config BACKTRACE_SELF_TEST
 
           Say N if you are unsure.
 
+config DEBUG_BLOCK_EXT_DEVT
+        bool "Force extended block device numbers and spread them"
+        depends on DEBUG_KERNEL
+        depends on BLOCK
+        default n
+        help
+          Conventionally, block device numbers are allocated from a
+          predetermined contiguous area.  However, the extended block
+          area may introduce non-contiguous block device numbers.  This
+          option forces most block device numbers to be allocated from
+          the extended space and spreads them to discover kernel or
+          userland code paths which assume predetermined contiguous
+          device number allocation.
+
+          Note that turning on this debug option shuffles all the
+          device numbers for all IDE and SCSI devices including libata
+          ones, so a root partition specified using a device number
+          directly (via rdev or root=MAJ:MIN) won't work anymore.
+          Textual device names (root=/dev/sdXn) will continue to work.
+
+          Say N if you are unsure.
+
 config LKDTM
         tristate "Linux Kernel Dump Test Tool Module"
         depends on DEBUG_KERNEL
@@ -661,10 +696,21 @@ config FAIL_PAGE_ALLOC
 
 config FAIL_MAKE_REQUEST
         bool "Fault-injection capability for disk IO"
-        depends on FAULT_INJECTION
+        depends on FAULT_INJECTION && BLOCK
         help
           Provide fault-injection capability for disk IO.
 
+config FAIL_IO_TIMEOUT
+        bool "Fault-injection capability for faking disk interrupts"
+        depends on FAULT_INJECTION && BLOCK
+        help
+          Provide fault-injection capability on end IO handling.  This
+          will make the block layer "forget" an interrupt as configured,
+          thus exercising the error handling.
+
+          Only works with drivers that use the generic timeout handling,
+          for others it won't do anything.
+
 config FAULT_INJECTION_DEBUG_FS
         bool "Debugfs entries for fault-injection capabilities"
         depends on FAULT_INJECTION && SYSFS && DEBUG_FS
```

```diff
diff --git a/lib/Makefile b/lib/Makefile
index 3b1f94bbe9d..44001af76a7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
 lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
-         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+         string_helpers.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
```
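The DEBUG_BLOCK_EXT_DEVT help text above is aimed at code that bakes in assumptions such as "sda is always 8:0". As a standalone illustration of what those assumptions are about, here is a userspace sketch of how Linux packs a device number; the constants mirror `<linux/kdev_t.h>` (where MINORBITS is 20), but the program itself is invented for the demo, not kernel API:

```c
#include <assert.h>
#include <stdint.h>

#define MINORBITS       20
#define MINORMASK       ((1U << MINORBITS) - 1)

#define MKDEV(ma, mi)   (((uint32_t)(ma) << MINORBITS) | (mi))
#define MAJOR(dev)      ((uint32_t)(dev) >> MINORBITS)
#define MINOR(dev)      ((uint32_t)(dev) & MINORMASK)

int main(void)
{
        /* classically, sda is major 8, minor 0 */
        uint32_t dev = MKDEV(8, 0);

        assert(MAJOR(dev) == 8 && MINOR(dev) == 0);

        /* an "extended" devt: same major, a minor far past the
         * conventional small, contiguous range -- exactly what code
         * flushed out by DEBUG_BLOCK_EXT_DEVT fails to expect */
        dev = MKDEV(8, 0x80000);
        assert(MAJOR(dev) == 8 && MINOR(dev) == 0x80000);
        return 0;
}
```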
```diff
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde762d..e3ab374e133 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_object(struct debug_obj *obj)
 {
         unsigned long idx = (unsigned long)(obj - obj_static_pool);
+        unsigned long flags;
 
         if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&obj->node, &obj_pool);
                 obj_pool_free++;
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
         } else {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
                 kmem_cache_free(obj_cache, obj);
         }
 }
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
 {
         struct debug_bucket *db = obj_hash;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj *obj;
         unsigned long flags;
         int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
 
         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                 spin_lock_irqsave(&db->lock, flags);
-                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                hlist_move_list(&db->list, &freelist);
+                spin_unlock_irqrestore(&db->lock, flags);
+
+                /* Now free them */
+                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                         hlist_del(&obj->node);
                         free_object(obj);
                 }
-                spin_unlock_irqrestore(&db->lock, flags);
         }
 }
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
                 return;
         default:
                 hlist_del(&obj->node);
+                spin_unlock_irqrestore(&db->lock, flags);
                 free_object(obj);
-                break;
+                return;
         }
 out_unlock:
         spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj_descr *descr;
         enum debug_obj_state state;
         struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
                         goto repeat;
                 default:
                         hlist_del(&obj->node);
-                        free_object(obj);
+                        hlist_add_head(&obj->node, &freelist);
                         break;
                 }
         }
         spin_unlock_irqrestore(&db->lock, flags);
+
+        /* Now free them */
+        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                hlist_del(&obj->node);
+                free_object(obj);
+        }
+
         if (cnt > debug_objects_maxchain)
                 debug_objects_maxchain = cnt;
 }
```

```diff
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77..5d90074dca7 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
         return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-                                int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
         unsigned long end = i + len;
         while (i < end) {
@@ -64,7 +63,7 @@ again:
                         start = index + 1;
                         goto again;
                 }
-                set_bit_area(map, index, nr);
+                iommu_area_reserve(map, index, nr);
         }
         return index;
 }
```
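The debugobjects hunks above are a lock-scope fix: rather than calling free_object() — which takes pool_lock and may call kmem_cache_free() — while still holding a hash-bucket lock, entries are spliced onto a private freelist via hlist_move_list() and freed only after the bucket lock is dropped. A minimal userspace sketch of that splice-then-free pattern, with a pthread mutex standing in for the spinlock and invented names throughout:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

static struct node *bucket;             /* like one obj_hash bucket */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_bucket(void)
{
        struct node *batch, *n;

        /* detach the whole chain while holding the lock... */
        pthread_mutex_lock(&bucket_lock);
        batch = bucket;                 /* cf. hlist_move_list() */
        bucket = NULL;
        pthread_mutex_unlock(&bucket_lock);

        /* ...then free the nodes with the lock already dropped, so the
         * allocator is never entered under bucket_lock */
        while ((n = batch) != NULL) {
                batch = n->next;
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->next = bucket;
                bucket = n;
        }
        drain_bucket();
        puts("drained outside the lock");
        return 0;
}
```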
```diff
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa..bbdd3015c2c 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
 #include <linux/klist.h>
 #include <linux/module.h>
 
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD              1LU
+#define KNODE_KLIST_MASK        ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+        return (struct klist *)
+                ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+        return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+        knode->n_klist = klist;
+        /* no knode deserves to start its life dead */
+        WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+        /* and no knode should die twice ever either, see we're very humane */
+        WARN_ON(knode_dead(knode));
+        *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
 
 /**
  * klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
         INIT_LIST_HEAD(&n->n_node);
         init_completion(&n->n_removed);
         kref_init(&n->n_ref);
-        n->n_klist = k;
+        knode_set_klist(n, k);
         if (k->get)
                 k->get(n);
 }
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
  */
 void klist_add_after(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
 
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
  */
 void klist_add_before(struct klist_node *n, struct klist_node *pos)
 {
-        struct klist *k = pos->n_klist;
+        struct klist *k = knode_klist(pos);
 
         klist_node_init(k, n);
         spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
 {
         struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
+        WARN_ON(!knode_dead(n));
         list_del(&n->n_node);
         complete(&n->n_removed);
-        n->n_klist = NULL;
+        knode_set_klist(n, NULL);
 }
 
 static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
         return kref_put(&n->n_ref, klist_release);
 }
 
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
 {
-        struct klist *k = n->n_klist;
+        struct klist *k = knode_klist(n);
         void (*put)(struct klist_node *) = k->put;
 
         spin_lock(&k->k_lock);
+        if (kill)
+                knode_kill(n);
         if (!klist_dec_and_del(n))
                 put = NULL;
         spin_unlock(&k->k_lock);
         if (put)
                 put(n);
 }
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+        klist_put(n, true);
+}
 EXPORT_SYMBOL_GPL(klist_del);
 
 /**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                           struct klist_node *n)
 {
         i->i_klist = k;
-        i->i_head = &k->k_list;
         i->i_cur = n;
         if (n)
                 kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
 void klist_iter_exit(struct klist_iter *i)
 {
         if (i->i_cur) {
-                klist_del(i->i_cur);
+                klist_put(i->i_cur, false);
                 i->i_cur = NULL;
         }
 }
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
  */
 struct klist_node *klist_next(struct klist_iter *i)
 {
-        struct list_head *next;
-        struct klist_node *lnode = i->i_cur;
-        struct klist_node *knode = NULL;
         void (*put)(struct klist_node *) = i->i_klist->put;
+        struct klist_node *last = i->i_cur;
+        struct klist_node *next;
 
         spin_lock(&i->i_klist->k_lock);
-        if (lnode) {
-                next = lnode->n_node.next;
-                if (!klist_dec_and_del(lnode))
+
+        if (last) {
+                next = to_klist_node(last->n_node.next);
+                if (!klist_dec_and_del(last))
                         put = NULL;
         } else
-                next = i->i_head->next;
+                next = to_klist_node(i->i_klist->k_list.next);
 
-        if (next != i->i_head) {
-                knode = to_klist_node(next);
-                kref_get(&knode->n_ref);
+        i->i_cur = NULL;
+        while (next != to_klist_node(&i->i_klist->k_list)) {
+                if (likely(!knode_dead(next))) {
+                        kref_get(&next->n_ref);
+                        i->i_cur = next;
+                        break;
+                }
+                next = to_klist_node(next->n_node.next);
         }
-        i->i_cur = knode;
+
         spin_unlock(&i->i_klist->k_lock);
-        if (put && lnode)
-                put(lnode);
-        return knode;
+
+        if (put && last)
+                put(last);
+        return i->i_cur;
 }
 EXPORT_SYMBOL_GPL(klist_next);
```
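The klist rework is built on pointer tagging: a struct klist is more than 1-byte aligned, so the lowest bit of the n_klist back-pointer is always zero and can be borrowed as a "dead" mark, which lets klist_next() skip nodes that are mid-removal without growing the node. A standalone sketch of the same trick, assuming only that the tagged pointer is at least 2-byte aligned (the types and names are invented for the demo):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NODE_DEAD       1UL
#define NODE_PTR_MASK   (~NODE_DEAD)

struct list;                            /* opaque owner, like struct klist */

struct node {
        struct list *owner;             /* tagged, like klist_node.n_klist */
};

static struct list *node_owner(const struct node *n)
{
        return (struct list *)((uintptr_t)n->owner & NODE_PTR_MASK);
}

static bool node_dead(const struct node *n)
{
        return (uintptr_t)n->owner & NODE_DEAD;
}

static void node_kill(struct node *n)
{
        n->owner = (struct list *)((uintptr_t)n->owner | NODE_DEAD);
}

int main(void)
{
        struct list *l = (struct list *)0x1000; /* stand-in aligned pointer */
        struct node n = { .owner = l };

        assert(!node_dead(&n));
        node_kill(&n);
        assert(node_dead(&n));          /* the flag is set... */
        assert(node_owner(&n) == l);    /* ...and the pointer still decodes */
        puts("tagged-pointer sketch OK");
        return 0;
}
```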
```diff
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6..a8663890a88 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
         s64 ret;
         int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
         for_each_online_cpu(cpu) {
                 s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                 ret += *pcount;
-                if (set)
-                        *pcount = 0;
+                *pcount = 0;
         }
-        if (set)
-                fbc->count = ret;
+        fbc->count = ret;
         spin_unlock(&fbc->lock);
 
         return ret;
```

```diff
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 876ba6d5b67..8d2688ff135 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -422,9 +422,12 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 {
         unsigned int offset = 0;
         struct sg_mapping_iter miter;
+        unsigned long flags;
 
         sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
 
+        local_irq_save(flags);
+
         while (sg_miter_next(&miter) && offset < buflen) {
                 unsigned int len;
 
@@ -442,6 +445,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
         sg_miter_stop(&miter);
 
+        local_irq_restore(flags);
         return offset;
 }
```
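For context on the percpu_counter hunk: the counter keeps one shared count plus a small per-CPU delta per processor, so reads are cheap but approximate, while __percpu_counter_sum() folds the deltas back into the shared count for an exact answer — and after this change it always folds, which is why the `set` parameter disappears. A userspace sketch of the idea, with a plain array standing in for per-CPU data and the fbc->lock locking elided:

```c
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct counter {
        int64_t count;                  /* cf. fbc->count */
        int32_t pcount[NR_CPUS];        /* cf. the per-CPU s32 deltas */
};

/* cheap, possibly stale read (cf. percpu_counter_read()) */
static int64_t counter_read(const struct counter *c)
{
        return c->count;
}

/* accurate but slower: fold every delta into ->count and zero it,
 * which the patched __percpu_counter_sum() now does unconditionally */
static int64_t counter_sum(struct counter *c)
{
        int64_t ret = c->count;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                ret += c->pcount[cpu];
                c->pcount[cpu] = 0;
        }
        c->count = ret;
        return ret;
}

int main(void)
{
        struct counter c = { .count = 100, .pcount = { 3, -1, 7, 0 } };

        printf("read: %lld\n", (long long)counter_read(&c));   /* 100 */
        printf("sum:  %lld\n", (long long)counter_sum(&c));    /* 109 */
        printf("read: %lld\n", (long long)counter_read(&c));   /* 109 */
        return 0;
}
```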
```diff
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 00000000000..8347925030f
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size:       The size to be converted
+ * @units:      units to use (powers of 1000 or 1024)
+ * @buf:        buffer to format to
+ * @len:        length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units.  Returns 0 on success or
+ * error on failure.  @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+                    char *buf, int len)
+{
+        const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
+                                   "EB", "ZB", "YB", NULL};
+        const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+                                 "EiB", "ZiB", "YiB", NULL };
+        const char **units_str[] = {
+                [STRING_UNITS_10] = units_10,
+                [STRING_UNITS_2] = units_2,
+        };
+        const int divisor[] = {
+                [STRING_UNITS_10] = 1000,
+                [STRING_UNITS_2] = 1024,
+        };
+        int i, j;
+        u64 remainder = 0, sf_cap;
+        char tmp[8];
+
+        tmp[0] = '\0';
+
+        for (i = 0; size > divisor[units] && units_str[units][i]; i++)
+                remainder = do_div(size, divisor[units]);
+
+        sf_cap = size;
+        for (j = 0; sf_cap*10 < 1000; j++)
+                sf_cap *= 10;
+
+        if (j) {
+                remainder *= 1000;
+                do_div(remainder, divisor[units]);
+                snprintf(tmp, sizeof(tmp), ".%03lld",
+                         (unsigned long long)remainder);
+                tmp[j+1] = '\0';
+        }
+
+        snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
+                 tmp, units_str[units][i]);
+
+        return 0;
+}
+EXPORT_SYMBOL(string_get_size);
```
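string_get_size() scales the value into the largest unit whose integer part is non-zero, then prints roughly three significant figures. A simplified userspace analogue of the algorithm — truncating rather than rounding, like the kernel version; the unit spellings and helper names here are mine, not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>

enum size_units { UNITS_10, UNITS_2 };

static void format_size(uint64_t size, enum size_units units,
                        char *buf, size_t len)
{
        static const char *tab10[] = { "B", "kB", "MB", "GB", "TB", "PB" };
        static const char *tab2[]  = { "B", "KiB", "MiB", "GiB", "TiB", "PiB" };
        const char **tab = (units == UNITS_10) ? tab10 : tab2;
        const uint64_t divisor = (units == UNITS_10) ? 1000 : 1024;
        uint64_t remainder = 0;
        int i = 0;

        /* scale into the largest unit with a non-zero integer part */
        while (size >= divisor && i < 5) {
                remainder = size % divisor;
                size /= divisor;
                i++;
        }

        if (i == 0 || size >= 100) {
                snprintf(buf, len, "%llu %s", (unsigned long long)size, tab[i]);
        } else {
                /* keep ~3 significant figures, truncating like the original */
                unsigned frac = (unsigned)(remainder * 1000 / divisor);

                if (size >= 10)
                        snprintf(buf, len, "%llu.%u %s",
                                 (unsigned long long)size, frac / 100, tab[i]);
                else
                        snprintf(buf, len, "%llu.%02u %s",
                                 (unsigned long long)size, frac / 10, tab[i]);
        }
}

int main(void)
{
        char buf[32];

        format_size(1536, UNITS_2, buf, sizeof(buf));
        printf("%s\n", buf);            /* 1.50 KiB */
        format_size(123456789, UNITS_10, buf, sizeof(buf));
        printf("%s\n", buf);            /* 123 MB */
        return 0;
}
```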
```diff
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 977edbdbc1d..f8eebd48914 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-        dma_addr_t mask = 0xffffffff;
-        /* If the device has a mask, use it, otherwise default to 32 bits */
-        if (hwdev && hwdev->dma_mask)
-                mask = *hwdev->dma_mask;
-        return (addr & ~mask) != 0;
+        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+        return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         void *ret;
         int order = get_order(size);
 
-        /*
-         * XXX fix me: the DMA API should pass us an explicit DMA mask
-         * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-         * bit range instead of a 16MB one).
-         */
-        flags |= GFP_DMA;
-
         ret = (void *)__get_free_pages(flags, order);
-        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
                 /*
                  * The allocated memory isn't reachable by the device.
                  * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                  * swiotlb_map_single(), which will grab memory from
                  * the lowest available address range.
                  */
-                dma_addr_t handle;
-                handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-                if (swiotlb_dma_mapping_error(hwdev, handle))
+                ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+                if (!ret)
                         return NULL;
-
-                ret = bus_to_virt(handle);
         }
 
         memset(ret, 0, size);
         dev_addr = virt_to_bus(ret);
 
         /* Confirm address can be DMA'd by device */
-        if (address_needs_mapping(hwdev, dev_addr)) {
+        if (address_needs_mapping(hwdev, dev_addr, size)) {
                 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                        (unsigned long long)*hwdev->dma_mask,
                        (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                       dma_addr_t dma_handle)
 {
         WARN_ON(irqs_disabled());
-        if (!(vaddr >= (void *)io_tlb_start
-            && vaddr < (void *)io_tlb_end))
+        if (!is_swiotlb_buffer(vaddr))
                 free_pages((unsigned long) vaddr, get_order(size));
         else
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+                unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+        if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
                 return dev_addr;
 
         /*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (address_needs_mapping(hwdev, dev_addr))
+        if (address_needs_mapping(hwdev, dev_addr, size))
                 panic("map_single: bounce buffer is not DMA'ble");
 
         return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 unmap_single(hwdev, dma_addr, size, dir);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
         char *dma_addr = bus_to_virt(dev_addr) + offset;
 
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+        if (is_swiotlb_buffer(dma_addr))
                 sync_single(hwdev, dma_addr, size, dir, target);
         else if (dir == DMA_FROM_DEVICE)
                 dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
         for_each_sg(sgl, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 dev_addr = virt_to_bus(addr);
-                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+                if (swiotlb_force ||
+                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                         void *map = map_single(hwdev, addr, sg->length, dir);
                         if (!map) {
                                 /* Don't panic here, we expect map_sg users
```
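The recurring swiotlb change is that address_needs_mapping() now receives the buffer size, so reachability is checked for the whole buffer rather than only its first byte. A sketch of what the size-aware test boils down to (cf. the kernel's is_buffer_dma_capable(); this standalone version ignores the addr + size wrap-around corner case):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool buffer_dma_capable(uint64_t mask, uint64_t addr, size_t size)
{
        return addr + size - 1 <= mask; /* last byte must be under the mask */
}

int main(void)
{
        const uint64_t mask32 = 0xffffffffULL;  /* 32-bit DMA mask */

        /* entirely below 4 GiB: reachable */
        assert(buffer_dma_capable(mask32, 0xfffff000ULL, 0x1000));

        /* starts below 4 GiB but crosses it: the old start-address-only
         * test would have let this buffer through */
        assert(!buffer_dma_capable(mask32, 0xfffff000ULL, 0x2000));
        return 0;
}
```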
```diff
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d8d1d114224..c399bc1093c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
 
 #include <asm/page.h>           /* for PAGE_SIZE */
 #include <asm/div64.h>
+#include <asm/sections.h>       /* for dereference_function_descriptor() */
 
 /* Works only for digits and letters, but small and fast */
 #define TOLOWER(x) ((x) | 0x20)
@@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
         return buf;
 }
 
-static inline void *dereference_function_descriptor(void *ptr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
-        void *p;
-        if (!probe_kernel_address(ptr, p))
-                ptr = p;
-#endif
-        return ptr;
-}
-
 static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
 {
         unsigned long value = (unsigned long) ptr;
```