Diffstat (limited to 'lib/idr.c')
-rw-r--r--	lib/idr.c	180
1 file changed, 84 insertions, 96 deletions
diff --git a/lib/idr.c b/lib/idr.c
index 00739aaf95a..39158abebad 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -18,12 +18,6 @@
  * pointer or what ever, we treat it as a (void *). You can pass this
  * id to a user for him to pass back at a later time. You then pass
  * that id to this code and it returns your pointer.
-
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
  */
 
 #ifndef TEST                        // to test in user space...
@@ -106,8 +100,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache. We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones. As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +115,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context. See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
@@ -139,7 +145,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
 
 static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
-	if (idr->hint && idr->hint == p)
+	if (idr->hint == p)
 		RCU_INIT_POINTER(idr->hint, NULL);
 	call_rcu(&p->rcu_head, idr_layer_rcu_free);
 }
@@ -184,20 +190,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp: idr handle
- * @gfp_mask: memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +201,11 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -252,7 +243,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
 			/* if already at the top layer, we need to grow */
-			if (id >= 1 << (idp->layers * IDR_BITS)) {
+			if (id > idr_max(idp->layers)) {
 				*starting_id = id;
 				return -EAGAIN;
 			}
@@ -376,38 +367,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-	int rv;
-
-	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-	if (rv < 0)
-		return rv == -ENOMEM ? -EAGAIN : rv;
-
-	idr_fill_slot(idp, ptr, rv, pa);
-	*id = rv;
-	return 0;
-}
-EXPORT_SYMBOL(idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -515,11 +474,36 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
 
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+		     gfp_t gfp_mask)
+{
+	int id;
+
+	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+	if (id == -ENOSPC)
+		id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+	if (likely(id >= 0))
+		idr->cur = id + 1;
+	return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
-	printk(KERN_WARNING
-	       "idr_remove called for id=%d which is not allocated.\n", id);
-	dump_stack();
+	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
 }
 
 static void sub_remove(struct idr *idp, int shift, int id)
@@ -543,7 +527,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 		__clear_bit(n, p->bitmap);
-		rcu_assign_pointer(p->ary[n], NULL);
+		RCU_INIT_POINTER(p->ary[n], NULL);
 		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
 			if (to_free)
@@ -572,6 +556,11 @@ void idr_remove(struct idr *idp, int id)
 	if (id < 0)
 		return;
 
+	if (id > idr_max(idp->layers)) {
+		idr_remove_warning(id);
+		return;
+	}
+
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
 	    idp->top->ary[0]) {
@@ -589,20 +578,10 @@ void idr_remove(struct idr *idp, int id)
 		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
 		free_layer(idp, to_free);
 	}
-	while (idp->id_free_cnt >= MAX_IDR_FREE) {
-		p = get_from_free_list(idp);
-		/*
-		 * Note: we don't call the rcu callback here, since the only
-		 * layers that fall into the freelist are those that have been
-		 * preallocated.
-		 */
-		kmem_cache_free(idr_layer_cache, p);
-	}
-	return;
 }
 EXPORT_SYMBOL(idr_remove);
 
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
 	int bt_mask;
@@ -612,7 +591,7 @@ void __idr_remove_all(struct idr *idp)
 
 	n = idp->layers * IDR_BITS;
 	p = idp->top;
-	rcu_assign_pointer(idp->top, NULL);
+	RCU_INIT_POINTER(idp->top, NULL);
 	max = idr_max(idp->layers);
 
 	id = 0;
@@ -635,7 +614,6 @@ void __idr_remove_all(struct idr *idp)
 	}
 	idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
@@ -820,14 +798,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 
 	p = idp->top;
 	if (!p)
-		return ERR_PTR(-EINVAL);
-
-	n = (p->layer+1) * IDR_BITS;
+		return ERR_PTR(-ENOENT);
 
-	if (id >= (1 << n))
-		return ERR_PTR(-EINVAL);
+	if (id > idr_max(p->layer + 1))
+		return ERR_PTR(-ENOENT);
 
-	n -= IDR_BITS;
+	n = p->layer * IDR_BITS;
 	while ((n > 0) && p) {
 		p = p->ary[(id >> n) & IDR_MASK];
 		n -= IDR_BITS;
@@ -864,6 +840,16 @@ void idr_init(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_init);
 
+static int idr_has_entry(int id, void *p, void *data)
+{
+	return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+	return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 
 /**
  * DOC: IDA description
@@ -908,7 +894,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
@@ -1028,6 +1014,9 @@ void ida_remove(struct ida *ida, int id)
 	int n;
 	struct ida_bitmap *bitmap;
 
+	if (idr_id > idr_max(ida->idr.layers))
+		goto err;
+
 	/* clear full bits while looking up the leaf idr_layer */
 	while ((shift > 0) && p) {
 		n = (idr_id >> shift) & IDR_MASK;
@@ -1043,7 +1032,7 @@ void ida_remove(struct ida *ida, int id)
 	__clear_bit(n, p->bitmap);
 
 	bitmap = (void *)p->ary[n];
-	if (!test_bit(offset, bitmap->bitmap))
+	if (!bitmap || !test_bit(offset, bitmap->bitmap))
 		goto err;
 
 	/* update bitmap and remove it if empty */
@@ -1057,8 +1046,7 @@ void ida_remove(struct ida *ida, int id)
 	return;
 
  err:
-	printk(KERN_WARNING
-	       "ida_remove called for id=%d which is not allocated.\n", id);
+	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
 }
 EXPORT_SYMBOL(ida_remove);
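
For context on the idr_layer_alloc() rework above: the three-step fallback (kmem_cache with __GFP_NOWARN, then the per-cpu preload buffer, then kmem_cache again with warnings enabled) serves the idr_preload()/idr_alloc() interface that replaces idr_pre_get()/idr_get_new_above(). A minimal sketch of the intended calling pattern follows; the names my_idr, my_lock, and assign_id are illustrative, not part of the patch:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

int assign_id(void *ptr)
{
	int id;

	/*
	 * Fill the per-cpu preload buffer while sleeping is still
	 * allowed; idr_preload() disables preemption until
	 * idr_preload_end().
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);

	/*
	 * Under the lock, allocate with GFP_NOWAIT. If the direct
	 * __GFP_NOWARN slab attempt fails, idr_layer_alloc() falls
	 * back to the layers preloaded above instead of sleeping.
	 */
	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);

	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* new id >= 0, or -ENOMEM / -ENOSPC */
}

Passing start = 0 and end = 0 asks for any available id. A non-preloading caller can simply call idr_alloc() with GFP_KERNEL and no surrounding preload, which is why the patch tries the slab allocator before dipping into the preload buffer.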

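The new idr_alloc_cyclic() simply retries idr_alloc() from @start once the search from idr->cur returns -ENOSPC, so ids are handed out in increasing order until the range wraps. A sketch of typical use, assuming a hypothetical session table that should not reuse ids immediately:

#include <linux/idr.h>

static DEFINE_IDR(session_idr);

/*
 * Allocate ids in [1, 256). Because the search resumes at idr->cur
 * (one past the last allocated id), a just-freed id is not handed
 * out again until the allocator wraps back around to @start.
 */
int new_session_id(void *session)
{
	return idr_alloc_cyclic(&session_idr, session, 1, 256, GFP_KERNEL);
}

On teardown, the idr_is_empty() helper added above gives a cheap sanity check, e.g. WARN_ON(!idr_is_empty(&session_idr)) before calling idr_destroy(&session_idr).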