idr_get_new() currently returns an incrementing counter in the top 8 bits of the id. Which means that most users have to mask it off again, and we only have a 24-bit range. So remove that counter. Also: - Remove the BITS_PER_INT define due to namespace collision risk. - Make MAX_ID_SHIFT 31, so counters have a 0 to 2G-1 range. - Why is MAX_ID_SHIFT using sizeof(int) and not sizeof(long)? If it's for consistency across 32- and 64-bit machines, why not just make it "31"? - Does this still hold true with the counter removed? /* We can only use half the bits in the top level because there are only four possible bits in the top level (5 bits * 4 levels = 25 bits, but you only use 24 bits in the id). */ If not, what needs to change? Signed-off-by: Andrew Morton <akpm@osdl.org> --- 25-akpm/include/linux/idr.h | 18 +++++------------- 25-akpm/lib/idr.c | 19 +++---------------- 2 files changed, 8 insertions(+), 29 deletions(-) diff -puN include/linux/idr.h~idr-remove-counter include/linux/idr.h --- 25/include/linux/idr.h~idr-remove-counter 2004-05-27 02:08:54.000011048 -0700 +++ 25-akpm/include/linux/idr.h 2004-05-27 02:08:54.006010136 -0700 @@ -11,8 +11,6 @@ #include <linux/types.h> #include <asm/bitops.h> -#define RESERVED_ID_BITS 8 - #if BITS_PER_LONG == 32 # define IDR_BITS 5 # define IDR_FULL 0xfffffffful @@ -32,11 +30,8 @@ #define IDR_SIZE (1 << IDR_BITS) #define IDR_MASK ((1 << IDR_BITS)-1) -/* Define the size of the id's */ -#define BITS_PER_INT (sizeof(int)*8) - -#define MAX_ID_SHIFT (BITS_PER_INT - RESERVED_ID_BITS) -#define MAX_ID_BIT (1 << MAX_ID_SHIFT) +#define MAX_ID_SHIFT (sizeof(int)*8 - 1) +#define MAX_ID_BIT (1U << MAX_ID_SHIFT) #define MAX_ID_MASK (MAX_ID_BIT - 1) /* Leave the possibility of an incomplete final layer */ @@ -46,25 +41,23 @@ #define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL struct idr_layer { - unsigned long bitmap; /* A zero bit means "space here" */ + unsigned long bitmap; /* A zero bit means "space here" */ struct idr_layer 
*ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ + int count; /* When zero, we can release it */ }; struct idr { struct idr_layer *top; struct idr_layer *id_free; - long count; int layers; int id_free_cnt; spinlock_t lock; }; -#define IDR_INIT(name) \ +#define IDR_INIT(name) \ { \ .top = NULL, \ .id_free = NULL, \ - .count = 0, \ .layers = 0, \ .id_free_cnt = 0, \ .lock = SPIN_LOCK_UNLOCKED, \ @@ -82,4 +75,3 @@ void idr_remove(struct idr *idp, int id) void idr_init(struct idr *idp); extern kmem_cache_t *idr_layer_cache; - diff -puN lib/idr.c~idr-remove-counter lib/idr.c --- 25/lib/idr.c~idr-remove-counter 2004-05-27 02:08:54.002010744 -0700 +++ 25-akpm/lib/idr.c 2004-05-27 02:08:54.007009984 -0700 @@ -27,15 +27,6 @@ * so you don't need to be too concerned about locking and conflicts * with the slab allocator. - * A word on reuse. We reuse empty id slots as soon as we can, always - * using the lowest one available. But we also merge a counter in the - * high bits of the id. The counter is RESERVED_ID_BITS (8 at this time) - * long. This means that if you allocate and release the same id in a - * loop we will reuse an id after about 256 times around the loop. The - * word about is used here as we will NOT return a valid id of -1 so if - * you loop on the largest possible id (and that is 24 bits, wow!) we - * will kick the counter to avoid -1. (Paranoid? You bet!) - * * What you need to do is, since we don't keep the counter as part of * id / ptr pair, to keep a copy of it in the pointed to structure * (or else where) so that when you ask for a ptr you can varify that @@ -78,7 +69,8 @@ * If memory is required, it will return -EAGAIN, you should unlock * and go back to the idr_pre_get() call. If the idr is full, it * will return a -ENOSPC. ptr is the pointer you want associated - * with the id. The value is returned in the "id" field. + * with the id. The value is returned in the "id" field. idr_get_new() + * returns a value in the range 0 ... 
0x7fffffff * void *idr_find(struct idr *idp, int id); @@ -271,12 +263,6 @@ build_up: v = sub_alloc(idp, ptr, &id); if (v == -2) goto build_up; - if ( likely(v >= 0 )) { - idp->count++; - v += (idp->count << MAX_ID_SHIFT); - if ( unlikely( v == -1 )) - v += (1L << MAX_ID_SHIFT); - } return(v); } EXPORT_SYMBOL(idr_get_new_above); @@ -334,6 +320,7 @@ static void sub_remove(struct idr *idp, idp->layers = 0; } } + void idr_remove(struct idr *idp, int id) { struct idr_layer *p; _