struct idr {
        struct idr_layer __rcu  *hint;  /* the last layer allocated from */
        struct idr_layer __rcu  *top;
        int                     layers; /* only valid w/o concurrent changes */
        int                     cur;    /* current pos for cyclic allocation */
        spinlock_t              lock;
        int                     id_free_cnt;
        struct idr_layer        *id_free;
};

struct idr_layer {
        int                     prefix; /* the ID prefix of this idr_layer */
        int                     layer;  /* distance from leaf */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                     count;  /* When zero, we can release it */
        union {
                /* A zero bit means "space here" */
                DECLARE_BITMAP(bitmap, IDR_SIZE);
                struct rcu_head rcu_head;
        };
};
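Each idr_layer covers IDR_BITS bits of the ID space, so an ID is simply a path of array indices from the top layer down to a leaf. A minimal user-space sketch of that decomposition, assuming IDR_BITS is 8 (its value in this era of the kernel; the real constant comes from the kernel headers):

/*
 * Sketch only, not kernel code: shows how an ID maps to one ary[]
 * index per layer, using the same arithmetic sub_alloc() uses:
 * (id >> (IDR_BITS * l)) & IDR_MASK.
 */
#include <stdio.h>

#define IDR_BITS        8                       /* assumed value */
#define IDR_MASK        ((1 << IDR_BITS) - 1)

int main(void)
{
        int id = 0x12345;       /* arbitrary example ID */
        int layers = 3;         /* tree depth covering this ID */
        int l;

        /* Walk from the top layer (layers - 1) down to the leaf (0),
         * peeling off IDR_BITS of the ID at each level.
         */
        for (l = layers - 1; l >= 0; l--)
                printf("layer %d: ary index 0x%x\n",
                       l, (id >> (IDR_BITS * l)) & IDR_MASK);
        /* prints: layer 2: 0x1, layer 1: 0x23, layer 0: 0x45 */
        return 0;
}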
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
        if (t < 0)
                return t == -ENOMEM ? -EAGAIN : t;

        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_IDR_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have small memory foot print.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
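Because ida_get_new_above() returns -EAGAIN both when it loses a race and when the preallocated memory has been consumed, callers follow the pre-get/retry pattern documented in lib/idr.c: preload outside the lock, retry on -EAGAIN. A sketch of a typical caller, where my_ida, my_lock and my_alloc_id are hypothetical names:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_ida);
static DEFINE_SPINLOCK(my_lock);

/* Allocate the lowest free ID >= 0, or return a negative errno. */
static int my_alloc_id(void)
{
        int id, ret;

again:
        /* Refill the per-ida preallocation outside the lock. */
        if (!ida_pre_get(&my_ida, GFP_KERNEL))
                return -ENOMEM;

        spin_lock(&my_lock);
        ret = ida_get_new_above(&my_ida, 0, &id);
        spin_unlock(&my_lock);

        if (ret == -EAGAIN)
                goto again;     /* preallocation was consumed; refill */
        if (ret < 0)
                return ret;

        return id;
}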
static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa, gfp_t gfp_mask,
                              struct idr *layer_idr)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
                        return -ENOMEM;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while (id > idr_max(layers)) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        WARN_ON_ONCE(p->prefix);
                        continue;
                }
                if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->count = 0;
                                bitmap_clear(new->bitmap, 0, IDR_SIZE);
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -ENOMEM;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                new->prefix = id & idr_layer_prefix_mask(new->layer);
                if (bitmap_full(p->bitmap, IDR_SIZE))
                        __set_bit(0, new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
        if (v == -EAGAIN)
                goto build_up;
        return(v);
}

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                     gfp_t gfp_mask, struct idr *layer_idr)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (id > idr_max(idp->layers)) {
                                *starting_id = id;
                                return -EAGAIN;
                        }
                        p = pa[l];
                        BUG_ON(!p);

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_IDR_BIT) || (id < 0))
                        return -ENOSPC;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = idr_layer_alloc(gfp_mask, layer_idr);
                        if (!new)
                                return -ENOMEM;
                        new->layer = l-1;
                        new->prefix = id & idr_layer_prefix_mask(new->layer);
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}
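Two pieces of index arithmetic in sub_alloc() are easy to misread: the XOR that jumps the ID forward to the first free slot found at the current layer, and the OR/increment that rounds the ID up to the next subtree when the current one is full. A stand-alone sketch, again assuming IDR_BITS is 8:

/*
 * Illustration only, not kernel code: the two index manipulations
 * from sub_alloc(), with IDR_BITS assumed to be 8.
 */
#include <stdio.h>

#define IDR_BITS        8
#define IDR_MASK        ((1 << IDR_BITS) - 1)

int main(void)
{
        int id = 0x12345, l = 1;
        int n = (id >> (IDR_BITS * l)) & IDR_MASK;      /* current slot: 0x23 */
        int m = 0x30;                                   /* first free slot found */
        int sh;

        /* "id = ((id >> sh) ^ n ^ m) << sh": replace this layer's
         * index n with m and clear all lower-order bits, i.e. jump to
         * the first ID covered by slot m.
         */
        sh = IDR_BITS * l;
        printf("skip to slot m: 0x%x\n", ((id >> sh) ^ n ^ m) << sh);
        /* prints 0x13000: index 0x30 at layer 1, offset 0 below it */

        /* "id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1", with l already
         * bumped by one: round id up to the first ID of the next slot
         * in the parent layer when this subtree is full.
         */
        l++;
        printf("next subtree:   0x%x\n",
               (id | ((1 << (IDR_BITS * l)) - 1)) + 1);
        /* prints 0x20000: the subtree after the one containing 0x12345 */
        return 0;
}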