/* uthash.h — hash-table macros (uthash), as vendored in libsignal-protocol-c */
/*
Copyright (c) 2003-2016, Troy D. Hanson  http://troydhanson.github.com/uthash/
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #define UTHASH_VERSION 2.0.1
28 
29 #include <string.h> /* memcmp,strlen */
30 #include <stddef.h> /* ptrdiff_t */
31 #include <stdlib.h> /* exit() */
32 
33 /* These macros use decltype or the earlier __typeof GNU extension.
34  As decltype is only available in newer compilers (VS2010 or gcc 4.3+
35  when compiling c++ source) this code uses whatever method is needed
36  or, for VS2008 where neither is available, uses casting workarounds. */
37 #if defined(_MSC_VER) /* MS compiler */
38 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
39 #define DECLTYPE(x) (decltype(x))
40 #else /* VS2008 or older (or VS2010 in C mode) */
41 #define NO_DECLTYPE
42 #define DECLTYPE(x)
43 #endif
44 #elif defined(__BORLANDC__) || defined(__LCC__) || defined(__WATCOMC__)
45 #define NO_DECLTYPE
46 #define DECLTYPE(x)
47 #else /* GNU, Sun and other compilers */
48 #define DECLTYPE(x) (__typeof(x))
49 #endif
50 
51 #ifdef NO_DECLTYPE
52 #define DECLTYPE_ASSIGN(dst,src) \
53 do { \
54  char **_da_dst = (char**)(&(dst)); \
55  *_da_dst = (char*)(src); \
56 } while (0)
57 #else
58 #define DECLTYPE_ASSIGN(dst,src) \
59 do { \
60  (dst) = DECLTYPE(dst)(src); \
61 } while (0)
62 #endif
63 
/* A number of the hash functions use uint32_t, which isn't defined pre-VS2010. */
#if defined(_WIN32)
#if defined(_MSC_VER) && _MSC_VER >= 1600
#include <stdint.h>
#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
#include <stdint.h>
#else
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
#elif defined(__GNUC__) && !defined(__VXWORKS__)
#include <stdint.h>
#else
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif

/* Customization hooks: define any of these before including uthash.h to
 * override the default allocator, comparison, or logging behavior. */
#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1)        /* fatal error (out of memory, etc) */
#endif
#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr)     /* free fcn */
#endif
#ifndef uthash_strlen
#define uthash_strlen(s) strlen(s)
#endif
#ifndef uthash_memcmp
#define uthash_memcmp(a,b,n) memcmp(a,b,n)
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl)            /* can be defined to log expands */
#endif

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32U      /* initial number of buckets        */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U  /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U      /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
/* calculate the hash handle from element address elp */
#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
113 
/* Compute the hash of (keyptr, keylen) into hashv using the configured hash. */
#define HASH_VALUE(keyptr,keylen,hashv)                                          \
do {                                                                             \
  HASH_FCN(keyptr, keylen, hashv);                                               \
} while (0)

/* Look up a key whose hash value is already known; sets `out` to the matching
 * element or NULL. When the bloom filter is enabled it is consulted first so
 * definitely-absent keys skip the bucket chain scan. */
#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out)                 \
do {                                                                             \
  (out) = NULL;                                                                  \
  if (head) {                                                                    \
    unsigned _hf_bkt;                                                            \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt);                  \
    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) {                         \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ],   \
                       keyptr, keylen, hashval, out);                            \
    }                                                                            \
  }                                                                              \
} while (0)

/* Hash the key, then look it up; sets `out` to the element or NULL. */
#define HASH_FIND(hh,head,keyptr,keylen,out)                                     \
do {                                                                             \
  unsigned _hf_hashv;                                                            \
  HASH_VALUE(keyptr, keylen, _hf_hashv);                                         \
  HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out);               \
} while (0)
137 
#ifdef HASH_BLOOM
/* Optional bloom filter: HASH_BLOOM is the log2 of the filter size in bits.
 * Failed lookups can then skip the bucket scan at the cost of a small bitmap. */
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
#define HASH_BLOOM_MAKE(tbl)                                                     \
do {                                                                             \
  (tbl)->bloom_nbits = HASH_BLOOM;                                               \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN);                 \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); }                    \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN);                                \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE;                                       \
} while (0)

#define HASH_BLOOM_FREE(tbl)                                                     \
do {                                                                             \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN);                              \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))

/* The hash value is masked down to bloom_nbits to index the bit vector. */
#define HASH_BLOOM_ADD(tbl,hashv)                                                \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv)                                               \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))

#else
/* Bloom filter disabled: the membership test always says "possibly present". */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif
171 
/* Allocate and initialize the shared UT_hash_table for a hash whose first
 * element is `head`. Invoked by the ADD macros when the hash is empty.
 * `hho` records the byte offset of the hash handle within the element. */
#define HASH_MAKE_TABLE(hh,head)                                                 \
do {                                                                             \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table));         \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); }                     \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table));                              \
  (head)->hh.tbl->tail = &((head)->hh);                                          \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS;                        \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2;              \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head);                    \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc(                      \
      HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket));                 \
  if (!(head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); }              \
  memset((head)->hh.tbl->buckets, 0,                                             \
      HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket));                 \
  HASH_BLOOM_MAKE((head)->hh.tbl);                                               \
  (head)->hh.tbl->signature = HASH_SIGNATURE;                                    \
} while (0)
190 
/* Replace-or-add with a precomputed hash, keeping app order sorted by cmpfcn.
 * Any existing element with the same key is deleted first and returned via
 * `replaced` (NULL if the key was new). The caller owns the replaced element. */
#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
do {                                                                             \
  (replaced) = NULL;                                                             \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) {                                                                \
    HASH_DELETE(hh, head, replaced);                                             \
  }                                                                              \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
} while (0)

/* Replace-or-add with a precomputed hash (append order). */
#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
do {                                                                             \
  (replaced) = NULL;                                                             \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) {                                                                \
    HASH_DELETE(hh, head, replaced);                                             \
  }                                                                              \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
} while (0)

/* Replace-or-add: hashes the key, then delegates to the BYHASHVALUE form. */
#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced)                   \
do {                                                                             \
  unsigned _hr_hashv;                                                            \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv);                         \
  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
} while (0)

/* Replace-or-add, keeping app order sorted by cmpfcn. */
#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn)    \
do {                                                                             \
  unsigned _hr_hashv;                                                            \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv);                         \
  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
} while (0)

/* Append `add` at the tail of the app-order doubly-linked list. Assumes the
 * list is non-empty (head and tbl->tail are valid). */
#define HASH_APPEND_LIST(hh, head, add)                                          \
do {                                                                             \
  (add)->hh.next = NULL;                                                         \
  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail);           \
  (head)->hh.tbl->tail->next = (add);                                            \
  (head)->hh.tbl->tail = &((add)->hh);                                           \
} while (0)
232 
/* Add `add` with a precomputed hash, inserting into the app-order list at the
 * position chosen by cmpfcn (ascending). The element is also linked into its
 * hash bucket and the bloom filter is updated. */
#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
do {                                                                             \
  unsigned _ha_bkt;                                                              \
  (add)->hh.hashv = (hashval);                                                   \
  (add)->hh.key = (char*) (keyptr);                                              \
  (add)->hh.keylen = (unsigned) (keylen_in);                                     \
  if (!(head)) {                                                                 \
    (add)->hh.next = NULL;                                                       \
    (add)->hh.prev = NULL;                                                       \
    (head) = (add);                                                              \
    HASH_MAKE_TABLE(hh, head);                                                   \
  } else {                                                                       \
    void *_hs_iter = (head);                                                     \
    (add)->hh.tbl = (head)->hh.tbl;                                              \
    /* walk forward until an element greater than `add` is found */              \
    do {                                                                         \
      if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0)                             \
        break;                                                                   \
    } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next));         \
    if (_hs_iter) {                                                              \
      /* insert before _hs_iter */                                               \
      (add)->hh.next = _hs_iter;                                                 \
      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) {     \
        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add);              \
      } else {                                                                   \
        (head) = (add);                                                          \
      }                                                                          \
      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add);                      \
    } else {                                                                     \
      HASH_APPEND_LIST(hh, head, add);                                           \
    }                                                                            \
  }                                                                              \
  (head)->hh.tbl->num_items++;                                                   \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt);                    \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh);                 \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval);                                       \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in);                                    \
  HASH_FSCK(hh, head);                                                           \
} while (0)

/* Hash the key, then add in sorted order. */
#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn)             \
do {                                                                             \
  unsigned _hs_hashv;                                                            \
  HASH_VALUE(keyptr, keylen_in, _hs_hashv);                                      \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
} while (0)

#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)

#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn)                 \
  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)

/* Add `add` with a precomputed hash, appending to the app-order list. */
#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add)        \
do {                                                                             \
  unsigned _ha_bkt;                                                              \
  (add)->hh.hashv = (hashval);                                                   \
  (add)->hh.key = (char*) (keyptr);                                              \
  (add)->hh.keylen = (unsigned) (keylen_in);                                     \
  if (!(head)) {                                                                 \
    (add)->hh.next = NULL;                                                       \
    (add)->hh.prev = NULL;                                                       \
    (head) = (add);                                                              \
    HASH_MAKE_TABLE(hh, head);                                                   \
  } else {                                                                       \
    (add)->hh.tbl = (head)->hh.tbl;                                              \
    HASH_APPEND_LIST(hh, head, add);                                             \
  }                                                                              \
  (head)->hh.tbl->num_items++;                                                   \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt);                    \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh);                 \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval);                                       \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in);                                    \
  HASH_FSCK(hh, head);                                                           \
} while (0)

/* Hash the key, then add (append order). */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add)                            \
do {                                                                             \
  unsigned _ha_hashv;                                                            \
  HASH_VALUE(keyptr, keylen_in, _ha_hashv);                                      \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add);      \
} while (0)

#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add)            \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)

#define HASH_ADD(hh,head,fieldname,keylen_in,add)                                \
  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)

/* Map a hash value to a bucket index; num_bkts is always a power of two,
 * so the modulo reduces to a mask. */
#define HASH_TO_BKT(hashv,num_bkts,bkt)                                          \
do {                                                                             \
  bkt = ((hashv) & ((num_bkts) - 1U));                                           \
} while (0)
324 
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *   HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr)                                              \
do {                                                                             \
  struct UT_hash_handle *_hd_hh_del;                                             \
  if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) {            \
    /* last element: tear down the whole table */                                \
    uthash_free((head)->hh.tbl->buckets,                                         \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) );     \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    head = NULL;                                                                 \
  } else {                                                                       \
    unsigned _hd_bkt;                                                            \
    _hd_hh_del = &((delptr)->hh);                                                \
    if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) {         \
      (head)->hh.tbl->tail =                                                     \
          (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) +                     \
                            (head)->hh.tbl->hho);                                \
    }                                                                            \
    if ((delptr)->hh.prev != NULL) {                                             \
      ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) +                        \
                         (head)->hh.tbl->hho))->next = (delptr)->hh.next;        \
    } else {                                                                     \
      DECLTYPE_ASSIGN(head,(delptr)->hh.next);                                   \
    }                                                                            \
    if (_hd_hh_del->next != NULL) {                                              \
      ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next +                           \
                         (head)->hh.tbl->hho))->prev =                           \
                         _hd_hh_del->prev;                                       \
    }                                                                            \
    HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt);       \
    HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del);            \
    (head)->hh.tbl->num_items--;                                                 \
  }                                                                              \
  HASH_FSCK(hh,head);                                                            \
} while (0)


/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out)                                          \
  HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add)                                          \
  HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
#define HASH_REPLACE_STR(head,strfield,add,replaced)                             \
  HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
#define HASH_FIND_INT(head,findint,out)                                          \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add)                                          \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced)                             \
  HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out)                                          \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add)                                          \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced)                             \
  HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr)                                                    \
  HASH_DELETE(hh,head,delptr)
394 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head)                                                       \
do {                                                                             \
  struct UT_hash_handle *_thh;                                                   \
  if (head) {                                                                    \
    unsigned _bkt_i;                                                             \
    unsigned _count;                                                             \
    char *_prev;                                                                 \
    _count = 0;                                                                  \
    /* pass 1: walk every bucket chain, checking hh_prev links and counts */     \
    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) {           \
      unsigned _bkt_count = 0;                                                   \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head;                            \
      _prev = NULL;                                                              \
      while (_thh) {                                                             \
        if (_prev != (char*)(_thh->hh_prev)) {                                   \
          HASH_OOPS("invalid hh_prev %p, actual %p\n",                           \
                    _thh->hh_prev, _prev );                                      \
        }                                                                        \
        _bkt_count++;                                                            \
        _prev = (char*)(_thh);                                                   \
        _thh = _thh->hh_next;                                                    \
      }                                                                          \
      _count += _bkt_count;                                                      \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) {                 \
        HASH_OOPS("invalid bucket count %u, actual %u\n",                        \
                  (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count);            \
      }                                                                          \
    }                                                                            \
    if (_count != (head)->hh.tbl->num_items) {                                   \
      HASH_OOPS("invalid hh item count %u, actual %u\n",                         \
                (head)->hh.tbl->num_items, _count );                             \
    }                                                                            \
    /* pass 2: traverse hh in app order; check next/prev integrity, count */     \
    _count = 0;                                                                  \
    _prev = NULL;                                                                \
    _thh = &(head)->hh;                                                          \
    while (_thh) {                                                               \
      _count++;                                                                  \
      if (_prev != (char*)(_thh->prev)) {                                        \
        HASH_OOPS("invalid prev %p, actual %p\n",                                \
                  _thh->prev, _prev );                                           \
      }                                                                          \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh);                         \
      _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) +              \
                            (head)->hh.tbl->hho) : NULL );                       \
    }                                                                            \
    if (_count != (head)->hh.tbl->num_items) {                                   \
      HASH_OOPS("invalid app item count %u, actual %u\n",                        \
                (head)->hh.tbl->num_items, _count );                             \
    }                                                                            \
  }                                                                              \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif

/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)                                   \
do {                                                                             \
  unsigned _klen = fieldlen;                                                     \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen));                                  \
  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen);                        \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* default to the Jenkins hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
475 
/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */
#define HASH_BER(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _hb_keylen = (unsigned)keylen;                                        \
  const unsigned char *_hb_key = (const unsigned char*)(key);                    \
  (hashv) = 0;                                                                   \
  while (_hb_keylen-- != 0U) {                                                   \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++;                           \
  }                                                                              \
} while (0)


/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
#define HASH_SAX(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _sx_i;                                                                \
  const unsigned char *_hs_key = (const unsigned char*)(key);                    \
  hashv = 0;                                                                     \
  for (_sx_i=0; _sx_i < keylen; _sx_i++) {                                       \
    hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i];                       \
  }                                                                              \
} while (0)

/* FNV-1a variation */
#define HASH_FNV(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _fn_i;                                                                \
  const unsigned char *_hf_key = (const unsigned char*)(key);                    \
  hashv = 2166136261U;         /* 32-bit FNV offset basis */                     \
  for (_fn_i=0; _fn_i < keylen; _fn_i++) {                                       \
    hashv = hashv ^ _hf_key[_fn_i];                                              \
    hashv = hashv * 16777619U; /* 32-bit FNV prime */                            \
  }                                                                              \
} while (0)

/* Jenkins one-at-a-time hash. */
#define HASH_OAT(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _ho_i;                                                                \
  const unsigned char *_ho_key = (const unsigned char*)(key);                    \
  hashv = 0;                                                                     \
  for (_ho_i=0; _ho_i < keylen; _ho_i++) {                                       \
    hashv += _ho_key[_ho_i];                                                     \
    hashv += (hashv << 10);                                                      \
    hashv ^= (hashv >> 6);                                                       \
  }                                                                              \
  hashv += (hashv << 3);                                                         \
  hashv ^= (hashv >> 11);                                                        \
  hashv += (hashv << 15);                                                        \
} while (0)

/* Mixing step of Bob Jenkins' lookup2 hash. */
#define HASH_JEN_MIX(a,b,c)                                                      \
do {                                                                             \
  a -= b; a -= c; a ^= ( c >> 13 );                                              \
  b -= c; b -= a; b ^= ( a << 8 );                                               \
  c -= a; c -= b; c ^= ( b >> 13 );                                              \
  a -= b; a -= c; a ^= ( c >> 12 );                                              \
  b -= c; b -= a; b ^= ( a << 16 );                                              \
  c -= a; c -= b; c ^= ( b >> 5 );                                               \
  a -= b; a -= c; a ^= ( c >> 3 );                                               \
  b -= c; b -= a; b ^= ( a << 10 );                                              \
  c -= a; c -= b; c ^= ( b >> 15 );                                              \
} while (0)

/* Bob Jenkins' lookup2 hash (the uthash default): consumes 12 bytes per
 * mixing round, then folds in the 0-11 byte tail. */
#define HASH_JEN(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned _hj_i,_hj_j,_hj_k;                                                    \
  unsigned const char *_hj_key = (unsigned const char*)(key);                    \
  hashv = 0xfeedbeefu;                                                           \
  _hj_i = _hj_j = 0x9e3779b9u;   /* golden ratio */                              \
  _hj_k = (unsigned)(keylen);                                                    \
  while (_hj_k >= 12U) {                                                         \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 )                         \
        + ( (unsigned)_hj_key[2] << 16 )                                         \
        + ( (unsigned)_hj_key[3] << 24 ) );                                      \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 )                         \
        + ( (unsigned)_hj_key[6] << 16 )                                         \
        + ( (unsigned)_hj_key[7] << 24 ) );                                      \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 )                         \
        + ( (unsigned)_hj_key[10] << 16 )                                        \
        + ( (unsigned)_hj_key[11] << 24 ) );                                     \
                                                                                 \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                           \
                                                                                 \
    _hj_key += 12;                                                               \
    _hj_k -= 12U;                                                                \
  }                                                                              \
  hashv += (unsigned)(keylen);                                                   \
  switch ( _hj_k ) {                                                             \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */         \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 );  /* FALLTHROUGH */         \
    case 9:  hashv += ( (unsigned)_hj_key[8] << 8 );   /* FALLTHROUGH */         \
    case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 );  /* FALLTHROUGH */         \
    case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 );  /* FALLTHROUGH */         \
    case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 );   /* FALLTHROUGH */         \
    case 5:  _hj_j += _hj_key[4];                      /* FALLTHROUGH */         \
    case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 );  /* FALLTHROUGH */         \
    case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 );  /* FALLTHROUGH */         \
    case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 );   /* FALLTHROUGH */         \
    case 1:  _hj_i += _hj_key[0];                                                \
  }                                                                              \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv);                                             \
} while (0)
578 
/* The Paul Hsieh hash function */
#undef get16bits
/* On targets known to tolerate unaligned 16-bit loads, read directly;
 * otherwise assemble the halfword byte by byte (little-endian order). */
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__)             \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)             \
                      +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,hashv)                                               \
do {                                                                             \
  unsigned const char *_sfh_key = (unsigned const char*)(key);                   \
  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen;                                \
                                                                                 \
  unsigned _sfh_rem = _sfh_len & 3U;                                             \
  _sfh_len >>= 2;                                                                \
  hashv = 0xcafebabeu;                                                           \
                                                                                 \
  /* Main loop: consume 4 bytes per iteration */                                 \
  for (;_sfh_len > 0U; _sfh_len--) {                                             \
    hashv    += get16bits (_sfh_key);                                            \
    _sfh_tmp  = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv;              \
    hashv     = (hashv << 16) ^ _sfh_tmp;                                        \
    _sfh_key += 2U*sizeof (uint16_t);                                            \
    hashv    += hashv >> 11;                                                     \
  }                                                                              \
                                                                                 \
  /* Handle end cases */                                                         \
  switch (_sfh_rem) {                                                            \
    case 3: hashv += get16bits (_sfh_key);                                       \
            hashv ^= hashv << 16;                                                \
            hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18;              \
            hashv += hashv >> 11;                                                \
            break;                                                               \
    case 2: hashv += get16bits (_sfh_key);                                       \
            hashv ^= hashv << 11;                                                \
            hashv += hashv >> 17;                                                \
            break;                                                               \
    case 1: hashv += *_sfh_key;                                                  \
            hashv ^= hashv << 10;                                                \
            hashv += hashv >> 1;                                                 \
  }                                                                              \
                                                                                 \
  /* Force "avalanching" of final 127 bits */                                    \
  hashv ^= hashv << 3;                                                           \
  hashv += hashv >> 5;                                                           \
  hashv ^= hashv << 4;                                                           \
  hashv += hashv >> 17;                                                          \
  hashv ^= hashv << 25;                                                          \
  hashv += hashv >> 6;                                                           \
} while (0)
632 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * MurmurHash uses the faster approach only on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 *   gcc -m64 -dM -E - < /dev/null                  (on gcc)
 *   cc -## a.c (where a.c is a simple test file)   (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__)  || defined(_M_IX86))
#define MUR_GETBLOCK(p,i) p[i]
#else /* non intel: fetch a word via two aligned reads, spliced by alignment */
#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p)   ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >>  8))
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p)   ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) <<  8))
#endif
#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) :                     \
                            (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) :           \
                             (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) :            \
                                                      MUR_ONE_THREE(p))))
#endif
#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/* MurmurHash3 finalization mix: forces avalanche of the final result. */
#define MUR_FMIX(_h)                                                             \
do {                                                                             \
  _h ^= _h >> 16;                                                                \
  _h *= 0x85ebca6bu;                                                             \
  _h ^= _h >> 13;                                                                \
  _h *= 0xc2b2ae35u;                                                             \
  _h ^= _h >> 16;                                                                \
} while (0)

#define HASH_MUR(key,keylen,hashv)                                               \
do {                                                                             \
  const uint8_t *_mur_data = (const uint8_t*)(key);                              \
  const int _mur_nblocks = (int)(keylen) / 4;                                    \
  uint32_t _mur_h1 = 0xf88D5353u;                                                \
  uint32_t _mur_c1 = 0xcc9e2d51u;                                                \
  uint32_t _mur_c2 = 0x1b873593u;                                                \
  uint32_t _mur_k1 = 0;                                                          \
  const uint8_t *_mur_tail;                                                      \
  const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4));   \
  int _mur_i;                                                                    \
  /* body: process all full 4-byte blocks */                                     \
  for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) {                          \
    _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i);                                  \
    _mur_k1 *= _mur_c1;                                                          \
    _mur_k1 = MUR_ROTL32(_mur_k1,15);                                            \
    _mur_k1 *= _mur_c2;                                                          \
                                                                                 \
    _mur_h1 ^= _mur_k1;                                                          \
    _mur_h1 = MUR_ROTL32(_mur_h1,13);                                            \
    _mur_h1 = (_mur_h1*5U) + 0xe6546b64u;                                        \
  }                                                                              \
  /* tail: fold in the remaining 0-3 bytes */                                    \
  _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4));                    \
  _mur_k1 = 0;                                                                   \
  switch ((keylen) & 3U) {                                                       \
    case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */           \
    case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8;  /* FALLTHROUGH */           \
    case 1: _mur_k1 ^= (uint32_t)_mur_tail[0];                                   \
    _mur_k1 *= _mur_c1;                                                          \
    _mur_k1 = MUR_ROTL32(_mur_k1,15);                                            \
    _mur_k1 *= _mur_c2;                                                          \
    _mur_h1 ^= _mur_k1;                                                          \
  }                                                                              \
  _mur_h1 ^= (uint32_t)(keylen);                                                 \
  MUR_FMIX(_mur_h1);                                                             \
  hashv = _mur_h1;                                                               \
} while (0)
#endif  /* HASH_USING_NO_STRICT_ALIASING */
712 
/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out)               \
do {                                                                             \
  if ((head).hh_head != NULL) {                                                  \
    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head));                     \
  } else {                                                                       \
    (out) = NULL;                                                                \
  }                                                                              \
  while ((out) != NULL) {                                                        \
    /* compare hash value and key length first (cheap) before memcmp */         \
    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) {       \
      if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) {                \
        break;                                                                   \
      }                                                                          \
    }                                                                            \
    if ((out)->hh.hh_next != NULL) {                                             \
      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next));                \
    } else {                                                                     \
      (out) = NULL;                                                              \
    }                                                                            \
  }                                                                              \
} while (0)

/* add an item to a bucket (prepended to the chain); triggers a table
 * expansion when the chain exceeds its capacity threshold */
#define HASH_ADD_TO_BKT(head,addhh)                                              \
do {                                                                             \
  (head).count++;                                                                \
  (addhh)->hh_next = (head).hh_head;                                             \
  (addhh)->hh_prev = NULL;                                                       \
  if ((head).hh_head != NULL) { (head).hh_head->hh_prev = (addhh); }             \
  (head).hh_head = addhh;                                                        \
  if (((head).count >= (((head).expand_mult+1U) * HASH_BKT_CAPACITY_THRESH))     \
      && ((addhh)->tbl->noexpand != 1U)) {                                       \
    HASH_EXPAND_BUCKETS((addhh)->tbl);                                           \
  }                                                                              \
} while (0)

/* remove an item from a given bucket; wrapped in do/while so the macro is a
 * single statement and safe inside unbraced if/else */
#define HASH_DEL_IN_BKT(hh,head,hh_del)                                          \
do {                                                                             \
  (head).count--;                                                                \
  if ((head).hh_head == hh_del) {                                                \
    (head).hh_head = hh_del->hh_next;                                            \
  }                                                                              \
  if (hh_del->hh_prev) {                                                         \
    hh_del->hh_prev->hh_next = hh_del->hh_next;                                  \
  }                                                                              \
  if (hh_del->hh_next) {                                                         \
    hh_del->hh_next->hh_prev = hh_del->hh_prev;                                  \
  }                                                                              \
} while (0)
761 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *   ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *   ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(tbl)                                                 \
do {                                                                             \
  unsigned _he_bkt;                                                              \
  unsigned _he_bkt_i;                                                            \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt;                                   \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt;                                  \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc(                              \
      2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket));                   \
  if (!_he_new_buckets) { uthash_fatal( "out of memory"); }                      \
  memset(_he_new_buckets, 0,                                                     \
      2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket));                   \
  tbl->ideal_chain_maxlen =                                                      \
      (tbl->num_items >> (tbl->log2_num_buckets+1U)) +                           \
      (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 1U : 0U);         \
  tbl->nonideal_items = 0;                                                       \
  /* rehash every item from the old buckets into the doubled bucket array */     \
  for (_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) {               \
    _he_thh = tbl->buckets[ _he_bkt_i ].hh_head;                                 \
    while (_he_thh != NULL) {                                                    \
      _he_hh_nxt = _he_thh->hh_next;                                             \
      HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt);                \
      _he_newbkt = &(_he_new_buckets[ _he_bkt ]);                                \
      if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) {                     \
        tbl->nonideal_items++;                                                   \
        _he_newbkt->expand_mult = _he_newbkt->count /                            \
                                  tbl->ideal_chain_maxlen;                       \
      }                                                                          \
      _he_thh->hh_prev = NULL;                                                   \
      _he_thh->hh_next = _he_newbkt->hh_head;                                    \
      if (_he_newbkt->hh_head != NULL) {                                         \
        _he_newbkt->hh_head->hh_prev = _he_thh;                                  \
      }                                                                          \
      _he_newbkt->hh_head = _he_thh;                                             \
      _he_thh = _he_hh_nxt;                                                      \
    }                                                                            \
  }                                                                              \
  uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) );   \
  tbl->num_buckets *= 2U;                                                        \
  tbl->log2_num_buckets++;                                                       \
  tbl->buckets = _he_new_buckets;                                                \
  /* if most items still clump after expanding twice in a row, stop expanding */ \
  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ?           \
      (tbl->ineff_expands+1U) : 0U;                                              \
  if (tbl->ineff_expands > 1U) {                                                 \
    tbl->noexpand = 1;                                                           \
    uthash_noexpand_fyi(tbl);                                                    \
  }                                                                              \
  uthash_expand_fyi(tbl);                                                        \
} while (0)
838 
839 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
/* Convenience wrapper around HASH_SRT for the default handle name "hh".
 * cmpfcn takes two element pointers and returns <0, 0, or >0 (strcmp-like). */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
/* Sort the hash's application-order doubly-linked list in place using
 * Simon Tatham's bottom-up O(n log n) mergesort.
 *
 * hh     - name of the UT_hash_handle field in the element struct
 * head   - head pointer of the hash; rewritten to the new first element
 * cmpfcn - strcmp-style comparison taking two element pointers
 *
 * The merge width (_hs_insize) doubles each pass until a pass performs
 * at most one merge. Only the app-order prev/next pointers and
 * tbl->tail are rewired; the bucket chains (hh_prev/hh_next) are left
 * untouched, so lookups are unaffected. The sort is stable: on ties
 * (cmpfcn result <= 0) the element from the earlier sublist is taken.
 * Because next/prev hold element pointers while the merge walks
 * UT_hash_handle pointers, each advance adds the handle offset
 * (head)->hh.tbl->hho to the element address. No-op when head is NULL. */
#define HASH_SRT(hh,head,cmpfcn)                                                 \
do {                                                                             \
  unsigned _hs_i;                                                                \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;               \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;            \
  if (head != NULL) {                                                            \
      _hs_insize = 1;                                                            \
      _hs_looping = 1;                                                           \
      _hs_list = &((head)->hh);                                                  \
      while (_hs_looping != 0U) {                                                \
          _hs_p = _hs_list;                                                      \
          _hs_list = NULL;                                                       \
          _hs_tail = NULL;                                                       \
          _hs_nmerges = 0;                                                       \
          while (_hs_p != NULL) {                                                \
              _hs_nmerges++;                                                     \
              _hs_q = _hs_p;                                                     \
              _hs_psize = 0;                                                     \
              for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) {                   \
                  _hs_psize++;                                                   \
                  _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?              \
                          ((void*)((char*)(_hs_q->next) +                        \
                          (head)->hh.tbl->hho)) : NULL);                         \
                  if (! (_hs_q) ) { break; }                                     \
              }                                                                  \
              _hs_qsize = _hs_insize;                                            \
              while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) {\
                  if (_hs_psize == 0U) {                                         \
                      _hs_e = _hs_q;                                             \
                      _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?          \
                              ((void*)((char*)(_hs_q->next) +                    \
                              (head)->hh.tbl->hho)) : NULL);                     \
                      _hs_qsize--;                                               \
                  } else if ( (_hs_qsize == 0U) || (_hs_q == NULL) ) {           \
                      _hs_e = _hs_p;                                             \
                      if (_hs_p != NULL){                                        \
                          _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ?      \
                                  ((void*)((char*)(_hs_p->next) +                \
                                  (head)->hh.tbl->hho)) : NULL);                 \
                      }                                                          \
                      _hs_psize--;                                               \
                  } else if ((                                                   \
                      cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
                             ) <= 0) {                                           \
                      _hs_e = _hs_p;                                             \
                      if (_hs_p != NULL){                                        \
                          _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ?      \
                                  ((void*)((char*)(_hs_p->next) +                \
                                  (head)->hh.tbl->hho)) : NULL);                 \
                      }                                                          \
                      _hs_psize--;                                               \
                  } else {                                                       \
                      _hs_e = _hs_q;                                             \
                      _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ?          \
                              ((void*)((char*)(_hs_q->next) +                    \
                              (head)->hh.tbl->hho)) : NULL);                     \
                      _hs_qsize--;                                               \
                  }                                                              \
                  if ( _hs_tail != NULL ) {                                      \
                      _hs_tail->next = ((_hs_e != NULL) ?                        \
                            ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL);          \
                  } else {                                                       \
                      _hs_list = _hs_e;                                          \
                  }                                                              \
                  if (_hs_e != NULL) {                                           \
                      _hs_e->prev = ((_hs_tail != NULL) ?                        \
                          ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL);         \
                  }                                                              \
                  _hs_tail = _hs_e;                                              \
              }                                                                  \
              _hs_p = _hs_q;                                                     \
          }                                                                      \
          if (_hs_tail != NULL){                                                 \
              _hs_tail->next = NULL;                                             \
          }                                                                      \
          if ( _hs_nmerges <= 1U ) {                                             \
              _hs_looping=0;                                                     \
              (head)->hh.tbl->tail = _hs_tail;                                   \
              DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list));      \
          }                                                                      \
          _hs_insize *= 2U;                                                      \
      }                                                                          \
      HASH_FSCK(hh,head);                                                        \
  }                                                                              \
} while (0)
930 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
 *
 * hh_dst - name of the destination UT_hash_handle field in the struct
 * dst    - destination hash head (may start NULL; table is created on
 *          the first selected item)
 * hh_src - name of the source UT_hash_handle field
 * src    - source hash head (NULL makes the whole macro a no-op)
 * cond   - predicate: cond(elt) nonzero selects elt
 *
 * Selected items are appended to dst's app-order list and inserted into
 * the appropriate dst bucket; key/keylen/hashv are copied from the
 * source handle, so the key is not re-hashed.
 * NOTE(review): items added here are not inserted into dst's bloom
 * filter (no HASH_BLOOM_ADD below) -- with HASH_BLOOM enabled, finds in
 * dst could miss selected items; verify against upstream uthash fixes. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond)                              \
do {                                                                             \
  unsigned _src_bkt, _dst_bkt;                                                   \
  void *_last_elt=NULL, *_elt;                                                   \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL;                         \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst));                 \
  if (src != NULL) {                                                             \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) {     \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head;                \
          _src_hh != NULL;                                                       \
          _src_hh = _src_hh->hh_next) {                                          \
          _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh);                       \
          if (cond(_elt)) {                                                      \
            _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho);               \
            _dst_hh->key = _src_hh->key;                                         \
            _dst_hh->keylen = _src_hh->keylen;                                   \
            _dst_hh->hashv = _src_hh->hashv;                                     \
            _dst_hh->prev = _last_elt;                                           \
            _dst_hh->next = NULL;                                                \
            if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; }             \
            if (dst == NULL) {                                                   \
              DECLTYPE_ASSIGN(dst,_elt);                                         \
              HASH_MAKE_TABLE(hh_dst,dst);                                       \
            } else {                                                             \
              _dst_hh->tbl = (dst)->hh_dst.tbl;                                  \
            }                                                                    \
            HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt);    \
            HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh);            \
            (dst)->hh_dst.tbl->num_items++;                                      \
            _last_elt = _elt;                                                    \
            _last_elt_hh = _dst_hh;                                              \
          }                                                                      \
      }                                                                          \
    }                                                                            \
  }                                                                              \
  HASH_FSCK(hh_dst,dst);                                                         \
} while (0)
973 
/* Free all hash bookkeeping (bucket array, bloom filter, table struct)
 * and set head to NULL. The items themselves are NOT freed -- the
 * caller owns them. Safe no-op when head is already NULL. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if (head != NULL) {                                                            \
    uthash_free((head)->hh.tbl->buckets,                                         \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));      \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    (head)=NULL;                                                                 \
  }                                                                              \
} while (0)
984 
/* Bytes of bookkeeping overhead for this hash (handles + bucket array +
 * table struct + bloom filter), or 0 for an empty (NULL) hash. Does not
 * include the user items themselves.
 * Fix: parenthesize the macro argument in "(head) != NULL" -- the old
 * "head != NULL" mis-parsed when callers passed a conditional or comma
 * expression as head (CERT PRE01-C). */
#define HASH_OVERHEAD(hh,head)                                                   \
 (((head) != NULL) ? (                                                           \
 (size_t)(((head)->hh.tbl->num_items   * sizeof(UT_hash_handle))   +             \
          ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket))   +             \
           sizeof(UT_hash_table)                                   +             \
           (HASH_BLOOM_BYTELEN))) : 0U)
991 
/* Iterate over all items in app order, robust against deletion of the
 * current item: (tmp) is advanced to the successor before the body runs.
 * hh   - name of the UT_hash_handle field in the element struct
 * head - hash head (may be NULL; the loop then runs zero times)
 * el   - loop variable, the current element
 * tmp  - scratch variable of the same pointer type as el
 * Fix: parenthesize the head/tmp macro arguments in the NULL tests --
 * the old "head!=NULL" / "tmp!=NULL" mis-parsed when a conditional or
 * comma expression was passed (CERT PRE01-C). */
#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)(((head) != NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)(((tmp) != NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((tmp)=DECLTYPE(el)(((head) != NULL)?(head)->hh.next:NULL));  \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)(((tmp) != NULL)?(tmp)->hh.next:NULL)))
#endif
1001 
/* obtain a count of items in the hash (0 for a NULL/empty hash).
 * HASH_COUNT assumes the handle name "hh"; HASH_CNT takes it explicitly.
 * Fix: parenthesize the head argument in "(head) != NULL" -- the old
 * "head != NULL" mis-parsed when a conditional or comma expression was
 * passed as head (CERT PRE01-C). */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) (((head) != NULL)?((head)->hh.tbl->num_items):0U)
1005 
/* One slot of the bucket array: an intrusive singly-anchored chain of
 * hash handles plus its length and per-bucket expansion tuning. */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first handle in this bucket's chain */
   unsigned count;                  /* number of handles in the chain */

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;
1025 
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1u        /* stored in UT_hash_table.signature */
#define HASH_BLOOM_SIGNATURE 0xb12220f2u  /* stored in UT_hash_table.bloom_sig */
1029 
/* Per-hash bookkeeping, allocated once per hash (shared by all items).
 * Reached from any item via its UT_hash_handle's tbl pointer. */
typedef struct UT_hash_table {
   UT_hash_bucket *buckets;              /* bucket array, num_buckets entries */
   unsigned num_buckets, log2_num_buckets; /* bucket count (a power of two) and its base-2 log */
   unsigned num_items;                   /* total items currently in the hash */
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;  /* bloom filter bit vector */
   uint8_t bloom_nbits; /* log2 of the bloom filter size in bits */
#endif

} UT_hash_table;
1062 
/* Per-item bookkeeping embedded in the user's structure. Each handle
 * participates in two intrusive lists: the bucket chain
 * (hh_prev/hh_next, which link handles) and the application-order list
 * (prev/next, which point at the enclosing elements, not at handles). */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;      /* shared table bookkeeping for this hash */
   void *prev;                     /* prev element in app order */
   void *next;                     /* next element in app order */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order */
   void *key;                      /* ptr to enclosing struct's key */
   unsigned keylen;                /* enclosing struct's key len */
   unsigned hashv;                 /* result of hash-fcn(key) */
} UT_hash_handle;
1073 
1074 #endif /* UTHASH_H */