/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
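
/*
 * A worked sizing example for the lock striping above (illustrative
 * numbers, not from any particular machine): with 16 possible CPUs and
 * the default locks_mul of 128, size = roundup_pow_of_two(16 * 128) =
 * 2048, but a freshly allocated 64-bucket table caps this at
 * 64 >> 1 = 32 locks, so locks_mask = 31 and each lock guards two
 * buckets.
 */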

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
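
/*
 * While one or more resizes are pending, the tables form a chain through
 * future_tbl (ht->tbl -> future_tbl -> future_tbl -> ...), and
 * rhashtable_last_table() walks to its tail so that new entries always
 * land in the newest table.
 */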

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
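
/*
 * Note the move order above: the entry is linked into its new bucket
 * before it is unlinked from the old one, and the old chain is drained
 * from its tail (each call moves the last entry), so a concurrent RCU
 * reader sees the entry in at least one of the two tables at all times.
 */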

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
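
/*
 * A non-zero return (-EAGAIN) here means yet another table was attached
 * to new_tbl->future_tbl while this rehash was running; the deferred
 * worker below reschedules itself in that case, so chained resizes are
 * eventually drained.
 */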

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that will
 * not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
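
/*
 * A worked example of the target size computation (illustrative
 * numbers): with 20 elements left, 20 * 3 / 2 = 30 rounds up to 32
 * buckets, so a table that had grown to 128 buckets shrinks to 32, the
 * smallest power of two that still keeps the load below the 75% growth
 * threshold.
 */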

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}
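
/*
 * Returns true once the chain at @hash already holds ht->elasticity
 * entries. rhashtable_insert_slow() below treats that as -EAGAIN and
 * forces a rehash into a new table with a fresh hash_rnd, since an
 * over-long chain on a non-full table suggests either bad luck or an
 * attack on the hash function.
 */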

int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}
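
/*
 * Return convention for the slow path above: NULL means the object was
 * inserted, a table pointer means the caller should rehash (via
 * rhashtable_insert_rehash()) and retry against that table, and an
 * ERR_PTR() carries a hard failure such as -EEXIST or -E2BIG.
 */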

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep, so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
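
/*
 * A minimal usage sketch for the walk API (my_ht, my_obj and process()
 * are hypothetical); both rhashtable_walk_start() and
 * rhashtable_walk_next() may report -EAGAIN after a resize, which only
 * rewinds the walk and is not a fatal error:
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	if (rhashtable_walk_init(&my_ht, &iter) < 0)
 *		return;
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		process(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */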

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
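
/*
 * E.g. a nelem_hint of 100 (an illustrative value) gives
 * 100 * 4 / 3 = 133, which rounds up to an initial size of 256 buckets,
 * keeping the expected load at or below the 75% expansion threshold.
 */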

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
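
/*
 * A minimal setup sketch building on Configuration Example 1 above
 * (the my_ht name is hypothetical, error handling elided):
 *
 *	struct rhashtable my_ht;
 *
 *	if (rhashtable_init(&my_ht, &params))
 *		return -ENOMEM;
 *	...
 *	rhashtable_destroy(&my_ht);
 */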

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}