lib/rhashtable.c
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE       64UL
#define HASH_MIN_SIZE           4U
#define BUCKET_LOCKS_PER_CPU    32UL

union nested_table {
        union nested_table __rcu *table;
        struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        const unsigned int len = 1 << shift;
        unsigned int i;

        ntbl = rcu_dereference_raw(ntbl->table);
        if (!ntbl)
                return;

        if (size > len) {
                size >>= shift;
                for (i = 0; i < len; i++)
                        nested_table_free(ntbl + i, size);
        }

        kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int len = 1 << tbl->nest;
        union nested_table *ntbl;
        unsigned int i;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

        for (i = 0; i < len; i++)
                nested_table_free(ntbl + i, size);

        kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl->nest)
                nested_bucket_table_free(tbl);

        free_bucket_spinlocks(tbl->locks);
        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
                                              bool leaf)
{
        union nested_table *ntbl;
        int i;

        ntbl = rcu_dereference(*prev);
        if (ntbl)
                return ntbl;

        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

        if (ntbl && leaf) {
                for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
                        INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }

        rcu_assign_pointer(*prev, ntbl);

        return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                                                      size_t nbuckets,
                                                      gfp_t gfp)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        struct bucket_table *tbl;
        size_t size;

        if (nbuckets < (1 << (shift + 1)))
                return NULL;

        size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

        tbl = kzalloc(size, gfp);
        if (!tbl)
                return NULL;

        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
                                false)) {
                kfree(tbl);
                return NULL;
        }

        tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

        return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size, max_locks;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        else
                tbl = kvzalloc(size, gfp);

        size = nbuckets;

        if (tbl == NULL && gfp != GFP_KERNEL) {
                tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
                nbuckets = 0;
        }
        if (tbl == NULL)
                return NULL;

        tbl->size = size;

        max_locks = size >> 1;
        if (tbl->nest)
                max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

        if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
                                   ht->p.locks_mul, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        tbl->hash_rnd = get_random_u32();

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

        return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
        struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        if (new_tbl->nest)
                goto out;

        err = -ENOENT;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
                                   unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;
        int err;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!(err = rhashtable_rehash_one(ht, old_hash)))
                ;

        if (err == -ENOENT) {
                old_tbl->rehash++;
                err = 0;
        }
        spin_unlock_bh(old_bucket_lock);

        return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         * As cmpxchg() provides strong barriers, we do not need
         * rcu_assign_pointer().
         */

        if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
                return -EEXIST;

        return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;
        int err;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
                err = rhashtable_rehash_chain(ht, old_hash);
                if (err)
                        return err;
                cond_resched();
        }

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
                                   struct bucket_table *old_tbl,
                                   unsigned int size)
{
        struct bucket_table *new_tbl;
        int err;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:         the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e. to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int nelems = atomic_read(&ht->nelems);
        unsigned int size = 0;

        if (nelems)
                size = roundup_pow_of_two(nelems * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        return rhashtable_rehash_alloc(ht, old_tbl, size);
}
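
/*
 * Worked sizing example for rhashtable_shrink() above (illustrative only,
 * not part of the original source): with nelems = 50 and min_size = 4 the
 * target is roundup_pow_of_two(50 * 3 / 2) = roundup_pow_of_two(75) = 128,
 * so a 256-bucket table is shrunk to 128 buckets while a table that is
 * already 128 buckets or smaller is left alone.
 */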

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                err = rhashtable_shrink(ht);
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

        if (!err)
                err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
                                    struct bucket_table *tbl)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);

        size = tbl->size;

        err = -EBUSY;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                goto fail;

        err = -ENOMEM;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL)
                goto fail;

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;

fail:
        /* Do not fail the insert if someone else did a rehash. */
        if (likely(rcu_access_pointer(tbl->future_tbl)))
                return 0;

        /* Schedule async rehash to retry allocation in process context. */
        if (err == -ENOMEM)
                schedule_work(&ht->run_work);

        return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
                                   struct bucket_table *tbl, unsigned int hash,
                                   const void *key, struct rhash_head *obj)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct rhash_head __rcu **pprev;
        struct rhash_head *head;
        int elasticity;

        elasticity = RHT_ELASTICITY;
        pprev = rht_bucket_var(tbl, hash);
        rht_for_each_continue(head, *pprev, tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;

                elasticity--;
                if (!key ||
                    (ht->p.obj_cmpfn ?
                     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
                     rhashtable_compare(&arg, rht_obj(ht, head)))) {
                        pprev = &head->next;
                        continue;
                }

                if (!ht->rhlist)
                        return rht_obj(ht, head);

                list = container_of(obj, struct rhlist_head, rhead);
                plist = container_of(head, struct rhlist_head, rhead);

                RCU_INIT_POINTER(list->next, plist);
                head = rht_dereference_bucket(head->next, tbl, hash);
                RCU_INIT_POINTER(list->rhead.next, head);
                rcu_assign_pointer(*pprev, obj);

                return NULL;
        }

        if (elasticity <= 0)
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                                                  struct bucket_table *tbl,
                                                  unsigned int hash,
                                                  struct rhash_head *obj,
                                                  void *data)
{
        struct rhash_head __rcu **pprev;
        struct bucket_table *new_tbl;
        struct rhash_head *head;

        if (!IS_ERR_OR_NULL(data))
                return ERR_PTR(-EEXIST);

        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (new_tbl)
                return new_tbl;

        if (PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        if (unlikely(rht_grow_above_max(ht, tbl)))
                return ERR_PTR(-E2BIG);

        if (unlikely(rht_grow_above_100(ht, tbl)))
                return ERR_PTR(-EAGAIN);

        pprev = rht_bucket_insert(ht, tbl, hash);
        if (!pprev)
                return ERR_PTR(-ENOMEM);

        head = rht_dereference_bucket(*pprev, tbl, hash);

        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
                struct rhlist_head *list;

                list = container_of(obj, struct rhlist_head, rhead);
                RCU_INIT_POINTER(list->next, NULL);
        }

        rcu_assign_pointer(*pprev, obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

        return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                                   struct rhash_head *obj)
{
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int hash;
        spinlock_t *lock;
        void *data;

        tbl = rcu_dereference(ht->tbl);

        /* All insertions must grab the oldest table containing
         * the hashed bucket that is yet to be rehashed.
         */
        for (;;) {
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                lock = rht_bucket_lock(tbl, hash);
                spin_lock_bh(lock);

                if (tbl->rehash <= hash)
                        break;

                spin_unlock_bh(lock);
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }

        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
        new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
        if (PTR_ERR(new_tbl) != -EEXIST)
                data = ERR_CAST(new_tbl);

        while (!IS_ERR_OR_NULL(new_tbl)) {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                spin_lock_nested(rht_bucket_lock(tbl, hash),
                                 SINGLE_DEPTH_NESTING);

                data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                if (PTR_ERR(new_tbl) != -EEXIST)
                        data = ERR_CAST(new_tbl);

                spin_unlock(rht_bucket_lock(tbl, hash));
        }

        spin_unlock_bh(lock);

        if (PTR_ERR(data) == -EAGAIN)
                data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
                               -EAGAIN);

        return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                             struct rhash_head *obj)
{
        void *data;

        do {
                rcu_read_lock();
                data = rhashtable_try_insert(ht, key, obj);
                rcu_read_unlock();
        } while (PTR_ERR(data) == -EAGAIN);

        return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:         Table to walk over
 * @iter:       Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptible context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;
        iter->end_of_table = 0;

        spin_lock(&ht->lock);
        iter->walker.tbl =
                rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
        list_add(&iter->walker.list, &iter->walker.tbl->walkers);
        spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:       Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        spin_lock(&iter->ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:       Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;
        bool rhlist = ht->rhlist;

        rcu_read_lock();

        spin_lock(&ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&ht->lock);

        if (iter->end_of_table)
                return 0;
        if (!iter->walker.tbl) {
                iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                iter->slot = 0;
                iter->skip = 0;
                return -EAGAIN;
        }

        if (iter->p && !rhlist) {
                /*
                 * We need to validate that 'p' is still in the table, and
                 * if so, update 'skip'
                 */
                struct rhash_head *p;
                int skip = 0;
                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                        skip++;
                        if (p == iter->p) {
                                iter->skip = skip;
                                goto found;
                        }
                }
                iter->p = NULL;
        } else if (iter->p && rhlist) {
                /* Need to validate that 'list' is still in the table, and
                 * if so, update 'skip' and 'p'.
                 */
                struct rhash_head *p;
                struct rhlist_head *list;
                int skip = 0;
                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                        for (list = container_of(p, struct rhlist_head, rhead);
                             list;
                             list = rcu_dereference(list->next)) {
                                skip++;
                                if (list == iter->list) {
                                        iter->p = p;
                                        iter->skip = skip;
                                        goto found;
                                }
                        }
                }
                iter->p = NULL;
        }
found:
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:       Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker.tbl;
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;

        if (!tbl)
                return NULL;

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (rhlist) {
                                list = container_of(p, struct rhlist_head,
                                                    rhead);
                                do {
                                        if (!skip)
                                                goto next;
                                        skip--;
                                        list = rcu_dereference(list->next);
                                } while (list);

                                continue;
                        }
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        iter->list = list;
                        return rht_obj(ht, rhlist ? &list->rhead : p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker.tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        } else {
                iter->end_of_table = true;
        }

        return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:       Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;

        if (p) {
                if (!rhlist || !(list = rcu_dereference(list->next))) {
                        p = rcu_dereference(p->next);
                        list = container_of(p, struct rhlist_head, rhead);
                }
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        iter->list = list;
                        return rht_obj(ht, rhlist ? &list->rhead : p);
                }

                /* At the end of this slot, switch to next one and then find
                 * next entry from that point.
                 */
                iter->skip = 0;
                iter->slot++;
        }

        return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:       Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;

        if (p)
                return rht_obj(ht, ht->rhlist ? &list->rhead : p);

        /* No object found in current iter, find next one in the table. */

        if (iter->skip) {
                /* A nonzero skip value points to the next entry in the table
                 * beyond that last one that was found. Decrement skip so
                 * we find the current value. __rhashtable_walk_find_next
                 * will restore the original value of skip assuming that
                 * the table hasn't changed.
                 */
                iter->skip--;
        }

        return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:       Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker.tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker.list, &tbl->walkers);
        else
                iter->walker.tbl = NULL;
        spin_unlock(&ht->lock);

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
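
/*
 * Illustrative walker usage (a sketch, not part of the original source;
 * "my_ht", "struct test_obj" and do_something() are placeholder names
 * borrowed from the rhashtable_init() examples below). An -EAGAIN from
 * rhashtable_walk_next() means a resize rewound the walk, so the loop
 * simply continues:
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *
 *      rhashtable_walk_enter(&my_ht, &iter);
 *      rhashtable_walk_start(&iter);
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              do_something(obj);
 *      }
 *      rhashtable_walk_stop(&iter);
 *      rhashtable_walk_exit(&iter);
 */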

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        size_t retsize;

        if (params->nelem_hint)
                retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                              (unsigned long)params->min_size);
        else
                retsize = max(HASH_DEFAULT_SIZE,
                              (unsigned long)params->min_size);

        return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:         hash table to be initialized
 * @params:     configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int                     key;
 *      void *                  my_member;
 *      struct rhash_head       node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head       node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *      struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = jhash,
 *      .obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        /* Cap total entries at 2^31 to avoid nelems overflow. */
        ht->max_elems = 1u << 31;

        if (params->max_size) {
                ht->p.max_size = rounddown_pow_of_two(params->max_size);
                if (ht->p.max_size < ht->max_elems / 2)
                        ht->max_elems = ht->p.max_size * 2;
        }

        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

        size = rounded_hashtable_size(&ht->p);

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
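
/*
 * Usage sketch for the fixed-length-key configuration from Example 1 above
 * (illustrative only, not part of the original source; "my_ht" and the error
 * handling are placeholders, and the rhashtable_*_fast() helpers are assumed
 * to come from <linux/rhashtable.h>):
 *
 *      static struct rhashtable my_ht;
 *      struct test_obj *obj, *found;
 *      int key = 42;
 *      int err;
 *
 *      err = rhashtable_init(&my_ht, &params);
 *      if (err)
 *              return err;
 *
 *      obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *      if (!obj)
 *              return -ENOMEM;
 *      obj->key = key;
 *      err = rhashtable_insert_fast(&my_ht, &obj->node, params);
 *
 *      rcu_read_lock();
 *      found = rhashtable_lookup_fast(&my_ht, &key, params);
 *      rcu_read_unlock();
 *
 *      rhashtable_remove_fast(&my_ht, &obj->node, params);
 *      rhashtable_destroy(&my_ht);
 */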

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:        hash list table to be initialized
 * @params:     configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
        int err;

        err = rhashtable_init(&hlt->ht, params);
        hlt->ht.rhlist = true;
        return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
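
/*
 * Duplicate-key usage sketch (illustrative only, not part of the original
 * source; assumes the rhltable_*() helpers and rhl_for_each_entry_rcu()
 * from <linux/rhashtable.h>, with "my_hlt", "params", "pos" and
 * do_something() as placeholders). Objects embed a struct rhlist_head
 * instead of a struct rhash_head, several objects may share one key, and
 * a lookup returns the per-key list:
 *
 *      struct test_obj {
 *              int                     key;
 *              struct rhlist_head      list_node;
 *      };
 *
 *      err = rhltable_insert(&my_hlt, &obj->list_node, params);
 *
 *      rcu_read_lock();
 *      list = rhltable_lookup(&my_hlt, &key, params);
 *      rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *              do_something(obj);
 *      rcu_read_unlock();
 */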

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
                                void (*free_fn)(void *ptr, void *arg),
                                void *arg)
{
        struct rhlist_head *list;

        if (!ht->rhlist) {
                free_fn(rht_obj(ht, obj), arg);
                return;
        }

        list = container_of(obj, struct rhlist_head, rhead);
        do {
                obj = &list->rhead;
                list = rht_dereference(list->next, ht);
                free_fn(rht_obj(ht, obj), arg);
        } while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:         the hash table to destroy
 * @free_fn:    callback to release resources of element
 * @arg:        pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        struct bucket_table *tbl, *next_tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
restart:
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        cond_resched();
                        for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                rhashtable_free_one(ht, pos, free_fn, arg);
                }
        }

        next_tbl = rht_dereference(tbl->future_tbl, ht);
        bucket_table_free(tbl);
        if (next_tbl) {
                tbl = next_tbl;
                goto restart;
        }
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
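
/*
 * Teardown sketch (illustrative only, not part of the original source;
 * "my_ht" and "struct test_obj" are placeholders): free_fn receives each
 * remaining element plus the opaque @arg, so a plain kfree() wrapper is
 * usually enough. If RCU readers may still hold references, defer the
 * actual freeing (e.g. via kfree_rcu()) as the comment above requires.
 *
 *      static void free_test_obj(void *ptr, void *arg)
 *      {
 *              kfree(ptr);
 *      }
 *
 *      rhashtable_free_and_destroy(&my_ht, free_test_obj, NULL);
 */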

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
                                            unsigned int hash)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        static struct rhash_head __rcu *rhnull =
                (struct rhash_head __rcu *)NULLS_MARKER(0);
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int subhash = hash;
        union nested_table *ntbl;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
        subhash >>= tbl->nest;

        while (ntbl && size > (1 << shift)) {
                index = subhash & ((1 << shift) - 1);
                ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
                                                  tbl, hash);
                size >>= shift;
                subhash >>= shift;
        }

        if (!ntbl)
                return &rhnull;

        return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
                                                   struct bucket_table *tbl,
                                                   unsigned int hash)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        union nested_table *ntbl;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        hash >>= tbl->nest;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
                                  size <= (1 << shift));

        while (ntbl && size > (1 << shift)) {
                index = hash & ((1 << shift) - 1);
                size >>= shift;
                hash >>= shift;
                ntbl = nested_table_alloc(ht, &ntbl[index].table,
                                          size <= (1 << shift));
        }

        if (!ntbl)
                return NULL;

        return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);