xref: /openbmc/qemu/util/qht.c (revision 416296a9)
1 /*
2  * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
3  *
4  * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
5  *
6  * License: GNU GPL, version 2 or later.
7  *   See the COPYING file in the top-level directory.
8  *
9  * Assumptions:
10  * - NULL cannot be inserted/removed as a pointer value.
11  * - Trying to insert an already-existing hash-pointer pair is OK. However,
12  *   it is not OK to insert into the same hash table different hash-pointer
13  *   pairs that share the same pointer value but have different hashes.
14  * - Lookups are performed under an RCU read-side critical section; removals
15  *   must wait for a grace period to elapse before freeing removed objects.
16  *
17  * Features:
18  * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
19  *   Lookups that are concurrent with writes to the same bucket will retry
20  *   via a seqlock; iterators acquire all bucket locks and therefore can be
21  *   concurrent with lookups and are serialized wrt writers.
22  * - Writes (i.e. insertions/removals) can be concurrent with writes to
23  *   different buckets; writes to the same bucket are serialized through a lock.
24  * - Optional auto-resizing: the hash table resizes up if the load surpasses
25  *   a certain threshold. Resizing is done concurrently with readers; writes
26  *   are serialized with the resize operation.
27  *
28  * The key structure is the bucket, which is cacheline-sized. Buckets
29  * contain a few hash values and pointers; the u32 hash values are stored in
30  * full so that resizing is fast. Having this structure instead of directly
31  * chaining items has two advantages:
32  * - Failed lookups fail fast, and touch a minimum number of cache lines.
33  * - Resizing the hash table with concurrent lookups is easy.
34  *
35  * There are two types of buckets:
36  * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
37  * 2. all "non-head" buckets (i.e. all others) are members of a chain that
38  *    starts from a head bucket.
39  * Note that the seqlock and spinlock of a head bucket apply to all buckets
40  * chained to it; these two fields are unused in non-head buckets.
41  *
42  * On removals, we move the last valid item in the chain to the position of the
43  * just-removed entry. This makes lookups slightly faster, since the moment an
44  * invalid entry is found, the (failed) lookup is over.
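 *   For instance (illustrative, assuming 4 entries per bucket): removing B
 *   from the chain [A B C D] -> [E F _ _] moves F into B's old slot, leaving
 *   [A F C D] -> [E _ _ _]; the chain stays packed, so a scan can stop at the
 *   first NULL pointer it sees.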
45  *
46  * Resizing is done by taking all bucket spinlocks (so that no other writers can
47  * race with us) and then copying all entries into a new hash map. Then, the
48  * ht->map pointer is set, and the old map is freed once no RCU readers can see
49  * it anymore.
50  *
51  * Writers check for concurrent resizes by comparing ht->map before and after
52  * acquiring their bucket lock. If they don't match, a resize has occurred
53  * while the bucket spinlock was being acquired.
54  *
55  * Related Work:
56  * - Idea of cacheline-sized buckets with full hashes taken from:
57  *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
58  *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
59  * - Why not RCU-based hash tables? They would allow us to get rid of the
60  *   seqlock, but resizing would take forever since RCU read critical
61  *   sections in QEMU take quite a long time.
62  *   More info on relativistic hash tables:
63  *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
64  *     Tables via Relativistic Programming", USENIX ATC'11.
65  *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
66  *     https://lwn.net/Articles/612021/
67  */
68 #include "qemu/osdep.h"
69 #include "qemu/qht.h"
70 #include "qemu/atomic.h"
71 #include "qemu/rcu.h"
72 
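/*
 * Illustrative usage sketch (not part of the original file; compiled out so
 * that this translation unit is unchanged): it exercises the public API
 * implemented below under the assumptions listed in the header comment,
 * i.e. non-NULL pointers and a stable pointer<->hash mapping. The entry
 * struct, the hash value and the comparison callback are hypothetical; only
 * the qht_*() calls and QHT_MODE_AUTO_RESIZE come from this file and
 * "qemu/qht.h".
 */
#if 0
struct my_entry {
    uint64_t key;
    int value;
};

/* qht_lookup_func_t-style predicate: return true iff @obj matches @userp */
static bool my_entry_cmp(const void *obj, const void *userp)
{
    const struct my_entry *e = obj;
    const uint64_t *key = userp;

    return e->key == *key;
}

static void qht_usage_sketch(void)
{
    struct qht ht;
    struct my_entry e = { .key = 42, .value = 1 };
    uint32_t hash = (uint32_t)e.key; /* stand-in for a real hash function */
    struct my_entry *found;

    qht_init(&ht, 1 << 10, QHT_MODE_AUTO_RESIZE);

    /* inserting the same hash-pointer pair a second time would return false */
    qht_insert(&ht, &e, hash);

    /* lookups must run inside an RCU read-side critical section */
    rcu_read_lock();
    found = qht_lookup(&ht, my_entry_cmp, &e.key, hash);
    rcu_read_unlock();
    assert(found == &e);

    /* the caller must only free @e after an RCU grace period has elapsed */
    qht_remove(&ht, &e, hash);

    qht_destroy(&ht);
}
#endif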
73 //#define QHT_DEBUG
74 
75 /*
76  * We want to avoid false sharing of cache lines. Most systems have 64-byte
77  * cache lines so we go with it for simplicity.
78  *
79  * Note that systems with smaller cache lines will be fine (the struct is
80  * almost 64 bytes); systems with larger cache lines might suffer from
81  * some false sharing.
82  */
83 #define QHT_BUCKET_ALIGN 64
84 
85 /* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
86 #if HOST_LONG_BITS == 32
87 #define QHT_BUCKET_ENTRIES 6
88 #else /* 64-bit */
89 #define QHT_BUCKET_ENTRIES 4
90 #endif
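/*
 * Illustrative arithmetic (assuming 4-byte QemuSpin and QemuSeqLock and the
 * usual ILP32/LP64 data models), counting lock + sequence + hashes[] +
 * pointers[] + next:
 *   64-bit: 4 + 4 + 4 * 4 + 4 * 8 + 8 = 64 bytes
 *   32-bit: 4 + 4 + 6 * 4 + 6 * 4 + 4 = 60 bytes
 * Both fit within QHT_BUCKET_ALIGN; the QEMU_BUILD_BUG_ON below the struct
 * definition enforces this at compile time.
 */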
91 
92 /*
93  * Note: reading partially-updated pointers in @pointers could lead to
94  * segfaults. We thus access them with atomic_read/set; this guarantees
95  * that the compiler makes all those accesses atomic. We also need the
96  * volatile-like behavior in atomic_read, since otherwise the compiler
97  * might refetch the pointer.
98  * The atomic_read()s are of course not necessary when the bucket lock is held.
99  *
100  * If both ht->lock and b->lock are grabbed, ht->lock should always
101  * be grabbed first.
102  */
103 struct qht_bucket {
104     QemuSpin lock;
105     QemuSeqLock sequence;
106     uint32_t hashes[QHT_BUCKET_ENTRIES];
107     void *pointers[QHT_BUCKET_ENTRIES];
108     struct qht_bucket *next;
109 } QEMU_ALIGNED(QHT_BUCKET_ALIGN);
110 
111 QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
112 
113 /**
114  * struct qht_map - structure to track an array of buckets
115  * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
116  *       find the whole struct.
117  * @buckets: array of head buckets. It is constant once the map is created.
118  * @n_buckets: number of head buckets. It is constant once the map is created.
119  * @n_added_buckets: number of added (i.e. "non-head") buckets
120  * @n_added_buckets_threshold: threshold to trigger an upward resize once the
121  *                             number of added buckets surpasses it.
122  *
123  * Buckets are tracked in what we call a "map", i.e. this structure.
124  */
125 struct qht_map {
126     struct rcu_head rcu;
127     struct qht_bucket *buckets;
128     size_t n_buckets;
129     size_t n_added_buckets;
130     size_t n_added_buckets_threshold;
131 };
132 
133 /* trigger a resize when n_added_buckets > n_buckets / div */
134 #define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
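/*
 * For example, a map with 1024 head buckets is grown once more than
 * 1024 / 8 = 128 non-head buckets have been chained onto it.
 */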
135 
136 static void qht_do_resize(struct qht *ht, struct qht_map *new);
137 static void qht_grow_maybe(struct qht *ht);
138 
139 #ifdef QHT_DEBUG
140 
141 #define qht_debug_assert(X) do { assert(X); } while (0)
142 
143 static void qht_bucket_debug__locked(struct qht_bucket *b)
144 {
145     bool seen_empty = false;
146     bool corrupt = false;
147     int i;
148 
149     do {
150         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
151             if (b->pointers[i] == NULL) {
152                 seen_empty = true;
153                 continue;
154             }
155             if (seen_empty) {
156                 fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
157                         __func__, b, i, b->hashes[i], b->pointers[i]);
158                 corrupt = true;
159             }
160         }
161         b = b->next;
162     } while (b);
163     qht_debug_assert(!corrupt);
164 }
165 
166 static void qht_map_debug__all_locked(struct qht_map *map)
167 {
168     int i;
169 
170     for (i = 0; i < map->n_buckets; i++) {
171         qht_bucket_debug__locked(&map->buckets[i]);
172     }
173 }
174 #else
175 
176 #define qht_debug_assert(X) do { (void)(X); } while (0)
177 
178 static inline void qht_bucket_debug__locked(struct qht_bucket *b)
179 { }
180 
181 static inline void qht_map_debug__all_locked(struct qht_map *map)
182 { }
183 #endif /* QHT_DEBUG */
184 
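/*
 * Illustrative example: with QHT_BUCKET_ENTRIES == 4, sizing for 1000
 * elements yields pow2ceil(1000 / 4) == 256 head buckets. The count is kept
 * a power of two so that qht_map_to_bucket() can select a bucket with a mask
 * instead of a modulo.
 */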
185 static inline size_t qht_elems_to_buckets(size_t n_elems)
186 {
187     return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
188 }
189 
190 static inline void qht_head_init(struct qht_bucket *b)
191 {
192     memset(b, 0, sizeof(*b));
193     qemu_spin_init(&b->lock);
194     seqlock_init(&b->sequence);
195 }
196 
197 static inline
198 struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
199 {
200     return &map->buckets[hash & (map->n_buckets - 1)];
201 }
202 
203 /* acquire all bucket locks from a map */
204 static void qht_map_lock_buckets(struct qht_map *map)
205 {
206     size_t i;
207 
208     for (i = 0; i < map->n_buckets; i++) {
209         struct qht_bucket *b = &map->buckets[i];
210 
211         qemu_spin_lock(&b->lock);
212     }
213 }
214 
215 static void qht_map_unlock_buckets(struct qht_map *map)
216 {
217     size_t i;
218 
219     for (i = 0; i < map->n_buckets; i++) {
220         struct qht_bucket *b = &map->buckets[i];
221 
222         qemu_spin_unlock(&b->lock);
223     }
224 }
225 
226 /*
227  * Call with at least a bucket lock held.
228  * @map should be the value read before acquiring the lock (or locks).
229  */
230 static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
231 {
232     return map != ht->map;
233 }
234 
235 /*
236  * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
237  *
238  * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
239  *
240  * Note: callers cannot have ht->lock held.
241  */
242 static inline
243 void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
244 {
245     struct qht_map *map;
246 
247     map = atomic_rcu_read(&ht->map);
248     qht_map_lock_buckets(map);
249     if (likely(!qht_map_is_stale__locked(ht, map))) {
250         *pmap = map;
251         return;
252     }
253     qht_map_unlock_buckets(map);
254 
255     /* we raced with a resize; acquire ht->lock to see the updated ht->map */
256     qemu_mutex_lock(&ht->lock);
257     map = ht->map;
258     qht_map_lock_buckets(map);
259     qemu_mutex_unlock(&ht->lock);
260     *pmap = map;
261     return;
262 }
263 
264 /*
265  * Get a head bucket and lock it, making sure its parent map is not stale.
266  * @pmap is filled with a pointer to the bucket's parent map.
267  *
268  * Unlock with qemu_spin_unlock(&b->lock).
269  *
270  * Note: callers cannot have ht->lock held.
271  */
272 static inline
273 struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
274                                              struct qht_map **pmap)
275 {
276     struct qht_bucket *b;
277     struct qht_map *map;
278 
279     map = atomic_rcu_read(&ht->map);
280     b = qht_map_to_bucket(map, hash);
281 
282     qemu_spin_lock(&b->lock);
283     if (likely(!qht_map_is_stale__locked(ht, map))) {
284         *pmap = map;
285         return b;
286     }
287     qemu_spin_unlock(&b->lock);
288 
289     /* we raced with a resize; acquire ht->lock to see the updated ht->map */
290     qemu_mutex_lock(&ht->lock);
291     map = ht->map;
292     b = qht_map_to_bucket(map, hash);
293     qemu_spin_lock(&b->lock);
294     qemu_mutex_unlock(&ht->lock);
295     *pmap = map;
296     return b;
297 }
298 
299 static inline bool qht_map_needs_resize(struct qht_map *map)
300 {
301     return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
302 }
303 
304 static inline void qht_chain_destroy(struct qht_bucket *head)
305 {
306     struct qht_bucket *curr = head->next;
307     struct qht_bucket *prev;
308 
309     while (curr) {
310         prev = curr;
311         curr = curr->next;
312         qemu_vfree(prev);
313     }
314 }
315 
316 /* pass only an orphan map */
317 static void qht_map_destroy(struct qht_map *map)
318 {
319     size_t i;
320 
321     for (i = 0; i < map->n_buckets; i++) {
322         qht_chain_destroy(&map->buckets[i]);
323     }
324     qemu_vfree(map->buckets);
325     g_free(map);
326 }
327 
328 static struct qht_map *qht_map_create(size_t n_buckets)
329 {
330     struct qht_map *map;
331     size_t i;
332 
333     map = g_malloc(sizeof(*map));
334     map->n_buckets = n_buckets;
335 
336     map->n_added_buckets = 0;
337     map->n_added_buckets_threshold = n_buckets /
338         QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
339 
340     /* let tiny hash tables add at least one non-head bucket */
341     if (unlikely(map->n_added_buckets_threshold == 0)) {
342         map->n_added_buckets_threshold = 1;
343     }
344 
345     map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
346                                  sizeof(*map->buckets) * n_buckets);
347     for (i = 0; i < n_buckets; i++) {
348         qht_head_init(&map->buckets[i]);
349     }
350     return map;
351 }
352 
353 void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
354 {
355     struct qht_map *map;
356     size_t n_buckets = qht_elems_to_buckets(n_elems);
357 
358     ht->mode = mode;
359     qemu_mutex_init(&ht->lock);
360     map = qht_map_create(n_buckets);
361     atomic_rcu_set(&ht->map, map);
362 }
363 
364 /* call only when there are no readers/writers left */
365 void qht_destroy(struct qht *ht)
366 {
367     qht_map_destroy(ht->map);
368     memset(ht, 0, sizeof(*ht));
369 }
370 
371 static void qht_bucket_reset__locked(struct qht_bucket *head)
372 {
373     struct qht_bucket *b = head;
374     int i;
375 
376     seqlock_write_begin(&head->sequence);
377     do {
378         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
379             if (b->pointers[i] == NULL) {
380                 goto done;
381             }
382             b->hashes[i] = 0;
383             atomic_set(&b->pointers[i], NULL);
384         }
385         b = b->next;
386     } while (b);
387  done:
388     seqlock_write_end(&head->sequence);
389 }
390 
391 /* call with all bucket locks held */
392 static void qht_map_reset__all_locked(struct qht_map *map)
393 {
394     size_t i;
395 
396     for (i = 0; i < map->n_buckets; i++) {
397         qht_bucket_reset__locked(&map->buckets[i]);
398     }
399     qht_map_debug__all_locked(map);
400 }
401 
402 void qht_reset(struct qht *ht)
403 {
404     struct qht_map *map;
405 
406     qht_map_lock_buckets__no_stale(ht, &map);
407     qht_map_reset__all_locked(map);
408     qht_map_unlock_buckets(map);
409 }
410 
411 bool qht_reset_size(struct qht *ht, size_t n_elems)
412 {
413     struct qht_map *new = NULL;
414     struct qht_map *map;
415     size_t n_buckets;
416     bool resize = false;
417 
418     n_buckets = qht_elems_to_buckets(n_elems);
419 
420     qemu_mutex_lock(&ht->lock);
421     map = ht->map;
422     if (n_buckets != map->n_buckets) {
423         new = qht_map_create(n_buckets);
424         resize = true;
425     }
426 
427     qht_map_lock_buckets(map);
428     qht_map_reset__all_locked(map);
429     if (resize) {
430         qht_do_resize(ht, new);
431     }
432     qht_map_unlock_buckets(map);
433     qemu_mutex_unlock(&ht->lock);
434 
435     return resize;
436 }
437 
438 static inline
439 void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
440                     const void *userp, uint32_t hash)
441 {
442     struct qht_bucket *b = head;
443     int i;
444 
445     do {
446         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
447             if (b->hashes[i] == hash) {
448                 /* The pointer is dereferenced before seqlock_read_retry,
449                  * so (unlike qht_insert__locked) we need to use
450                  * atomic_rcu_read here.
451                  */
452                 void *p = atomic_rcu_read(&b->pointers[i]);
453 
454                 if (likely(p) && likely(func(p, userp))) {
455                     return p;
456                 }
457             }
458         }
459         b = atomic_rcu_read(&b->next);
460     } while (b);
461 
462     return NULL;
463 }
464 
465 static __attribute__((noinline))
466 void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
467                            const void *userp, uint32_t hash)
468 {
469     unsigned int version;
470     void *ret;
471 
472     do {
473         version = seqlock_read_begin(&b->sequence);
474         ret = qht_do_lookup(b, func, userp, hash);
475     } while (seqlock_read_retry(&b->sequence, version));
476     return ret;
477 }
478 
479 void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
480                  uint32_t hash)
481 {
482     struct qht_bucket *b;
483     struct qht_map *map;
484     unsigned int version;
485     void *ret;
486 
487     map = atomic_rcu_read(&ht->map);
488     b = qht_map_to_bucket(map, hash);
489 
490     version = seqlock_read_begin(&b->sequence);
491     ret = qht_do_lookup(b, func, userp, hash);
492     if (likely(!seqlock_read_retry(&b->sequence, version))) {
493         return ret;
494     }
495     /*
496      * Removing the do/while from the fastpath gives a 4% perf. increase when
497      * running a 100%-lookup microbenchmark.
498      */
499     return qht_lookup__slowpath(b, func, userp, hash);
500 }
501 
502 /* call with head->lock held */
503 static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
504                                struct qht_bucket *head, void *p, uint32_t hash,
505                                bool *needs_resize)
506 {
507     struct qht_bucket *b = head;
508     struct qht_bucket *prev = NULL;
509     struct qht_bucket *new = NULL;
510     int i;
511 
512     do {
513         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
514             if (b->pointers[i]) {
515                 if (unlikely(b->pointers[i] == p)) {
516                     return false;
517                 }
518             } else {
519                 goto found;
520             }
521         }
522         prev = b;
523         b = b->next;
524     } while (b);
525 
526     b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
527     memset(b, 0, sizeof(*b));
528     new = b;
529     i = 0;
530     atomic_inc(&map->n_added_buckets);
531     if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
532         *needs_resize = true;
533     }
534 
535  found:
536     /* found an empty entry: acquire the seqlock and write */
537     seqlock_write_begin(&head->sequence);
538     if (new) {
539         atomic_rcu_set(&prev->next, b);
540     }
541     b->hashes[i] = hash;
542     /* smp_wmb() implicit in seqlock_write_begin.  */
543     atomic_set(&b->pointers[i], p);
544     seqlock_write_end(&head->sequence);
545     return true;
546 }
547 
548 static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
549 {
550     struct qht_map *map;
551 
552     /*
553      * If the lock is taken it probably means there's an ongoing resize,
554      * so bail out.
555      */
556     if (qemu_mutex_trylock(&ht->lock)) {
557         return;
558     }
559     map = ht->map;
560     /* another thread might have just performed the resize we were after */
561     if (qht_map_needs_resize(map)) {
562         struct qht_map *new = qht_map_create(map->n_buckets * 2);
563 
564         qht_map_lock_buckets(map);
565         qht_do_resize(ht, new);
566         qht_map_unlock_buckets(map);
567     }
568     qemu_mutex_unlock(&ht->lock);
569 }
570 
571 bool qht_insert(struct qht *ht, void *p, uint32_t hash)
572 {
573     struct qht_bucket *b;
574     struct qht_map *map;
575     bool needs_resize = false;
576     bool ret;
577 
578     /* NULL pointers are not supported */
579     qht_debug_assert(p);
580 
581     b = qht_bucket_lock__no_stale(ht, hash, &map);
582     ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
583     qht_bucket_debug__locked(b);
584     qemu_spin_unlock(&b->lock);
585 
586     if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
587         qht_grow_maybe(ht);
588     }
589     return ret;
590 }
591 
592 static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
593 {
594     if (pos == QHT_BUCKET_ENTRIES - 1) {
595         if (b->next == NULL) {
596             return true;
597         }
598         return b->next->pointers[0] == NULL;
599     }
600     return b->pointers[pos + 1] == NULL;
601 }
602 
603 static void
604 qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
605 {
606     qht_debug_assert(!(to == from && i == j));
607     qht_debug_assert(to->pointers[i]);
608     qht_debug_assert(from->pointers[j]);
609 
610     to->hashes[i] = from->hashes[j];
611     atomic_set(&to->pointers[i], from->pointers[j]);
612 
613     from->hashes[j] = 0;
614     atomic_set(&from->pointers[j], NULL);
615 }
616 
617 /*
618  * Find the last valid entry in @head, and swap it with @orig[pos], which has
619  * just been invalidated.
620  */
621 static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
622 {
623     struct qht_bucket *b = orig;
624     struct qht_bucket *prev = NULL;
625     int i;
626 
627     if (qht_entry_is_last(orig, pos)) {
628         orig->hashes[pos] = 0;
629         atomic_set(&orig->pointers[pos], NULL);
630         return;
631     }
632     do {
633         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
634             if (b->pointers[i]) {
635                 continue;
636             }
637             if (i > 0) {
638                 return qht_entry_move(orig, pos, b, i - 1);
639             }
640             qht_debug_assert(prev);
641             return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
642         }
643         prev = b;
644         b = b->next;
645     } while (b);
646     /* no free entries other than orig[pos], so swap it with the last one */
647     qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
648 }
649 
650 /* call with b->lock held */
651 static inline
652 bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
653                         const void *p, uint32_t hash)
654 {
655     struct qht_bucket *b = head;
656     int i;
657 
658     do {
659         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
660             void *q = b->pointers[i];
661 
662             if (unlikely(q == NULL)) {
663                 return false;
664             }
665             if (q == p) {
666                 qht_debug_assert(b->hashes[i] == hash);
667                 seqlock_write_begin(&head->sequence);
668                 qht_bucket_remove_entry(b, i);
669                 seqlock_write_end(&head->sequence);
670                 return true;
671             }
672         }
673         b = b->next;
674     } while (b);
675     return false;
676 }
677 
678 bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
679 {
680     struct qht_bucket *b;
681     struct qht_map *map;
682     bool ret;
683 
684     /* NULL pointers are not supported */
685     qht_debug_assert(p);
686 
687     b = qht_bucket_lock__no_stale(ht, hash, &map);
688     ret = qht_remove__locked(map, b, p, hash);
689     qht_bucket_debug__locked(b);
690     qemu_spin_unlock(&b->lock);
691     return ret;
692 }
693 
694 static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
695                                    qht_iter_func_t func, void *userp)
696 {
697     int i;
698 
699     do {
700         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
701             if (b->pointers[i] == NULL) {
702                 return;
703             }
704             func(ht, b->pointers[i], b->hashes[i], userp);
705         }
706         b = b->next;
707     } while (b);
708 }
709 
710 /* call with all of the map's locks held */
711 static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
712                                             qht_iter_func_t func, void *userp)
713 {
714     size_t i;
715 
716     for (i = 0; i < map->n_buckets; i++) {
717         qht_bucket_iter(ht, &map->buckets[i], func, userp);
718     }
719 }
720 
721 void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
722 {
723     struct qht_map *map;
724 
725     map = atomic_rcu_read(&ht->map);
726     qht_map_lock_buckets(map);
727     /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
728     qht_map_iter__all_locked(ht, map, func, userp);
729     qht_map_unlock_buckets(map);
730 }
731 
732 static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
733 {
734     struct qht_map *new = userp;
735     struct qht_bucket *b = qht_map_to_bucket(new, hash);
736 
737     /* no need to acquire b->lock because no thread has seen this map yet */
738     qht_insert__locked(ht, new, b, p, hash, NULL);
739 }
740 
741 /*
742  * Call with ht->lock and all bucket locks held.
743  *
744  * Creating the @new map here would add unnecessary delay while all the locks
745  * are held--holding up the bucket locks is particularly bad, since no writes
746  * can occur while these are held. Thus, we let callers create the new map,
747  * hopefully without the bucket locks held.
748  */
749 static void qht_do_resize(struct qht *ht, struct qht_map *new)
750 {
751     struct qht_map *old;
752 
753     old = ht->map;
754     g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);
755 
756     qht_map_iter__all_locked(ht, old, qht_map_copy, new);
757     qht_map_debug__all_locked(new);
758 
759     atomic_rcu_set(&ht->map, new);
760     call_rcu(old, qht_map_destroy, rcu);
761 }
762 
763 bool qht_resize(struct qht *ht, size_t n_elems)
764 {
765     size_t n_buckets = qht_elems_to_buckets(n_elems);
766     bool ret = false;
767 
768     qemu_mutex_lock(&ht->lock);
769     if (n_buckets != ht->map->n_buckets) {
770         struct qht_map *new;
771         struct qht_map *old = ht->map;
772 
773         new = qht_map_create(n_buckets);
774         qht_map_lock_buckets(old);
775         qht_do_resize(ht, new);
776         qht_map_unlock_buckets(old);
777         ret = true;
778     }
779     qemu_mutex_unlock(&ht->lock);
780 
781     return ret;
782 }
783 
784 /* pass @stats to qht_statistics_destroy() when done */
785 void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
786 {
787     struct qht_map *map;
788     int i;
789 
790     map = atomic_rcu_read(&ht->map);
791 
792     stats->used_head_buckets = 0;
793     stats->entries = 0;
794     qdist_init(&stats->chain);
795     qdist_init(&stats->occupancy);
796     /* bail out if the qht has not yet been initialized */
797     if (unlikely(map == NULL)) {
798         stats->head_buckets = 0;
799         return;
800     }
801     stats->head_buckets = map->n_buckets;
802 
803     for (i = 0; i < map->n_buckets; i++) {
804         struct qht_bucket *head = &map->buckets[i];
805         struct qht_bucket *b;
806         unsigned int version;
807         size_t buckets;
808         size_t entries;
809         int j;
810 
811         do {
812             version = seqlock_read_begin(&head->sequence);
813             buckets = 0;
814             entries = 0;
815             b = head;
816             do {
817                 for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
818                     if (atomic_read(&b->pointers[j]) == NULL) {
819                         break;
820                     }
821                     entries++;
822                 }
823                 buckets++;
824                 b = atomic_rcu_read(&b->next);
825             } while (b);
826         } while (seqlock_read_retry(&head->sequence, version));
827 
828         if (entries) {
829             qdist_inc(&stats->chain, buckets);
830             qdist_inc(&stats->occupancy,
831                       (double)entries / QHT_BUCKET_ENTRIES / buckets);
832             stats->used_head_buckets++;
833             stats->entries += entries;
834         } else {
835             qdist_inc(&stats->occupancy, 0);
836         }
837     }
838 }
839 
840 void qht_statistics_destroy(struct qht_stats *stats)
841 {
842     qdist_destroy(&stats->occupancy);
843     qdist_destroy(&stats->chain);
844 }
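/*
 * Illustrative sketch (not part of the original file; compiled out): how a
 * caller might consume the statistics filled in above. Only fields written
 * by qht_statistics_init() are read; the %zu conversions assume the size_t
 * fields declared in "qemu/qht.h".
 */
#if 0
static void qht_print_stats_sketch(struct qht *ht)
{
    struct qht_stats stats;

    qht_statistics_init(ht, &stats);
    fprintf(stderr, "entries: %zu, used head buckets: %zu/%zu\n",
            stats.entries, stats.used_head_buckets, stats.head_buckets);
    qht_statistics_destroy(&stats);
}
#endif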
845