/*
 * rcutorture.c: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *     ./rcu <nreaders> rperf [ <seconds> ]
 *         Run a read-side performance test with the specified
 *         number of readers for <seconds> seconds.
 *     ./rcu <nupdaters> uperf [ <seconds> ]
 *         Run an update-side performance test with the specified
 *         number of updaters and specified duration.
 *     ./rcu <nreaders> perf [ <seconds> ]
 *         Run a combined read/update performance test with the specified
 *         number of readers and one updater and specified duration.
 *
 * The above tests produce output as follows:
 *
 * n_reads: 46008000  n_updates: 146026  nreaders: 2  nupdaters: 1 duration: 1
 * ns/read: 43.4707  ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
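 *
 * For example, in the sample output above the 46008000 reads were done by
 * nreaders = 2 threads over a 1-second run, so ns/read works out to
 * (1 s * 2 readers * 1e9 ns/s) / 46008000 reads ~= 43.47, and the single
 * updater gives (1 s * 1 * 1e9 ns/s) / 146026 updates ~= 6848 ns/update.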
 *
 *     ./rcu <nreaders> stress [ <seconds> ]
 *         Run a stress test with the specified number of readers and
 *         one updater.
 *
 * This test produces output as follows:
 *
 * n_reads: 114633217  n_updates: 3903415  n_mberror: 0
 * rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line is a histogram of staleness: entry i counts reads that observed
 * data that was i update cycles old.  A reader can only legitimately see
 * the current entry or the one it just replaced, so a correct RCU
 * implementation will have all but the first two numbers zero, as in the
 * sample output above.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */

/*
 * Test variables.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "qemu/thread.h"

int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN  1
#define GOFLAG_STOP 2

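/* Written by the main thread to start and stop the worker threads. */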
static volatile int goflag = GOFLAG_INIT;

#define RCU_READ_RUN 1000

#define NR_THREADS 100
static QemuThread threads[NR_THREADS];
static struct rcu_reader_data *data[NR_THREADS];
static int n_threads;

/*
 * Statistical counts
 *
 * These are the sum of local counters at the end of a run.
 * Updates are protected by a mutex.
 */
static QemuMutex counts_mutex;
long long n_reads = 0LL;
long n_updates = 0L;

static void create_thread(void *(*func)(void *))
{
    if (n_threads >= NR_THREADS) {
        fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
        exit(-1);
    }
    qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
                       QEMU_THREAD_JOINABLE);
    n_threads++;
}

static void wait_all_threads(void)
{
    int i;

    for (i = 0; i < n_threads; i++) {
        qemu_thread_join(&threads[i]);
    }
    n_threads = 0;
}

/*
 * Performance test.
 */

static void *rcu_read_perf_test(void *arg)
{
    int i;
    long long n_reads_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = get_ptr_rcu_reader();
    qatomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        for (i = 0; i < RCU_READ_RUN; i++) {
            rcu_read_lock();
            rcu_read_unlock();
        }
        n_reads_local += RCU_READ_RUN;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

static void *rcu_update_perf_test(void *arg)
{
    long long n_updates_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = get_ptr_rcu_reader();
    qatomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        n_updates_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_updates += n_updates_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

static void perftestinit(void)
{
    nthreadsrunning = 0;
}

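/*
 * Wait until all worker threads have checked in, let them run for
 * "duration" seconds, then collect the results and print the average
 * cost of each operation.
 */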
static void perftestrun(int nthreads, int duration, int nreaders, int nupdaters)
{
    while (qatomic_read(&nthreadsrunning) < nthreads) {
        g_usleep(1000);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    printf("n_reads: %lld  n_updates: %ld  nreaders: %d  nupdaters: %d duration: %d\n",
           n_reads, n_updates, nreaders, nupdaters, duration);
    printf("ns/read: %g  ns/update: %g\n",
           ((duration * 1000 * 1000 * 1000. * (double)nreaders) /
            (double)n_reads),
           ((duration * 1000 * 1000 * 1000. * (double)nupdaters) /
            (double)n_updates));
    exit(0);
}

static void perftest(int nreaders, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_perf_test);
    }
    create_thread(rcu_update_perf_test);
    perftestrun(i + 1, duration, nreaders, 1);
}

static void rperftest(int nreaders, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_perf_test);
    }
    perftestrun(i, duration, nreaders, 0);
}

static void uperftest(int nupdaters, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nupdaters; i++) {
        create_thread(rcu_update_perf_test);
    }
    perftestrun(i, duration, 0, nupdaters);
}

/*
 * Stress test.
 */

#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
    int age;    /* update cycles since this entry was rcu_stress_current */
    int mbtest; /* cleared and then set around the age reset; a reader
                 * that sees 0 here hit a memory-ordering violation */
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
struct rcu_stress *rcu_stress_current;
int n_mberror;

/* Updates protected by counts_mutex */
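/* rcu_stress_count[i] counts reads that observed an entry whose age was i;
 * the final element collects any out-of-range age values. */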
long long rcu_stress_count[RCU_STRESS_PIPE_LEN + 1];


static void *rcu_read_stress_test(void *arg)
{
    int i;
    struct rcu_stress *p;
    int pc;
    long long n_reads_local = 0;
    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
    volatile int garbage = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = get_ptr_rcu_reader();
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        p = qatomic_rcu_read(&rcu_stress_current);
        if (qatomic_read(&p->mbtest) == 0) {
            qatomic_inc(&n_mberror);
        }
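        /*
         * Nested read-side critical section: burn a few cycles here so
         * that the outer critical section stays open across some work
         * and nested rcu_read_lock() calls get exercised.
         */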
        rcu_read_lock();
        for (i = 0; i < 100; i++) {
            garbage++;
        }
        rcu_read_unlock();
        pc = qatomic_read(&p->age);
        rcu_read_unlock();
        if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
            pc = RCU_STRESS_PIPE_LEN;
        }
        rcu_stress_local[pc]++;
        n_reads_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

/*
 * Stress Test Updater
 *
 * The updater cycles around, pointing rcu_stress_current at each of the
 * rcu_stress_array entries in turn and resetting that entry's age.  It
 * then increments the age of all the other entries.  The ages are read
 * under rcu_read_lock() and their distribution is accumulated.  The final
 * histogram indicates how many previously-current rcu_stress entries were
 * still visible to readers before the RCU grace period completed.
 */
static void *rcu_update_stress_test(void *arg)
{
    int i, rcu_stress_idx = 0;
    struct rcu_stress *cp = qatomic_read(&rcu_stress_current);

    rcu_register_thread();
    *(struct rcu_reader_data **)arg = get_ptr_rcu_reader();

    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }

    while (goflag == GOFLAG_RUN) {
        struct rcu_stress *p;
        rcu_stress_idx++;
        if (rcu_stress_idx >= RCU_STRESS_PIPE_LEN) {
            rcu_stress_idx = 0;
        }
        p = &rcu_stress_array[rcu_stress_idx];
        /* catching up with ourselves would be a bug */
        assert(p != cp);
        qatomic_set(&p->mbtest, 0);
        smp_mb();
        qatomic_set(&p->age, 0);
        qatomic_set(&p->mbtest, 1);
        qatomic_rcu_set(&rcu_stress_current, p);
        cp = p;
        /*
         * New RCU structure is now live, update pipe counts on old
         * ones.
         */
        for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
            if (i != rcu_stress_idx) {
                qatomic_set(&rcu_stress_array[i].age,
                            rcu_stress_array[i].age + 1);
            }
        }
        synchronize_rcu();
        n_updates++;
    }

    rcu_unregister_thread();
    return NULL;
}
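/*
 * The fake updaters do not touch the shared data; they just issue
 * synchronize_rcu() calls concurrently with the real updater so that
 * several threads wait for grace periods at the same time.
 */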
static void *rcu_fake_update_stress_test(void *arg)
{
    rcu_register_thread();

    *(struct rcu_reader_data **)arg = get_ptr_rcu_reader();
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        g_usleep(1000);
    }

    rcu_unregister_thread();
    return NULL;
}

static void stresstest(int nreaders, int duration)
{
    int i;

    rcu_stress_current = &rcu_stress_array[0];
    rcu_stress_current->age = 0;
    rcu_stress_current->mbtest = 1;
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_stress_test);
    }
    create_thread(rcu_update_stress_test);
    for (i = 0; i < 5; i++) {
        create_thread(rcu_fake_update_stress_test);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    printf("n_reads: %lld  n_updates: %ld  n_mberror: %d\n",
           n_reads, n_updates, n_mberror);
    printf("rcu_stress_count:");
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        printf(" %lld", rcu_stress_count[i]);
    }
    printf("\n");
    exit(0);
}

/* GTest interface */

static void gtest_stress(int nreaders, int duration)
{
    int i;

    rcu_stress_current = &rcu_stress_array[0];
    rcu_stress_current->age = 0;
    rcu_stress_current->mbtest = 1;
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_stress_test);
    }
    create_thread(rcu_update_stress_test);
    for (i = 0; i < 5; i++) {
        create_thread(rcu_fake_update_stress_test);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    g_assert_cmpint(n_mberror, ==, 0);
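    /*
     * A reader can only observe the current entry or the one it just
     * replaced, so every count from index 2 onwards must be zero.
     */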
    for (i = 2; i <= RCU_STRESS_PIPE_LEN; i++) {
        g_assert_cmpint(rcu_stress_count[i], ==, 0);
    }
}

static void gtest_stress_1_1(void)
{
    gtest_stress(1, 1);
}

static void gtest_stress_10_1(void)
{
    gtest_stress(10, 1);
}

static void gtest_stress_1_5(void)
{
    gtest_stress(1, 5);
}

static void gtest_stress_10_5(void)
{
    gtest_stress(10, 5);
}

/*
 * Main program.
 */

static void usage(int argc, char *argv[])
{
    fprintf(stderr,
            "Usage: %s [nreaders [ perf | rperf | uperf | stress [duration]]]\n",
            argv[0]);
    exit(-1);
}

int main(int argc, char *argv[])
{
    int nreaders = 1;
    int duration = 1;

    qemu_mutex_init(&counts_mutex);
    if (argc >= 2 && argv[1][0] == '-') {
        g_test_init(&argc, &argv, NULL);
        if (g_test_quick()) {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_1);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_1);
        } else {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_5);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_5);
        }
        return g_test_run();
    }

    if (argc >= 2) {
        nreaders = strtoul(argv[1], NULL, 0);
    }
    if (argc > 3) {
        duration = strtoul(argv[3], NULL, 0);
    }
    if (argc < 3 || strcmp(argv[2], "stress") == 0) {
        stresstest(nreaders, duration);
    } else if (strcmp(argv[2], "rperf") == 0) {
        rperftest(nreaders, duration);
    } else if (strcmp(argv[2], "uperf") == 0) {
        uperftest(nreaders, duration);
    } else if (strcmp(argv[2], "perf") == 0) {
        perftest(nreaders, duration);
    }
    usage(argc, argv);
    return 0;
}