// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Davidlohr Bueso.
 *
 * Benchmark the various operations allowed for epoll_ctl(2).
 * The idea is to concurrently stress a single epoll instance.
 */
#ifdef HAVE_EVENTFD_SUPPORT
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"

#include <err.h>

#define printinfo(fmt, arg...) \
	do { if (__verbose) printf(fmt, ## arg); } while (0)

static unsigned int nthreads = 0;
static unsigned int nsecs = 8;
static bool done, __verbose, randomize;

/*
 * epoll related shared variables.
 */

/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4

enum {
	OP_EPOLL_ADD,
	OP_EPOLL_MOD,
	OP_EPOLL_DEL,
	EPOLL_NR_OPS,
};

static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;

/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;

static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats all_stats[EPOLL_NR_OPS];
static pthread_cond_t thread_parent, thread_worker;

struct worker {
	int tid;
	pthread_t thread;
	unsigned long ops[EPOLL_NR_OPS];
	int *fdmap;
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
	OPT_BOOLEAN('n', "noaffinity", &noaffinity, "Disables CPU affinity"),
	OPT_UINTEGER('N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
	OPT_BOOLEAN('R', "randomize", &randomize, "Perform random operations on random fds"),
	OPT_BOOLEAN('v', "verbose", &__verbose, "Verbose mode"),
	OPT_END()
};

static const char * const bench_epoll_ctl_usage[] = {
	"perf bench epoll ctl <options>",
	NULL
};

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

static void nest_epollfd(void)
{
	unsigned int i;
	struct epoll_event ev;

	if (nested > EPOLL_MAXNESTS)
		nested = EPOLL_MAXNESTS;
	printinfo("Nesting level(s): %d\n", nested);

	epollfdp = calloc(nested, sizeof(int));
	if (!epollfdp)
		err(EXIT_FAILURE, "calloc");

	for (i = 0; i < nested; i++) {
		epollfdp[i] = epoll_create(1);
		if (epollfdp[i] < 0)	/* check the fd we just created, not the main one */
			err(EXIT_FAILURE, "epoll_create");
	}

	ev.events = EPOLLHUP; /* anything */
	ev.data.u64 = i; /* any number */

	/* chain the nested instances: epollfdp[i] is watched by epollfdp[i - 1] */
	for (i = nested - 1; i; i--) {
		if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
			      epollfdp[i], &ev) < 0)
			err(EXIT_FAILURE, "epoll_ctl");
	}

	/* finally hang the outermost nested instance off the main epollfd */
	if (epoll_ctl(epollfd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
		err(EXIT_FAILURE, "epoll_ctl");
}
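
/*
 * Perform a single epoll_ctl(2) operation (ADD, MOD or DEL) on the
 * shared epoll instance; only successful calls are accounted, so
 * EEXIST/ENOENT failures from randomized runs simply don't count.
 */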
static inline
void do_epoll_op(struct worker *w, int op, int fd)
{
	int error;
	struct epoll_event ev;

	ev.events = EPOLLIN;
	ev.data.u64 = fd;

	switch (op) {
	case OP_EPOLL_ADD:
		error = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
		break;
	case OP_EPOLL_MOD:
		ev.events = EPOLLOUT;
		error = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &ev);
		break;
	case OP_EPOLL_DEL:
		error = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL);
		break;
	default:
		error = 1;
		break;
	}

	if (!error)
		w->ops[op]++;
}

static inline void do_random_epoll_op(struct worker *w)
{
	unsigned long rnd1 = random(), rnd2 = random();
	int op, fd;

	fd = w->fdmap[rnd1 % nfds];
	op = rnd2 % EPOLL_NR_OPS;

	do_epoll_op(w, op, fd);
}

static void *workerfn(void *arg)
{
	unsigned int i;
	struct worker *w = (struct worker *) arg;
	struct timespec ts = { .tv_sec = 0,
			       .tv_nsec = 250 };

	/* wait until every worker has been created, then start together */
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	/* Let 'em loose */
	do {
		/* random */
		if (randomize) {
			do_random_epoll_op(w);
		} else {
			for (i = 0; i < nfds; i++) {
				do_epoll_op(w, OP_EPOLL_ADD, w->fdmap[i]);
				do_epoll_op(w, OP_EPOLL_MOD, w->fdmap[i]);
				do_epoll_op(w, OP_EPOLL_DEL, w->fdmap[i]);
			}
		}

		nanosleep(&ts, NULL);
	} while (!done);

	return NULL;
}

/* pre-add pct% of the worker's fdmap to the epoll instance */
static void init_fdmaps(struct worker *w, int pct)
{
	unsigned int i;
	int inc;
	struct epoll_event ev;

	if (!pct)
		return;

	inc = 100 / pct;
	for (i = 0; i < nfds; i += inc) {
		ev.data.fd = w->fdmap[i];
		ev.events = EPOLLIN;

		if (epoll_ctl(epollfd, EPOLL_CTL_ADD, w->fdmap[i], &ev) < 0)
			err(EXIT_FAILURE, "epoll_ctl");
	}
}
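
/*
 * Create the workers: each thread gets its own map of nfds nonblocking
 * eventfds to operate on and, unless -n/--noaffinity was given, is
 * pinned to a CPU in round-robin order.
 */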
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
	pthread_attr_t thread_attr, *attrp = NULL;
	cpu_set_t cpuset;
	unsigned int i, j;
	int ret = 0;

	if (!noaffinity)
		pthread_attr_init(&thread_attr);

	for (i = 0; i < nthreads; i++) {
		struct worker *w = &worker[i];

		w->tid = i;
		w->fdmap = calloc(nfds, sizeof(int));
		if (!w->fdmap)
			return 1;

		for (j = 0; j < nfds; j++) {
			w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
			if (w->fdmap[j] < 0)
				err(EXIT_FAILURE, "eventfd");
		}

		/*
		 * Let's add 50% of the fdmap to the epoll instance, and
		 * do it before any threads are started; otherwise there is
		 * an initial bias of the call failing (mod and del ops).
		 */
		if (randomize)
			init_fdmaps(w, 50);

		if (!noaffinity) {
			CPU_ZERO(&cpuset);
			CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
				&cpuset);

			ret = pthread_attr_setaffinity_np(&thread_attr,
							  sizeof(cpu_set_t), &cpuset);
			if (ret)
				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

			attrp = &thread_attr;
		}

		ret = pthread_create(&w->thread, attrp, workerfn, (void *)w);
		if (ret)
			err(EXIT_FAILURE, "pthread_create");
	}

	if (!noaffinity)
		pthread_attr_destroy(&thread_attr);

	return ret;
}

/* average the per-op counts across all threads and print them */
static void print_summary(void)
{
	int i;
	unsigned long avg[EPOLL_NR_OPS];
	double stddev[EPOLL_NR_OPS];

	for (i = 0; i < EPOLL_NR_OPS; i++) {
		avg[i] = avg_stats(&all_stats[i]);
		stddev[i] = stddev_stats(&all_stats[i]);
	}

	printf("\nAveraged %ld ADD operations (+- %.2f%%)\n",
	       avg[OP_EPOLL_ADD], rel_stddev_stats(stddev[OP_EPOLL_ADD],
	       avg[OP_EPOLL_ADD]));
	printf("Averaged %ld MOD operations (+- %.2f%%)\n",
	       avg[OP_EPOLL_MOD], rel_stddev_stats(stddev[OP_EPOLL_MOD],
	       avg[OP_EPOLL_MOD]));
	printf("Averaged %ld DEL operations (+- %.2f%%)\n",
	       avg[OP_EPOLL_DEL], rel_stddev_stats(stddev[OP_EPOLL_DEL],
	       avg[OP_EPOLL_DEL]));
}
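
/*
 * Entry point for 'perf bench epoll ctl': set up the main (and any
 * nested) epoll instance, bump RLIMIT_NOFILE to fit all the eventfds,
 * run the workers for nsecs seconds and print per-op statistics.
 */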
int bench_epoll_ctl(int argc, const char **argv)
{
	int j, ret = 0;
	struct sigaction act;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;
	struct rlimit rl, prevrl;
	unsigned int i;

	argc = parse_options(argc, argv, options, bench_epoll_ctl_usage, 0);
	if (argc) {
		usage_with_options(bench_epoll_ctl_usage, options);
		exit(EXIT_FAILURE);
	}

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		goto errmem;

	/* a single, main epoll instance */
	epollfd = epoll_create(1);
	if (epollfd < 0)
		err(EXIT_FAILURE, "epoll_create");

	/*
	 * Deal with nested epolls, if any.
	 */
	if (nested)
		nest_epollfd();

	/* default to the number of CPUs */
	if (!nthreads)
		nthreads = perf_cpu_map__nr(cpu);

	worker = calloc(nthreads, sizeof(*worker));
	if (!worker)
		goto errmem;

	if (getrlimit(RLIMIT_NOFILE, &prevrl))
		err(EXIT_FAILURE, "getrlimit");
	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
	printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
		  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
		err(EXIT_FAILURE, "setrlimit");

	printf("Run summary [PID %d]: %d threads doing epoll_ctl ops on "
	       "%d file-descriptors for %d secs.\n\n",
	       getpid(), nthreads, nfds, nsecs);

	for (i = 0; i < EPOLL_NR_OPS; i++)
		init_stats(&all_stats[i]);

	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = nthreads;

	gettimeofday(&bench__start, NULL);

	do_threads(worker, cpu);

	/* wait until every worker has checked in, then release them all */
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	sleep(nsecs);
	toggle_done(0, NULL, NULL);
	printinfo("main thread: toggling done\n");

	for (i = 0; i < nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	for (i = 0; i < nthreads; i++) {
		unsigned long t[EPOLL_NR_OPS];

		for (j = 0; j < EPOLL_NR_OPS; j++) {
			t[j] = worker[i].ops[j];
			update_stats(&all_stats[j], t[j]);
		}

		if (nfds == 1)
			printf("[thread %2d] fdmap: %p [ add: %04ld ops; mod: %04ld ops; del: %04ld ops ]\n",
			       worker[i].tid, &worker[i].fdmap[0],
			       t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
		else
			printf("[thread %2d] fdmap: %p ... %p [ add: %04ld ops; mod: %04ld ops; del: %04ld ops ]\n",
			       worker[i].tid, &worker[i].fdmap[0],
			       &worker[i].fdmap[nfds - 1],
			       t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
	}

	print_summary();

	close(epollfd);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT