// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do the SKB allocation and call the normal network stack.
 */
/*
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack,
 * and assigning dedicated CPUs for this stage. This basically allows
 * for 10G wirespeed pre-filtering via BPF.
 */
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * be stored/queued for at most one driver ->poll() call. It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	struct completion kthread_running;
	struct rcu_work free_work;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (attr->max_entries > NR_CPUS)
		return ERR_PTR(-E2BIG);

	cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map) {
		bpf_map_area_free(cmap);
		return ERR_PTR(-ENOMEM);
	}

	return &cmap->map;
}
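
/* Usage sketch (illustrative only, not part of this file): from the BPF
 * side, a cpumap entry is selected with the bpf_redirect_map() helper.
 * This assumes libbpf map-definition conventions; the map name "cpu_map"
 * and the target CPU index are hypothetical.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_cpumap_val);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		// Redirect to CPU 2; fall back to XDP_PASS on lookup failure
 *		return bpf_redirect_map(&cpu_map, 2, XDP_PASS);
 *	}
 */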

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty. See __cpu_map_entry_replace() and the work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	void *ptr;

	while ((ptr = ptr_ring_consume(ring))) {
		WARN_ON_ONCE(1);
		if (unlikely(__ptr_test_bit(0, &ptr))) {
			__ptr_clear_bit(0, &ptr);
			kfree_skb(ptr);
			continue;
		}
		xdp_return_frame(ptr);
	}
}

static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{
	struct sk_buff *skb, *tmp;
	struct xdp_buff xdp;
	u32 act;
	int err;

	list_for_each_entry_safe(skb, tmp, listp, list) {
		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_REDIRECT:
			skb_list_del_init(skb);
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			return;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			skb_list_del_init(skb);
			kfree_skb(skb);
			stats->drop++;
			break;
		}
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq = {};
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();

	return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	if (!rcpu->prog)
		return xdp_n;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(!list_empty(list)))
		cpu_map_bpf_prog_run_skb(rcpu, list, stats);

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}
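
/* Note on pointer tagging (descriptive, derived from the code in this
 * file): the per-entry ptr_ring carries both struct xdp_frame and struct
 * sk_buff pointers. Both are at least word-aligned, so bit 0 of the
 * pointer value is used as a type tag:
 *
 *	__ptr_set_bit(0, &skb);		// producer marks an skb entry
 *	if (__ptr_test_bit(0, &ptr)) {	// consumer detects an skb entry
 *		__ptr_clear_bit(0, &ptr);
 *		// handle as sk_buff
 *	}
 *
 * cpu_map_generic_redirect() sets the bit; cpu_map_kthread_run() and
 * __cpu_map_ring_cleanup() test and clear it.
 */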

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;
	unsigned long last_qs = jiffies;

	complete(&rcpu->kthread_running);
	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given a stop order, the rcpu has already been
	 * disconnected from the map, thus no new packets can enter.
	 * Remaining in-flight per-CPU stored packets are flushed to this
	 * queue. Honor the kthread_stop signal, but wait until the queue is
	 * empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m, nframes, xdp_n;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		LIST_HEAD(list);

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
				last_qs = jiffies;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			rcu_softirq_qs_periodic(last_qs);
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is a single consumer, with this
		 * kthread pinned to a CPU. Lockless access to the ptr_ring
		 * consume side is valid because the queue cannot be resized.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0, xdp_n = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				list_add_tail(&skb->list, &list);
				continue;
			}

			frames[xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
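
/* Illustrative sketch (not part of this file): the program loaded above is
 * an ordinary XDP program with expected_attach_type BPF_XDP_CPUMAP. With
 * libbpf, the attach type is selected by the section name; the program
 * name here is hypothetical.
 *
 *	SEC("xdp/cpumap")
 *	int on_remote_cpu(struct xdp_md *ctx)
 *	{
 *		// Runs on the remote kthread CPU, not in driver context.
 *		return XDP_PASS;	// pass up the normal network stack
 *	}
 *
 * The program's fd is passed in via bpf_cpumap_val.bpf_prog.fd when the
 * map entry is updated (see cpu_map_update_elem() below).
 */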

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	/* Make sure the kthread has started running, so kthread_stop() will
	 * not stop it prematurely and all pending frames or skbs will be
	 * handled by the kthread before kthread_stop() returns.
	 */
	wait_for_completion(&rcpu->kthread_running);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);

	/* kthread_stop will wake_up_process and wait for it to complete.
	 * cpu_map_kthread_run() makes sure the pointer ring is empty
	 * before exiting.
	 */
	kthread_stop(rcpu->kthread);

	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	/* The queue should be empty at this point */
	__cpu_map_ring_cleanup(rcpu->queue);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_wq, &old_rcpu->free_work);
	}
}

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses rcu_read_lock() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
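
/* Illustrative user-space sketch (not part of this file): entries are
 * managed with the regular map-update syscalls. Names like "map_fd" and
 * "prog_fd" are hypothetical; qsize selects the per-entry ptr_ring size
 * checked above, and qsize == 0 behaves like a delete.
 *
 *	struct bpf_cpumap_val val = {
 *		.qsize = 2048,
 *		.bpf_prog.fd = prog_fd,	// optional BPF_XDP_CPUMAP program
 *	};
 *	__u32 cpu = 2;
 *
 *	bpf_map_update_elem(map_fd, &cpu, &val, 0);	// (re)create entry
 *	val.qsize = 0;
 *	bpf_map_update_elem(map_fd, &cpu, &val, 0);	// same as delete
 */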

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the BPF programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. synchronize_rcu() below not
	 * only guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
	 * (if any) are completed.
	 */
	synchronize_rcu();

	/* The only possible user of bpf_cpu_map_entry is
	 * cpu_map_kthread_run().
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* Stop kthread and cleanup entry directly */
		__cpu_map_entry_free(&rcpu->free_work.work);
	}
	bpf_map_area_free(cmap->cpu_map);
	bpf_map_area_free(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
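
/* Illustrative sketch (not part of this file): given the semantics above
 * (a NULL or out-of-range key restarts at index 0), user space can walk
 * all keys in the usual way; "map_fd" is hypothetical.
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);
 *
 *	while (!err) {
 *		key = next;
 *		// ... bpf_map_lookup_elem(map_fd, &key, &val) as needed ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 */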

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __cpu_map_lookup_elem);
}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_cpu_map);

	/* Currently the dynamically allocated elements are not counted */
	usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
	return usage;
}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= cpu_map_mem_usage,
	.map_btf_id		= &cpu_map_btf_ids[0],
	.map_redirect		= cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}
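
/* Call-flow note (descriptive, matching the "General idea" comment at the
 * top of this file): bq_enqueue() is reached via cpu_map_enqueue() below,
 * called by the XDP redirect core for each redirected frame, while
 * __cpu_map_flush() runs when the driver calls xdp_do_flush() at the end
 * of its napi->poll(). Simplified per-poll sequence:
 *
 *	for each RX descriptor:
 *		act = bpf_prog_run_xdp(...);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect() -> cpu_map_enqueue() -> bq_enqueue()
 *	xdp_do_flush() -> __cpu_map_flush() -> bq_flush_to_queue()
 */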

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);