// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod the number of cpus in use.
	 */

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

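/*
 * Worked example (editorial illustration, not part of the original code):
 * with cpumask.pcpu = {0, 2, 5} the weight is 3, so sequence numbers map
 * round-robin onto the mask: seq_nr 6 -> index 0 -> cpu 0, seq_nr 7 ->
 * index 1 -> cpu 2, seq_nr 8 -> index 2 -> cpu 5, and so on, because
 * padata_index_to_cpu() walks the pcpu mask from its first set bit.
 */
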
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

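/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a typical client (pcrypt is the in-tree example) embeds struct
 * padata_priv in its own request, submits it with padata_do_parallel() and
 * hands the finished object back with padata_do_serial() from its parallel
 * callback. All names below except the padata_* API are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request,
 *						      padata);
 *
 *		do_the_expensive_work(req);
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		complete_in_submission_order(padata);
 *	}
 *
 *	...
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 */
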
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);

		pd->processed++;

		spin_unlock(&reorder->lock);
		goto out;
	}
	spin_unlock(&reorder->lock);

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime; we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	local_bh_disable();
	pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
	pd = pqueue->pd;
	padata_reorder(pd);
	local_bh_enable();
}

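/*
 * Editorial note (not part of the original file): objects may finish their
 * parallel callbacks in any order, but padata_get_next() effectively hands
 * them out in submission order, because pd->processed selects the per-cpu
 * reorder queue the next expected object was assigned to. For example, if
 * objects 3, 4 and 5 were submitted and 5 finishes first, padata_reorder()
 * leaves 5 sitting in its reorder queue until 3 and then 4 have been
 * dequeued, so the serial callbacks still run in submission order.
 */
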
static void padata_reorder_timer(struct timer_list *t)
{
	struct parallel_data *pd = from_timer(pd, t, timer);
	unsigned int weight;
	int target_cpu, cpu;

	cpu = get_cpu();

	/* We don't lock pd here so we don't interfere with padata_reorder()
	 * calls doing parallel processing on other CPUs. We just need any
	 * CPU out of the cpumask.pcpu set. It would be nice if it's the
	 * right one, but it doesn't matter if we're off to the next one by
	 * using an outdated pd->processed value.
	 */
	weight = cpumask_weight(pd->cpumask.pcpu);
	target_cpu = padata_index_to_cpu(pd, pd->processed % weight);

	/* make sure to call the reorder callback on the correct CPU */
	if (cpu != target_cpu) {
		struct padata_parallel_queue *pqueue;
		struct padata_instance *pinst;

		/* The timer function is serialized wrt itself -- no locking
		 * needed.
		 */
		pinst = pd->pinst;
		pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
		queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
	} else {
		padata_reorder(pd);
	}

	put_cpu();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	int reorder_via_wq = 0;

	pd = padata->pd;

	cpu = get_cpu();

	/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
	 * was called on -- or, at least, enqueue the padata object into the
	 * correct per-cpu queue.
	 */
	if (cpu != padata->cpu) {
		reorder_via_wq = 1;
		cpu = padata->cpu;
	}

	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	/* If we're running on the wrong CPU, call padata_reorder() via a
	 * kernel worker.
	 */
	if (reorder_via_wq)
		queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
	else
		padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_possible_cpu(cpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
			pqueue->cpu_index = -1;
			continue;
		}

		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	timer_setup(&pd->timer, padata_reorder_timer, 0);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either the pcpu or cbcpu cpumask, or
 *                                    both, change.
 *
 * @pinst: A pointer to a padata instance
 * @nblock: A pointer to a notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier.
 *
 * @pinst: A pointer to a padata instance.
 * @nblock: A pointer to a notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

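/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a user of the instance can watch for cpumask changes roughly like this;
 * the callback and notifier_block names are hypothetical. The notifier is
 * called with the PADATA_CPU_* bits that changed and a pointer to the new
 * struct padata_cpumask.
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long mask, void *data)
 *	{
 *		struct padata_cpumask *masks = data;
 *
 *		if (mask & PADATA_CPU_PARALLEL)
 *			pr_info("parallel cpumask changed to %*pb\n",
 *				cpumask_pr_args(masks->pcpu));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 */
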
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

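/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * restricting the parallel workers of an instance to CPUs 0-3. The instance
 * pointer is assumed to exist and error handling is omitted.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		cpumask_set_cpu(2, mask);
 *		cpumask_set_cpu(3, mask);
 *		padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *		free_cpumask_var(mask);
 *	}
 */
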
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};

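/*
 * Editorial note (not part of the original file): these attributes only
 * become visible once the user of the instance adds pinst->kobj to sysfs;
 * pcrypt, for example, exposes them under /sys/kernel/pcrypt/. The masks
 * are read and written as hexadecimal CPU bitmaps.
 */
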
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Must be called from a cpus_read_lock() protected region
 */
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 *
 * Must be called from a cpus_read_lock() protected region
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	lockdep_assert_cpus_held();
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;
	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif
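
/*
 * Illustrative instance lifecycle (editorial addition, not part of the
 * original file): names other than the padata_* API and standard kernel
 * helpers are hypothetical, and error handling is omitted.
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata",
 *			     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *	get_online_cpus();
 *	pinst = padata_alloc_possible(wq);
 *	put_online_cpus();
 *	padata_start(pinst);
 *
 *	... submit objects with padata_do_parallel() ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */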