/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
        unsigned int seq_nr;
        int cpu_index;

        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr mod. number of cpus in use.
         */

        seq_nr = atomic_inc_return(&pd->seq_nr);
        cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}
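
/*
 * Illustrative example (not part of the original source): with
 * pd->cpumask.pcpu containing cpus {0, 2, 5}, the hash above maps
 * successive sequence numbers round-robin onto the parallel cpumask:
 *
 *      seq_nr 0 -> cpu 0
 *      seq_nr 1 -> cpu 2
 *      seq_nr 2 -> cpu 5
 *      seq_nr 3 -> cpu 0
 *      ...
 *
 * so submitted objects are spread evenly over the cpus in use.
 */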

static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;
        struct padata_instance *pinst;
        LIST_HEAD(local_list);

        local_bh_disable();
        pqueue = container_of(parallel_work,
                              struct padata_parallel_queue, work);
        pd = pqueue->pd;
        pinst = pd->pinst;

        spin_lock(&pqueue->parallel.lock);
        list_replace_init(&pqueue->parallel.list, &local_list);
        spin_unlock(&pqueue->parallel.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->parallel(padata);
        }

        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
                       struct padata_priv *padata, int cb_cpu)
{
        int target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;

        rcu_read_lock_bh();

        pd = rcu_dereference_bh(pinst->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
                goto out;

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
                goto out;

        err = 0;
        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = cb_cpu;

        target_cpu = padata_cpu_hash(pd);
        queue = per_cpu_ptr(pd->pqueue, target_cpu);

        spin_lock(&queue->parallel.lock);
        list_add_tail(&padata->list, &queue->parallel.list);
        spin_unlock(&queue->parallel.lock);

        queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
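
/*
 * Usage sketch (illustrative only, not part of the original source):
 * a padata user typically embeds struct padata_priv in its own request
 * structure and submits it with padata_do_parallel().  The names
 * my_request, my_parallel, my_serial, my_pinst and process() are
 * hypothetical.
 *
 *      struct my_request {
 *              struct padata_priv padata;
 *              void *data;
 *      };
 *
 *      static void my_parallel(struct padata_priv *padata)
 *      {
 *              struct my_request *req = container_of(padata,
 *                                                    struct my_request,
 *                                                    padata);
 *
 *              process(req->data);             // runs with BHs off
 *              padata_do_serial(padata);       // hand over for reordering
 *      }
 *
 *      static void my_serial(struct padata_priv *padata)
 *      {
 *              // runs on padata->cb_cpu, in submission order
 *      }
 *
 *      // submission:
 *      req->padata.parallel = my_parallel;
 *      req->padata.serial = my_serial;
 *      err = padata_do_parallel(my_pinst, &req->padata, cb_cpu);
 *      // a negative return value means the object was not queued and
 *      // the caller has to process it itself.
 */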

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
        int cpu, num_cpus;
        unsigned int next_nr, next_index;
        struct padata_parallel_queue *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;

        num_cpus = cpumask_weight(pd->cpumask.pcpu);

        /*
         * Calculate the percpu reorder queue and the sequence
         * number of the next object.
         */
        next_nr = pd->processed;
        next_index = next_nr % num_cpus;
        cpu = padata_index_to_cpu(pd, next_index);
        next_queue = per_cpu_ptr(pd->pqueue, cpu);

        padata = NULL;

        reorder = &next_queue->reorder;

        if (!list_empty(&reorder->list)) {
                padata = list_entry(reorder->list.next,
                                    struct padata_priv, list);

                spin_lock(&reorder->lock);
                list_del_init(&padata->list);
                atomic_dec(&pd->reorder_objects);
                spin_unlock(&reorder->lock);

                pd->processed++;

                goto out;
        }

        if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
                goto out;
        }

        padata = ERR_PTR(-EINPROGRESS);
out:
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_instance *pinst = pd->pinst;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time.  Calculating in which percpu reorder
         * queue the next object will arrive takes some time.  A spinlock
         * would be highly contended.  Also it is not clear in which order
         * the objects arrive at the reorder queues.  So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment.  Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_get_next(pd);

                /*
                 * All reorder queues are empty, or the next object that needs
                 * serialization is parallel processed by another cpu and is
                 * still on its way to the cpu's reorder queue, nothing to
                 * do for now.
                 */
                if (!padata || PTR_ERR(padata) == -EINPROGRESS)
                        break;

                /*
                 * This cpu has to do the parallel processing of the next
                 * object.  It's waiting in the cpu's parallelization queue,
                 * so exit immediately.
                 */
                if (PTR_ERR(padata) == -ENODATA) {
                        del_timer(&pd->timer);
                        spin_unlock_bh(&pd->lock);
                        return;
                }

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived at
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
         */
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
        else
                del_timer(&pd->timer);

        return;
}
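
/*
 * Worked example of the reordering (illustrative only): assume
 * pd->cpumask.pcpu = {0, 1} and three objects submitted back to back,
 * so they are assigned to cpu 0, cpu 1 and cpu 0 respectively.  If the
 * object on cpu 1 finishes first, padata_do_serial() puts it on cpu 1's
 * reorder queue, but padata_get_next() still looks at cpu 0's queue for
 * the first object and returns -EINPROGRESS.  Once the first object
 * completes, the reorder loop hands object 0 and then object 1 to the
 * serial workers, restoring submission order.
 */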

static void padata_reorder_timer(unsigned long arg)
{
        struct parallel_data *pd = (struct parallel_data *)arg;

        padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                atomic_dec(&pd->refcnt);
        }
        local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;

        pd = padata->pd;

        cpu = get_cpu();
        pqueue = per_cpu_ptr(pd->pqueue, cpu);

        spin_lock(&pqueue->reorder.lock);
        atomic_inc(&pd->reorder_objects);
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);

        put_cpu();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
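
/*
 * Summary of an object's life cycle (added for clarity, not part of the
 * original source):
 *
 *      padata_do_parallel()    object is queued round-robin to one cpu
 *                              of cpumask.pcpu
 *      ->parallel()            runs on that cpu with BHs off
 *      padata_do_serial()      object is put on the local reorder queue
 *      padata_reorder()        objects are dequeued in submission order
 *                              and queued to their cb_cpu
 *      ->serial()              runs on cb_cpu with BHs off, in
 *                              submission order
 */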

static int padata_setup_cpumasks(struct parallel_data *pd,
                                 const struct cpumask *pcpumask,
                                 const struct cpumask *cbcpumask)
{
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pd->cpumask.pcpu);
                return -ENOMEM;
        }

        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
        return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu_index, cpu;
        struct padata_parallel_queue *pqueue;

        cpu_index = 0;
        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                pqueue->pd = pd;
                pqueue->cpu_index = cpu_index;
                cpu_index++;

                __padata_list_init(&pqueue->reorder);
                __padata_list_init(&pqueue->parallel);
                INIT_WORK(&pqueue->work, padata_parallel_worker);
                atomic_set(&pqueue->num_obj, 0);
        }
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                             const struct cpumask *pcpumask,
                                             const struct cpumask *cbcpumask)
{
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;
        if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
        spin_lock_init(&pd->lock);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                flush_work(&pqueue->work);
        }

        del_timer_sync(&pd->timer);

        if (atomic_read(&pd->reorder_objects))
                padata_reorder(pd);

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                flush_work(&squeue->work);
        }

        BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();

        get_online_cpus();
        padata_flush_queues(pinst->pd);
        put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
                           struct parallel_data *pd_new)
{
        struct parallel_data *pd_old = pinst->pd;
        int notification_mask = 0;

        pinst->flags |= PADATA_RESET;

        rcu_assign_pointer(pinst->pd, pd_new);

        synchronize_rcu();

        if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
                notification_mask |= PADATA_CPU_PARALLEL;
        if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
                notification_mask |= PADATA_CPU_SERIAL;

        padata_flush_queues(pd_old);
        padata_free_pd(pd_old);

        if (notification_mask)
                blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
                                             notification_mask,
                                             &pd_new->cpumask);

        pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
                                     struct notifier_block *nblock)
{
        return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
                                                nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *                             registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
                                       struct notifier_block *nblock)
{
        return blocking_notifier_chain_unregister(
                &pinst->cpumask_change_notifier,
                nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
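
/*
 * Notifier usage sketch (illustrative only): a padata user that caches
 * the cpumasks can register a callback to be told about changes.  The
 * names my_cpumask_change and my_nb are hypothetical; the data pointer
 * is the new struct padata_cpumask as passed by padata_replace().
 *
 *      static int my_cpumask_change(struct notifier_block *self,
 *                                   unsigned long val, void *data)
 *      {
 *              struct padata_cpumask *new_masks = data;
 *
 *              if (val & PADATA_CPU_PARALLEL)
 *                      ;       // new_masks->pcpu changed
 *              if (val & PADATA_CPU_SERIAL)
 *                      ;       // new_masks->cbcpu changed
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_cpumask_change,
 *      };
 *
 *      padata_register_cpumask_notifier(pinst, &my_nb);
 */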


/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        struct parallel_data *pd;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                return -ENOMEM;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        padata_replace(pinst, pd);

        if (valid)
                __padata_start(pinst);

        return 0;
}

/**
 * padata_set_cpumask: Sets the cpumask selected by @cpumask_type to the
 *                     value equivalent to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
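
/*
 * Usage sketch for padata_set_cpumask() (illustrative only): restrict
 * the parallel workers to cpus 0-3.  The variable name new_mask is
 * hypothetical and error handling is trimmed.
 *
 *      cpumask_var_t new_mask;
 *
 *      if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 *              return -ENOMEM;
 *      cpumask_clear(new_mask);
 *      cpumask_set_cpu(0, new_mask);
 *      cpumask_set_cpu(1, new_mask);
 *      cpumask_set_cpu(2, new_mask);
 *      cpumask_set_cpu(3, new_mask);
 *      err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
 *      free_cpumask_var(new_mask);
 */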

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;

        __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd = NULL;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {

                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
                cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
        }

        return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_remove_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_add_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_remove_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

        padata_stop(pinst);
        padata_free_pd(pinst->pd);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        kfree(pinst);
}

#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);
        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = snprintf(buf, PAGE_SIZE, "%*pb\n",
                       nr_cpu_ids, cpumask_bits(cpumask));
        mutex_unlock(&pinst->lock);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}
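
/*
 * Note on the sysfs interface (added for clarity): the cpumasks are
 * shown and parsed as hexadecimal bitmaps (the %*pb / bitmap_parse()
 * format).  Where the attributes appear depends on where the padata
 * user adds pinst->kobj; pcrypt, for example, places its instances
 * under /sys/kernel/pcrypt/, so a hypothetical
 *
 *      echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * would restrict the parallel workers of that instance to cpus 0-3.
 */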

#define PADATA_ATTR_RW(_name, _show_name, _store_name)         \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)               \
        static struct padata_sysfs_entry _name##_attr = \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_attrs = padata_default_attrs,
        .release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
        return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
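
/*
 * Instance life-cycle sketch (illustrative only, error handling trimmed):
 * the workqueue name "my_padata" and the variables are hypothetical.
 *
 *      struct workqueue_struct *wq;
 *      struct padata_instance *pinst;
 *
 *      wq = alloc_workqueue("my_padata",
 *                           WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *      pinst = padata_alloc_possible(wq);
 *      padata_start(pinst);
 *
 *      // ... submit objects with padata_do_parallel() ...
 *
 *      padata_stop(pinst);
 *      padata_free(pinst);
 *      destroy_workqueue(wq);
 */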

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
                                     const struct cpumask *pcpumask,
                                     const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;
        struct parallel_data *pd = NULL;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        get_online_cpus();
        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_inst;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_inst;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                goto err_free_masks;

        rcu_assign_pointer(pinst->pd, pd);

        pinst->wq = wq;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        pinst->flags = 0;

        put_online_cpus();

        BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
#endif
        return pinst;

err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
        kfree(pinst);
        put_online_cpus();
err:
        return NULL;
}

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
                                      padata_cpu_online,
                                      padata_cpu_prep_down);
        if (ret < 0)
                return ret;
        hp_online = ret;
        return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
        cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif