/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
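 *
 * A minimal usage sketch; struct my_request, my_submit(), my_parallel(),
 * my_serial() and do_the_cpu_intensive_work() are hypothetical and not part
 * of this file. The parallel callback must eventually hand the object back
 * via padata_do_serial():
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		struct completion done;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		do_the_cpu_intensive_work(padata);
 *		padata_do_serial(padata);
 *	}
 *
 *	static int my_submit(struct padata_instance *pinst,
 *			     struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *		return padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	}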
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment.
	 * Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
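 *
 * A minimal sketch of a matching serial callback (struct my_request and
 * my_serial() are hypothetical, see the sketch above padata_do_parallel);
 * the serial callbacks run on the cb_cpu passed to padata_do_parallel(),
 * in the original submission order:
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req;
 *
 *		req = container_of(padata, struct my_request, padata);
 *		complete(&req->done);
 *	}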
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* cbcpu allocation failed, undo the earlier pcpu allocation. */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
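 *
 * A minimal sketch of such a notifier (my_cpumask_change and my_nb are
 * hypothetical and not part of this file); the notifier data is a pointer
 * to the new struct padata_cpumask and the action is a combination of
 * PADATA_CPU_PARALLEL and PADATA_CPU_SERIAL, as sent from padata_replace():
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long mask_type, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (mask_type & PADATA_CPU_PARALLEL)
 *			pr_debug("new parallel cpumask: %*pb\n",
 *				 nr_cpu_ids, cpumask_bits(new_masks->pcpu));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);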
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;
	}

	return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
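
/*
 * A minimal sketch of the instance lifecycle as exposed by the functions
 * above; "my_wq", "my_nb" and the surrounding error handling are
 * hypothetical and not part of this file:
 *
 *	struct workqueue_struct *my_wq;
 *	struct padata_instance *pinst;
 *	int err;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_CPU_INTENSIVE, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	pinst = padata_alloc_possible(my_wq);
 *	if (!pinst)
 *		return -ENOMEM;
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 *	err = padata_start(pinst);
 *
 *	... submit work with padata_do_parallel() ...
 *
 *	padata_stop(pinst);
 *	padata_unregister_cpumask_notifier(pinst, &my_nb);
 *	padata_free(pinst);
 *	destroy_workqueue(my_wq);
 */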