// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
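
/*
 * Worked example of the hash above (illustrative, not part of the upstream
 * file): with cpumask.pcpu = {2, 5, 7}, cpumask_weight() is 3, so sequence
 * numbers map round-robin onto the mask:
 *
 *	seq_nr	cpu_index	CPU
 *	   0	    0		 2
 *	   1	    1		 5
 *	   2	    2		 7
 *	   3	    0		 2
 *
 * padata_do_serial() relies on this mapping being stable for a given
 * parallel_data, so each object is looked for on the same per-CPU reorder
 * list it was assigned to here.
 */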

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of the instance
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	rcu_read_unlock_bh();

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);
	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	} else {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
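
/*
 * A minimal caller sketch (hypothetical names, following the pattern in
 * Documentation/core-api/padata.rst): embed a struct padata_priv in the
 * request, point ->parallel and ->serial at the two halves of the job, and
 * submit with padata_do_parallel().  The parallel callback must eventually
 * lead to a padata_do_serial() call for the same object; the serial
 * callbacks then run in submission order on the requested callback CPUs.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		do_expensive_work(req);
 *		padata_do_serial(padata);
 *	}
 *
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */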

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
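
/*
 * Worked example of the seq_nr check above (illustrative): with two CPUs in
 * cpumask.pcpu, seq_nr 2 and seq_nr 4 both hash to the first CPU.  If the
 * job with seq_nr 4 completes first, it sits at the head of that CPU's
 * reorder list while pd->processed is still 2, so padata_find_next()
 * returns NULL and serialization stalls until seq_nr 2 arrives.
 */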

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time.  Calculating in which percpu reorder
	 * queue the next object will arrive takes some time.  A spinlock
	 * would be highly contended.  Also it is not clear in which order
	 * the objects arrive at the reorder queues.  So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment.  Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &reorder->list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
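
/*
 * Illustration of the sorted insert above (not part of the upstream file):
 * with a reorder list holding seq_nr 2, 3, 5, inserting seq_nr 4 scans from
 * the tail, stops at 3 (the first entry smaller than 4), and links 4 after
 * it, giving 2, 3, 4, 5.  Scanning in reverse makes the common case of
 * in-order arrival O(1), since a new object usually belongs at the tail.
 */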

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
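
/*
 * Example job description (hypothetical caller and values; deferred struct
 * page init is an in-tree user of this interface).  thread_fn has the
 * signature void (*)(unsigned long start, unsigned long end, void *arg):
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = init_range,
 *		.fn_arg      = &ctx,
 *		.start       = 0,
 *		.size        = 4096,
 *		.align       = 64,
 *		.min_chunk   = 64,
 *		.max_threads = 4,
 *	};
 *	padata_do_multithreaded(&job);
 *
 * With these values and ps.nworks == 4, chunk_size is 4096 / (4 * 4) = 256,
 * which already exceeds min_chunk and is a multiple of align, so each
 * helper claims 256 units of work per trip through its loop.
 */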

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}
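
/*
 * Lifetime summary (added for illustration): pd->refcnt starts at 1 in
 * padata_alloc_pd().  padata_do_parallel() takes one reference per
 * in-flight object, and padata_serial_worker() drops one per object it
 * serializes via atomic_sub_and_test().  When a pd is swapped out by
 * padata_replace() below, the initial reference is dropped there, so the
 * old pd is freed by whichever of the two paths runs last.
 */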

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
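
/*
 * Sketch of the RCU handoff above (illustrative): readers such as
 * padata_do_parallel() fetch ps->pd under rcu_read_lock_bh() and may keep
 * using the old pd until they drop the read lock.  padata_replace()
 * publishes the new pd with rcu_assign_pointer(), waits out all such
 * readers with synchronize_rcu(), and only then drops the initial
 * reference on each old pd, which is freed once in-flight objects drain.
 */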

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
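
/*
 * Caller sketch (hypothetical): restrict parallel work to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */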

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
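
/*
 * Example from userspace (assuming the instance's kobject has been added
 * under /sys/kernel, as the pcrypt user does):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The value is parsed by bitmap_parse(), so it is a hex mask; "f" selects
 * CPUs 0-3.
 */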

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
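
/*
 * Instance lifecycle sketch (hypothetical caller, tying the pieces
 * together; see padata_alloc_shell() below):
 *
 *	struct padata_instance *pinst = padata_alloc("my_worker");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	...
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */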

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}