// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
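
/*
 * Worked example (hypothetical values): with pd->cpumask.pcpu = {1, 5, 7}
 * and seq_nr = 8, cpu_index is 8 % 3 = 2, and padata_index_to_cpu() walks
 * the mask to the third set bit and returns CPU 7.
 */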

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because this function may be optimized in such
 * a way that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with clang
 * LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
 * values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of the padata instance
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
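
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * request embeds a struct padata_priv, sets the parallel/serial callbacks,
 * and submits it.  Every submitted object must eventually be passed to
 * padata_do_serial(), typically from its parallel callback.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		do_expensive_work(req);	// hypothetical per-object work
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		...	objects arrive here in submission order ...
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */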

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to consider integer wrap around */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
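
/*
 * Worked example of the wrap-safe comparison in padata_do_serial()
 * (hypothetical values): with cur->seq_nr = 0xfffffffe and
 * padata->seq_nr = 1 after the counter has wrapped, the difference is
 * 0xfffffffd, which is negative as a signed int, so the pre-wrap object
 * still sorts before the post-wrap one.
 */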

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}
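
/*
 * Worked example of the chunk claiming above (hypothetical values): with
 * ps->chunk_size = 64 and job->start = 100, a helper claims
 * size = roundup(101, 64) - 100 = 28 units, so end = 128; every later
 * claim then starts chunk-aligned and spans a full 64 units (or whatever
 * remains).
 */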

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
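
/*
 * Minimal usage sketch (hypothetical __init caller): describe the range of
 * work in a struct padata_mt_job and let padata split it across threads.
 * The names init_range, table and nr_units are illustrative only.
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		...	initialize units [start, end) of *arg ...
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_range,
 *		.fn_arg		= table,
 *		.start		= 0,
 *		.size		= nr_units,
 *		.align		= 1,
 *		.min_chunk	= 1024,
 *		.max_threads	= num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */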

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
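
/*
 * Note on the replace scheme above: padata_do_parallel() reads ps->pd
 * under rcu_read_lock_bh() and takes a reference before unlocking, so the
 * synchronize_rcu() in padata_replace() ensures no reader can still be
 * dereferencing an old pd by the time its refcount is dropped.
 */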

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to
 *                      the value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
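
/*
 * Minimal usage sketch (hypothetical caller): restrict the parallel
 * workers of an instance to a chosen set of CPUs.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpumask_of_node(0));
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */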

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
	       cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
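
/*
 * Shell usage sketch: the attributes above appear wherever the instance's
 * user adds its kobject; the path below is only an example (pcrypt
 * registers its instances under /sys/kernel/pcrypt/).
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */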

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
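
/*
 * Lifecycle sketch (hypothetical caller): an instance owns one or more
 * shells; tear down in reverse order of allocation, freeing all shells
 * before the instance.
 *
 *	pinst = padata_alloc("my_inst");
 *	ps = padata_alloc_shell(pinst);
 *	...	submit objects via padata_do_parallel(ps, ...) ...
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */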

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}