// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
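
/*
 * Worked example (illustrative note, not part of the original file): with
 * pd->cpumask.pcpu = {2, 5, 7}, cpumask_weight() is 3, so seq_nr 4 gives
 * cpu_index = 4 % 3 = 1, and padata_index_to_cpu() walks the mask to the
 * second set bit, CPU 5. Sequence numbers are thus spread round-robin
 * over the CPUs in the parallel cpumask.
 */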

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
			     void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
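
/*
 * Usage sketch (illustrative only): how a caller might submit an object.
 * my_obj, my_parallel() and my_serial() are hypothetical; a real user
 * embeds struct padata_priv in its own object and sets the callbacks
 * before calling padata_do_parallel().
 *
 *	struct my_obj {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_obj *obj = container_of(padata, struct my_obj,
 *						  padata);
 *
 *		... CPU-intensive work, runs with BHs off ...
 *		padata_do_serial(padata);	// mandatory hand-off
 *	}
 *
 *	obj->padata.parallel = my_parallel;
 *	obj->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &obj->padata, &cb_cpu);
 *	// -EINVAL, -EBUSY, or cb_cpu rewritten to a fallback are possible
 */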

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization is still being
 *   processed in parallel on another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
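
/*
 * Worked example (illustrative note): with two parallel CPUs, seq_nr 0
 * and 2 hash to CPU A and seq_nr 1 hashes to CPU B. If object 2 finishes
 * first, padata_find_next() on CPU A sees seq_nr 2 at the head of the
 * reorder list while pd->processed is still 0 and returns NULL; object 2
 * waits in the queue until objects 0 and 1 have been handed to the
 * serial path in order.
 */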

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		if (cur->seq_nr < padata->seq_nr)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
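
/*
 * Summary of an object's path through padata (descriptive note, not part
 * of the original file): padata_do_parallel() assigns a sequence number
 * and queues the parallel callback on parallel_wq; the callback finishes
 * by calling padata_do_serial(), which sorts the object into the reorder
 * list of the CPU its sequence number hashes to; padata_reorder() then
 * drains objects in sequence-number order into the per-callback-CPU
 * serial queues, and padata_serial_worker() finally runs ->serial() on
 * the requested CPU.
 */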

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / job->min_chunk, 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
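
/*
 * Usage sketch (illustrative only; my_init_range() and my_arg are
 * hypothetical): a boot-time caller splits a large range of work across
 * helper threads.
 *
 *	static void __init my_init_range(unsigned long start,
 *					 unsigned long end, void *arg)
 *	{
 *		... initialize items in [start, end) ...
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = my_init_range,
 *		.fn_arg      = my_arg,
 *		.start       = 0,
 *		.size        = 1UL << 20,
 *		.align       = 1,
 *		.min_chunk   = 1UL << 12,
 *		.max_threads = 4,
 *	};
 *	padata_do_multithreaded(&job);
 *
 * With these numbers and all 4 works allocated, chunk_size is
 * (1 << 20) / (4 * 4) = 65536, well above min_chunk, so each helper
 * grabs 64K-item chunks until the job is exhausted.
 */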

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
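
/*
 * Descriptive note (not part of the original file): padata_replace()
 * follows the classic RCU publish/retire pattern. Each shell gets a
 * freshly built parallel_data via rcu_assign_pointer(), synchronize_rcu()
 * waits out all readers that might still hold the old pd from
 * rcu_dereference_bh() in padata_do_parallel(), and only then is the old
 * pd's refcount dropped, freeing it once in-flight objects have been
 * serialized.
 */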

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to
 *                      the value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
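
/*
 * Usage sketch (illustrative only): restricting the parallel workers of
 * an existing instance to CPUs 0-3. The instance pointer "pinst" is
 * assumed to come from a prior padata_alloc().
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	cpumask_set_cpu(3, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */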

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
	       cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
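
/*
 * Example interaction from userspace (illustrative; the exact path
 * depends on where the user of the instance adds the kobject, e.g.
 * pcrypt exposes its instances under /sys/kernel/pcrypt/):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The written value is parsed by bitmap_parse() and applied through
 * padata_set_cpumask() above.
 */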

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};
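
/*
 * Descriptive note (not part of the original file): instance lifetime is
 * tied to the embedded kobject. padata_free() below only drops the
 * kobject reference; once the last reference is gone, the kobject core
 * calls padata_sysfs_release(), which does the actual teardown in
 * __padata_free().
 */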

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}
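
/*
 * End-to-end lifecycle sketch (illustrative only; "my_inst" and the
 * caller are hypothetical):
 *
 *	struct padata_instance *pinst = padata_alloc("my_inst");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	... submit objects with padata_do_parallel(ps, ...), each of which
 *	    eventually calls padata_do_serial() ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 *
 * Shells must be freed before the instance; __padata_free() warns if
 * pinst->pslist is not empty at teardown.
 */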