// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
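
/*
 * Worked example with illustrative values (not taken from the code above):
 * with pd->cpumask.pcpu = {1, 3, 5} the mask weight is 3, so seq_nr 7 gives
 * cpu_index 7 % 3 = 1, and padata_index_to_cpu() walks the mask to the
 * second set bit, CPU 3.  Consecutive sequence numbers therefore spread
 * round-robin across the CPUs in the parallel cpumask.
 */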

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because it may be optimized in such a way
 * that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with
 * clang LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other
 * work_fn values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
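
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * every object submitted with padata_do_parallel() must later be handed to
 * padata_do_serial(), e.g. at the end of the parallel callback.
 * do_expensive_work(), struct my_request, ps and cb_cpu are illustrative
 * names only.
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request,
 *						      padata);
 *
 *		do_expensive_work(req);		// runs concurrently, BHs off
 *		padata_do_serial(padata);	// hand back for in-order completion
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on cb_cpu, in submission order
 *	}
 *
 *	// submit, with padata->parallel/->serial set to the callbacks above:
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	if (err == -EBUSY)
 *		;	// instance is being reset; caller may retry
 */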

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		if (cur->seq_nr < padata->seq_nr)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function. Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	/*
	 * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
	 * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().
	 */
	if (!ps.chunk_size)
		ps.chunk_size = 1U;
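
	/*
	 * Worked example with illustrative numbers: for job->size = 1 << 20
	 * and ps.nworks = 4, the division gives 1048576 / (4 * 4) = 65536.
	 * With min_chunk = 16384 and align = 4096, the max() keeps 65536 and
	 * the roundup() leaves it unchanged, so each helper call handles 64K
	 * units and the job splits into 16 chunks shared among 4 workers.
	 */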

	list_for_each_entry(pw, &works, pw_list)
		queue_work(system_unbound_wq, &pw->pw_work);

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
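
/*
 * A minimal caller sketch (hypothetical, boot-time only since this API is
 * __init): struct padata_mt_job describes the range and the per-chunk
 * thread function; field names follow include/linux/padata.h, while
 * init_pages, first_pfn, nr_pfns and num_node_cpus are illustrative names.
 *
 *	static void __init init_pages(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		// initialize pfns in [start, end)
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_pages,
 *		.fn_arg		= NULL,
 *		.start		= first_pfn,
 *		.size		= nr_pfns,
 *		.align		= PAGES_PER_SECTION,
 *		.min_chunk	= PAGES_PER_SECTION,
 *		.max_threads	= num_node_cpus,
 *	};
 *
 *	padata_do_multithreaded(&job);
 */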

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
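
/*
 * A minimal caller sketch (hypothetical values): restrict parallel work on
 * an already allocated instance to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */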

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
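
/*
 * Typical lifecycle, sketched (hypothetical caller, error handling
 * abbreviated): one instance per user, one shell per independent stream of
 * jobs, torn down in reverse order.
 *
 *	struct padata_instance *pinst = padata_alloc("my_user");
 *	struct padata_shell *ps = pinst ? padata_alloc_shell(pinst) : NULL;
 *
 *	// ... submit jobs with padata_do_parallel(ps, ...) ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */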

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}