// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */

/*
 * For documentation purposes, here are some membarrier ordering
 * scenarios to keep in mind:
 *
 * A) Userspace thread execution after IPI vs membarrier's memory
 *    barrier before sending the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the start of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU0 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU1 after the IPI-induced memory barrier:
 *
 *                        CPU0                      CPU1
 *
 *                        x = 1
 *                        membarrier():
 *                          a: smp_mb()
 *                          b: send IPI             IPI-induced mb
 *                          c: smp_mb()
 *                        r2 = y
 *                                                  y = 1
 *                                                  barrier()
 *                                                  r1 = x
 *
 *                        BUG_ON(r1 == 0 && r2 == 0)
 *
 * The write to y and load from x by CPU1 are unordered by the hardware,
 * so it's possible to have "r1 = x" reordered before "y = 1" at any
 * point after (b). If the memory barrier at (a) is omitted, then "x = 1"
 * can be reordered after (a) (although not after (c)), so we get r1 == 0
 * and r2 == 0. This violates the guarantee that membarrier() is
 * supposed to provide.
 *
 * The timing of the memory barrier at (a) has to ensure that it executes
 * before the IPI-induced memory barrier on CPU1.
 *
 * B) Userspace thread execution before IPI vs membarrier's memory
 *    barrier after completing the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the end of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU1 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU0 after the membarrier():
 *
 *                        CPU0                      CPU1
 *
 *                                                  x = 1
 *                                                  barrier()
 *                                                  y = 1
 *                        r2 = y
 *                        membarrier():
 *                          a: smp_mb()
 *                          b: send IPI             IPI-induced mb
 *                          c: smp_mb()
 *                        r1 = x
 *                        BUG_ON(r1 == 0 && r2 == 1)
 *
 * The writes to x and y are unordered by the hardware, so it's possible to
 * have "r2 = 1" even though the write to x doesn't execute until (b). If
 * the memory barrier at (c) is omitted then "r1 = x" can be reordered
 * before (b) (although not before (a)), so we get "r1 = 0". This violates
 * the guarantee that membarrier() is supposed to provide.
 *
 * The timing of the memory barrier at (c) has to ensure that it executes
 * after the IPI-induced memory barrier on CPU1.
 *
 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
 *
 *                        CPU0                      CPU1
 *
 *                        membarrier():
 *                          a: smp_mb()
 *                                                  d: switch to kthread (includes mb)
 *                          b: read rq->curr->mm == NULL
 *                                                  e: switch to user (includes mb)
 *                          c: smp_mb()
 *
 * Using the scenario from (A), we can show that (a) needs to be paired
 * with (e). Using the scenario from (B), we can show that (c) needs to
 * be paired with (d).
 *
 * D) exit_mm vs membarrier
 *
 * Two thread groups are created, A and B. Thread group B is created by
 * issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
 * Let's assume we have a single thread within each thread group (Thread A
 * and Thread B). Thread A runs on CPU0, Thread B runs on CPU1.
 *
 *                        CPU0                      CPU1
 *
 *                        membarrier():
 *                          a: smp_mb()
 *                                                  exit_mm():
 *                                                    d: smp_mb()
 *                                                    e: current->mm = NULL
 *                          b: read rq->curr->mm == NULL
 *                          c: smp_mb()
 *
 * Using scenario (B), we can show that (c) needs to be paired with (d).
 *
 * E) kthread_{use,unuse}_mm vs membarrier
 *
 *                        CPU0                      CPU1
 *
 *                        membarrier():
 *                          a: smp_mb()
 *                                                  kthread_unuse_mm()
 *                                                    d: smp_mb()
 *                                                    e: current->mm = NULL
 *                          b: read rq->curr->mm == NULL
 *                                                  kthread_use_mm()
 *                                                    f: current->mm = mm
 *                                                    g: smp_mb()
 *                          c: smp_mb()
 *
 * Using the scenario from (A), we can show that (a) needs to be paired
 * with (g). Using the scenario from (B), we can show that (c) needs to
 * be paired with (d).
 */
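
/*
 * Illustrative userspace sketch of scenario (A) above, driven through
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. This is not part of the kernel build;
 * the membarrier() wrapper below is an assumption of the example (libc does
 * not necessarily provide one) and error checking is omitted for brevity.
 * The documented invariant holds regardless of how the two threads
 * interleave or migrate.
 *
 *	#include <assert.h>
 *	#include <linux/membarrier.h>
 *	#include <pthread.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static volatile int x, y, r1, r2;
 *
 *	static long membarrier(int cmd, unsigned int flags, int cpu_id)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags, cpu_id);
 *	}
 *
 *	static void *reader(void *arg)
 *	{
 *		y = 1;
 *		__asm__ __volatile__("" ::: "memory");	// barrier()
 *		r1 = x;
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *
 *		// Register once per mm before using the expedited command.
 *		membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);
 *		pthread_create(&t, NULL, reader, NULL);
 *
 *		x = 1;
 *		// Supplies (a)/(c) plus the IPI-induced barrier of scenario (A).
 *		membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
 *		r2 = y;
 *
 *		pthread_join(t, NULL);
 *		assert(!(r1 == 0 && r2 == 0));	// the documented guarantee
 *		return 0;
 *	}
 */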

/*
 * Bitmask made from an OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK			\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif

#ifdef CONFIG_RSEQ
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK		\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK	0
#endif

#define MEMBARRIER_CMD_BITMASK						\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK			\
	| MEMBARRIER_CMD_GET_REGISTRATIONS)

static DEFINE_MUTEX(membarrier_ipi_mutex);
#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static void ipi_sync_core(void *info)
{
	/*
	 * The smp_mb() in membarrier after all the IPIs is supposed to
	 * ensure that memory accesses performed on remote CPUs before the
	 * IPI become visible to membarrier()'s caller -- see scenario B in
	 * the big comment at the top of this file.
	 *
	 * A sync_core() would provide this guarantee, but
	 * sync_core_before_usermode() might end up being deferred until
	 * after membarrier()'s smp_mb().
	 */
	smp_mb();	/* IPIs should be serializing but paranoid. */

	sync_core_before_usermode();
}
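
/*
 * Typical userspace use of the SYNC_CORE variant (illustrative only, not
 * part of the kernel build): a JIT rewrites code that other threads of the
 * same mm may be about to execute. Assumes a raw syscall(__NR_membarrier, ...)
 * wrapper named membarrier() and a prior
 * MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE registration.
 *
 *	// ... write or patch the code bytes ...
 *	// membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
 *	// Every thread of this mm now issues a core-serializing instruction
 *	// before it next executes the (possibly modified) code in usermode.
 */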

static void ipi_rseq(void *info)
{
	/*
	 * Ensure that all stores done by the calling thread are visible
	 * to the current task before the current task resumes. We could
	 * probably optimize this away on most architectures, but by the
	 * time we've already sent an IPI, the cost of the extra smp_mb()
	 * is negligible.
	 */
	smp_mb();
	rseq_preempt(current);
}

static void ipi_sync_rq_state(void *info)
{
	struct mm_struct *mm = (struct mm_struct *) info;

	if (current->mm != mm)
		return;
	this_cpu_write(runqueues.membarrier_state,
		       atomic_read(&mm->membarrier_state));
	/*
	 * Issue a memory barrier after setting
	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
	 * guarantee that no memory access following registration is reordered
	 * before registration.
	 */
	smp_mb();
}

void membarrier_exec_mmap(struct mm_struct *mm)
{
	/*
	 * Issue a memory barrier before clearing membarrier_state to
	 * guarantee that no memory access prior to exec is reordered after
	 * clearing this state.
	 */
	smp_mb();
	atomic_set(&mm->membarrier_state, 0);
	/*
	 * Keep the runqueue membarrier_state in sync with this mm
	 * membarrier_state.
	 */
	this_cpu_write(runqueues.membarrier_state, 0);
}

void membarrier_update_current_mm(struct mm_struct *next_mm)
{
	struct rq *rq = this_rq();
	int membarrier_state = 0;

	if (next_mm)
		membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;
	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}

static int membarrier_global_expedited(void)
{
	int cpu;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	SERIALIZE_IPI();
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;

		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
		    MEMBARRIER_STATE_GLOBAL_EXPEDITED))
			continue;

		/*
		 * Skip the CPU if it runs a kernel thread which is not using
		 * a task mm.
		 */
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (!p->mm)
			continue;

		__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static int membarrier_private_expedited(int flags, int cpu_id)
{
	cpumask_var_t tmpmask;
	struct mm_struct *mm = current->mm;
	smp_call_func_t ipi_func = ipi_mb;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
		ipi_func = ipi_sync_core;
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
			return -EPERM;
		ipi_func = ipi_rseq;
	} else {
		WARN_ON_ONCE(flags);
		if (!(atomic_read(&mm->membarrier_state) &
		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
	    (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	SERIALIZE_IPI();
	cpus_read_lock();

	if (cpu_id >= 0) {
		struct task_struct *p;

		if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
			goto out;
		rcu_read_lock();
		p = rcu_dereference(cpu_rq(cpu_id)->curr);
		if (!p || p->mm != mm) {
			rcu_read_unlock();
			goto out;
		}
		rcu_read_unlock();
	} else {
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			struct task_struct *p;

			p = rcu_dereference(cpu_rq(cpu)->curr);
			if (p && p->mm == mm)
				__cpumask_set_cpu(cpu, tmpmask);
		}
		rcu_read_unlock();
	}

	if (cpu_id >= 0) {
		/*
		 * smp_call_function_single() will call ipi_func() if cpu_id
		 * is the calling CPU.
		 */
		smp_call_function_single(cpu_id, ipi_func, NULL, 1);
	} else {
		/*
		 * For regular membarrier, we can save a few cycles by
		 * skipping the current cpu -- we're about to do smp_mb()
		 * below, and if we migrate to a different cpu, this cpu
		 * and the new cpu will execute a full barrier in the
		 * scheduler.
		 *
		 * For SYNC_CORE, we do need a barrier on the current cpu --
		 * otherwise, if we are migrated and replaced by a different
		 * task in the same mm just before, during, or after
		 * membarrier, we will end up with some thread in the mm
		 * running without a core sync.
		 *
		 * For RSEQ, don't rseq_preempt() the caller. User code
		 * is not supposed to issue syscalls at all from inside an
		 * rseq critical section.
		 */
		if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
			preempt_disable();
			smp_call_function_many(tmpmask, ipi_func, NULL, true);
			preempt_enable();
		} else {
			on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
		}
	}

out:
	if (cpu_id < 0)
		free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */

	return 0;
}

static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
	int membarrier_state = atomic_read(&mm->membarrier_state);
	cpumask_var_t tmpmask;
	int cpu;

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
		this_cpu_write(runqueues.membarrier_state, membarrier_state);

		/*
		 * For single mm user, we can simply issue a memory barrier
		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
		 * mm and in the current runqueue to guarantee that no memory
		 * access following registration is reordered before
		 * registration.
		 */
		smp_mb();
		return 0;
	}

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * For mm with multiple users, we need to ensure all future
	 * scheduler executions will observe @mm's new membarrier
	 * state.
	 */
	synchronize_rcu();

	/*
	 * For each cpu runqueue, if the task's mm matches @mm, ensure that all
	 * @mm's membarrier state set bits are also set in the runqueue's
	 * membarrier state. This ensures that a runqueue scheduling
	 * between threads which are users of @mm has its membarrier state
	 * updated.
	 */
	SERIALIZE_IPI();
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *p;

		p = rcu_dereference(rq->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ret;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
	    ret;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY;
	} else {
		WARN_ON_ONCE(flags);
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
		return 0;
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
	if (flags & MEMBARRIER_FLAG_RSEQ)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ;
	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);

	return 0;
}
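
/*
 * Illustrative userspace sketch of the registration step implemented above
 * (not part of the kernel build): any thread sharing the mm -- including a
 * CLONE_VM sibling thread group -- may register, and registration is
 * idempotent once the READY bit is set. The setup_membarrier() helper and
 * the membarrier() wrapper are assumptions of the example.
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long membarrier(int cmd, unsigned int flags, int cpu_id)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags, cpu_id);
 *	}
 *
 *	int setup_membarrier(void)
 *	{
 *		long mask = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
 *
 *		if (mask < 0)
 *			return -1;	// ENOSYS or EINVAL
 *		if (!(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
 *			return -1;	// command not supported here
 *		// Registers the whole mm; repeated calls simply return 0.
 *		return membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
 *				  0, 0);
 *	}
 */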

static int membarrier_get_registrations(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int registrations_mask = 0, membarrier_state, i;
	static const int states[] = {
		MEMBARRIER_STATE_GLOBAL_EXPEDITED |
			MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY
	};
	static const int registration_cmds[] = {
		MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ
	};
	BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds));

	membarrier_state = atomic_read(&mm->membarrier_state);
	for (i = 0; i < ARRAY_SIZE(states); ++i) {
		if (membarrier_state & states[i]) {
			registrations_mask |= registration_cmds[i];
			membarrier_state &= ~states[i];
		}
	}
	WARN_ON_ONCE(membarrier_state != 0);
	return registrations_mask;
}

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:    Takes command values defined in enum membarrier_cmd.
 * @flags:  Currently needs to be 0 for all commands other than
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
 *          case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
 *          contains the CPU on which to interrupt (= restart)
 *          the RSEQ critical section.
 * @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
 *          the RSEQ CS should be interrupted (@cmd must be
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
 * always return the same value until reboot. In addition, it can return
 * -ENOMEM if there is not enough memory available to perform the system
 * call.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb()   sys_membarrier()
 *        barrier()          X           X              O
 *        smp_mb()           X           O              O
 *        sys_membarrier()   O           O              O
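 *
 * For instance, the table's rightmost column permits an asymmetric fence
 * pattern: a hot path that only uses barrier() can pair with a cold path
 * that issues sys_membarrier() in place of smp_mb(). Hypothetical userspace
 * sketch (assuming a raw syscall(__NR_membarrier, ...) wrapper named
 * membarrier() and prior private-expedited registration):
 *
 *	// hot path, executed often by each thread:
 *	//	fast_flag = 1;
 *	//	barrier();
 *	//	r_fast = slow_flag;
 *
 *	// cold path, executed rarely:
 *	//	slow_flag = 1;
 *	//	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
 *	//	r_slow = fast_flag;
 *
 *	// As with a pair of smp_mb()s, r_fast == 0 and r_slow == 0 cannot
 *	// both be observed.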
 */
SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
{
	switch (cmd) {
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU))
			return -EINVAL;
		break;
	default:
		if (unlikely(flags))
			return -EINVAL;
	}

	if (!(flags & MEMBARRIER_CMD_FLAG_CPU))
		cpu_id = -1;

	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
	case MEMBARRIER_CMD_GET_REGISTRATIONS:
		return membarrier_get_registrations();
	default:
		return -EINVAL;
	}
}