/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_MEMBARRIER_H
#define _ASM_RISCV_MEMBARRIER_H

static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
	/*
	 * Only need the full barrier when switching between processes.
	 * Barrier when switching from kernel to userspace is not
	 * required here, given that it is implied by mmdrop(). Barrier
	 * when switching from userspace to kernel is not needed after
	 * store to rq->curr.
	 */
	if (IS_ENABLED(CONFIG_SMP) &&
	    likely(!(atomic_read(&next->membarrier_state) &
		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
		return;

	/*
	 * The membarrier system call requires a full memory barrier
	 * after storing to rq->curr, before going back to user-space.
	 * Matches a full barrier in the proximity of the membarrier
	 * system call entry.
	 */
	smp_mb();
}

#endif /* _ASM_RISCV_MEMBARRIER_H */
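
/*
 * Illustrative, hedged user-space sketch (not part of this header): the
 * smp_mb() above is the kernel-side full barrier that the membarrier(2)
 * system call pairs with when a remote CPU has switched rq->curr rather
 * than being sent an IPI.  Assuming a kernel built with CONFIG_MEMBARRIER=y,
 * a program using the private expedited command looks roughly like this.
 * There is no glibc wrapper for membarrier(2), so syscall() is used.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register the process before using the private expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) == -1)
		return 1;

	/*
	 * When this call returns, every thread of this process that was
	 * running on another CPU has executed a full memory barrier, either
	 * via IPI or via a barrier on the context-switch path such as the
	 * one issued in membarrier_arch_switch_mm().
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) == -1)
		return 1;

	puts("private expedited membarrier completed");
	return 0;
}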