/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info
{
	atomic64_t	generation;
	unsigned long	*map;
	atomic64_t __percpu	*active;
	u64 __percpu		*reserved;
	u32	bits;
	/* Lock protecting the structure */
	raw_spinlock_t		lock;
	/* Which CPU requires context flush on next call */
	cpumask_t		flush_pending;
	/* Number of ASIDs allocated per context (shift value) */
	unsigned int		ctxt_shift;
	/* Callback to locally flush the context. */
	void			(*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)			(1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)

#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm);

/*
 * Check if the ASID is still valid for the context. If not, generate
 * a new ASID.
 *
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	asid_new_context(info, pasid, cpu, mm);
}
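
/*
 * Example usage: a minimal sketch of how an architecture might wire
 * this allocator into its context-switch path, assuming 8 hardware
 * ASID bits and one ASID per context. The names asid_info,
 * asids_init(), check_and_switch_context() and mm->context.asid are
 * illustrative assumptions here, not part of this header; the flush
 * callback simply invalidates the local TLB.
 *
 *	static struct asid_info asid_info;
 *
 *	static void asid_flush_cpu_ctxt(void)
 *	{
 *		local_flush_tlb_all();
 *	}
 *
 *	static int __init asids_init(void)
 *	{
 *		return asid_allocator_init(&asid_info, 8, 1,
 *					   asid_flush_cpu_ctxt);
 *	}
 *	early_initcall(asids_init);
 *
 *	static void check_and_switch_context(struct mm_struct *mm,
 *					     unsigned int cpu)
 *	{
 *		asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
 *	}
 */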

int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));

#endif