/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>

# ifdef __KERNEL__
/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs). We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.
 */
# define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

/*
   MicroBlaze has 256 contexts, so we can just rotate through these
   as a way of "switching" contexts.  If the TID of the TLB is zero,
   the PID/TID comparison is disabled, so we can use a TID of zero
   to represent all kernel pages as shared among all contexts.
 */

# define NO_CONTEXT	256
# define LAST_CONTEXT	255
# define FIRST_CONTEXT	1

/*
 * Set the current MMU context.
 * This is done by loading up the segment registers for the user part of the
 * address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only, we can't rely on this context
 * number to be free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * Since we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}
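/*
 * A brief walk-through of the allocation above (values picked purely for
 * illustration): suppose next_mmu_context is 3 and bit 3 of context_map is
 * still clear.  test_and_set_bit() claims context 3 on the first pass, the
 * mm is recorded in context_mm[3], and next_mmu_context advances to 4.  If
 * bit 3 had already been taken, find_next_zero_bit() skips ahead to the
 * next clear bit, wrapping back to the bottom of the map once the scan runs
 * past LAST_CONTEXT.  The atomic_dec_if_positive()/steal_context() loop has
 * already guaranteed that at least one context is free, so the scan
 * terminates.
 */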

/*
 * Set up the context for a new address space.
 */
# define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
			       struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

extern void mmu_context_init(void);

#include <asm-generic/mmu_context.h>

# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */