// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              MMUOPS callbacks + TLB flushing
 *
 * This file handles mmu notifier callbacks from the core kernel. The callbacks
 * are used to update the TLB in the GRU as a result of changes in the
 * state of a process address space. This file also handles TLB invalidates
 * from the GRU driver.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/hugetlb.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/srcu.h>
#include <asm/processor.h>
#include "gru.h"
#include "grutables.h"
#include <asm/uv/uv_hub.h>

#define gru_random()    get_cycles()

/* ---------------------------------- TLB Invalidation functions --------
 * get_tgh_handle
 *
 * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
 * local blade, use a fixed TGH that is a function of the blade-local cpu
 * number. Normally, this TGH is private to the cpu & no contention occurs for
 * the TGH. For offblade GRUs, select a random TGH in the range above the
 * private TGHs. A spinlock is required to access this TGH & the lock must be
 * released when the invalidate completes. This sucks, but it is the best we
 * can do.
 *
 * Note that the spinlock is IN the TGH handle so locking does not involve
 * additional cache lines.
 *
 */
static inline int get_off_blade_tgh(struct gru_state *gru)
{
        int n;

        n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
        n = gru_random() % n;
        n += gru->gs_tgh_first_remote;
        return n;
}

static inline int get_on_blade_tgh(struct gru_state *gru)
{
        return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
}

static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
                                                         *gru)
{
        struct gru_tlb_global_handle *tgh;
        int n;

        preempt_disable();
        if (uv_numa_blade_id() == gru->gs_blade_id)
                n = get_on_blade_tgh(gru);
        else
                n = get_off_blade_tgh(gru);
        tgh = get_tgh_by_index(gru, n);
        lock_tgh_handle(tgh);

        return tgh;
}

static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
        unlock_tgh_handle(tgh);
        preempt_enable();
}

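/*
 * Illustrative example (not in the original source): with 24 TGH handles per
 * chiplet and MAX_LOCAL_TGH = 16 (see gru_tgh_flush_init() below), a blade
 * whose gs_tgh_local_shift is 1 and gs_tgh_first_remote is 16 gives
 * blade-local cpu 5 the fixed TGH index 5 >> 1 = 2, while an off-blade flush
 * picks a random TGH in the range 16 .. GRU_NUM_TGH - 1.
 */
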
/*
 * gru_flush_tlb_range
 *
 * General purpose TLB invalidation function. This function scans every GRU in
 * the ENTIRE system (partition) looking for GRUs where the specified MM has
 * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
 * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
 * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
 * cost of (possibly) a large number of future TLBmisses.
 *
 * The current algorithm is optimized based on the following (somewhat true)
 * assumptions:
 *      - GRU contexts are not loaded into a GRU unless a reference is made to
 *        the data segment or control block (this is true, not an assumption).
 *        If a DS/CB is referenced, the user will also issue instructions that
 *        cause TLBmisses. It is not necessary to optimize for the case where
 *        contexts are loaded but no instructions cause TLB misses. (I know
 *        this will happen but I'm not optimizing for it).
 *      - GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
 *        a few usec but in unusual cases, it could be longer. Avoid if
 *        possible.
 *      - intrablade process migration between cpus is not frequent but is
 *        common.
 *      - a GRU context is not typically migrated to a different GRU on the
 *        blade because of intrablade migration.
 *      - interblade migration is rare. Processes migrate their GRU context to
 *        the new blade.
 *      - if interblade migration occurs, migration back to the original blade
 *        is very very rare (ie., no optimization for this case)
 *      - most GRU instructions operate on a subset of the user REGIONS. Code
 *        & shared library regions are not likely targets of GRU instructions.
 *
 * To help improve the efficiency of TLB invalidation, the GMS data
 * structure is maintained for EACH address space (MM struct). The GMS is
 * also the structure that contains the pointer to the mmu callout
 * functions. This structure is linked to the mm_struct for the address space
 * using the mmu "register" function. The mmu interfaces are used to
 * provide the callbacks for TLB invalidation. The GMS contains:
 *
 *      - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
 *        loaded into the GRU.
 *      - asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
 *        the above array.
 *      - ctxbitmap[maxgrus]. Indicates the contexts that are currently active
 *        in the GRU for the address space. This bitmap must be passed to the
 *        GRU to do an invalidate.
 *
 * The current algorithm for invalidating TLBs is:
 *      - scan the asidmap for GRUs where the context has been loaded, ie,
 *        asid is non-zero.
 *      - for each gru found:
 *              - if the ctxtmap is non-zero, there are active contexts in the
 *                GRU. TLB invalidate instructions must be issued to the GRU.
 *              - if the ctxtmap is zero, no context is active. Set the ASID to
 *                zero to force a full TLB invalidation. This is fast but will
 *                cause a lot of TLB misses if the context is reloaded onto the
 *                GRU.
 *
 */

void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
                         unsigned long len)
{
        struct gru_state *gru;
        struct gru_mm_tracker *asids;
        struct gru_tlb_global_handle *tgh;
        unsigned long num;
        int grupagesize, pagesize, pageshift, gid, asid;

        /* ZZZ TODO - handle huge pages */
        pageshift = PAGE_SHIFT;
        pagesize = (1UL << pageshift);
        grupagesize = GRU_PAGESIZE(pageshift);
        num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);

        STAT(flush_tlb);
        gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
                start, len, gms->ms_asidmap[0]);

        spin_lock(&gms->ms_asid_lock);
        for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
                STAT(flush_tlb_gru);
                gru = GID_TO_GRU(gid);
                asids = gms->ms_asids + gid;
                asid = asids->mt_asid;
                if (asids->mt_ctxbitmap && asid) {
                        STAT(flush_tlb_gru_tgh);
                        asid = GRUASID(asid, start);
                        gru_dbg(grudev,
        "  FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
                                gid, asid, start, grupagesize, num,
                                asids->mt_ctxbitmap);
                        tgh = get_lock_tgh_handle(gru);
                        tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
                                       num - 1, asids->mt_ctxbitmap);
                        get_unlock_tgh_handle(tgh);
                } else {
                        STAT(flush_tlb_gru_zero_asid);
                        asids->mt_asid = 0;
                        __clear_bit(gru->gs_gid, gms->ms_asidmap);
                        gru_dbg(grudev,
        "  CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
                                gid, asid, asids->mt_ctxbitmap,
                                gms->ms_asidmap[0]);
                }
        }
        spin_unlock(&gms->ms_asid_lock);
}

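/*
 * Worked example (illustrative, not in the original source): a flush with
 * len = 2 * PAGE_SIZE + 1 rounds up to num = 3 pages (clamped to GRUMAXINVAL),
 * so each GRU holding a loaded context for the mm gets one tgh_invalidate()
 * call with num - 1 = 2, while a GRU that only holds an ASID with no loaded
 * context simply has that ASID zeroed.
 */
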
/*
 * Flush the entire TLB on a chiplet.
 */
void gru_flush_all_tlb(struct gru_state *gru)
{
        struct gru_tlb_global_handle *tgh;

        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
        tgh = get_lock_tgh_handle(gru);
        tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
        get_unlock_tgh_handle(tgh);
}

/*
 * MMUOPS notifier callout functions
 */
static int gru_invalidate_range_start(struct mmu_notifier *mn,
                                      const struct mmu_notifier_range *range)
{
        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
                                                 ms_notifier);

        STAT(mmu_invalidate_range);
        atomic_inc(&gms->ms_range_active);
        gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
                range->start, range->end, atomic_read(&gms->ms_range_active));
        gru_flush_tlb_range(gms, range->start, range->end - range->start);

        return 0;
}

static void gru_invalidate_range_end(struct mmu_notifier *mn,
                                     const struct mmu_notifier_range *range)
{
        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
                                                 ms_notifier);

        /* ..._and_test() provides needed barrier */
        (void)atomic_dec_and_test(&gms->ms_range_active);

        wake_up_all(&gms->ms_wait_queue);
        gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
                gms, range->start, range->end);
}

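/*
 * Illustrative sketch (not in the original source): the ms_range_active count
 * and ms_wait_queue above let other code hold off while an invalidate is in
 * flight, e.g.
 *
 *      wait_event(gms->ms_wait_queue,
 *                 atomic_read(&gms->ms_range_active) == 0);
 */
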
static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
                                                 ms_notifier);

        gms->ms_released = 1;
        gru_dbg(grudev, "gms %p\n", gms);
}


static const struct mmu_notifier_ops gru_mmuops = {
        .invalidate_range_start = gru_invalidate_range_start,
        .invalidate_range_end   = gru_invalidate_range_end,
        .release                = gru_release,
};

/* Move this to the basic mmu_notifier file. But for now... */
static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
                                         const struct mmu_notifier_ops *ops)
{
        struct mmu_notifier *mn, *gru_mn = NULL;

        if (mm->mmu_notifier_mm) {
                rcu_read_lock();
                hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
                                         hlist)
                        if (mn->ops == ops) {
                                gru_mn = mn;
                                break;
                        }
                rcu_read_unlock();
        }
        return gru_mn;
}

struct gru_mm_struct *gru_register_mmu_notifier(void)
{
        struct gru_mm_struct *gms;
        struct mmu_notifier *mn;
        int err;

        mn = mmu_find_ops(current->mm, &gru_mmuops);
        if (mn) {
                gms = container_of(mn, struct gru_mm_struct, ms_notifier);
                atomic_inc(&gms->ms_refcnt);
        } else {
                gms = kzalloc(sizeof(*gms), GFP_KERNEL);
                if (!gms)
                        return ERR_PTR(-ENOMEM);
                STAT(gms_alloc);
                spin_lock_init(&gms->ms_asid_lock);
                gms->ms_notifier.ops = &gru_mmuops;
                atomic_set(&gms->ms_refcnt, 1);
                init_waitqueue_head(&gms->ms_wait_queue);
                err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
                if (err)
                        goto error;
        }
        if (gms)
                gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
                        atomic_read(&gms->ms_refcnt));
        return gms;
error:
        kfree(gms);
        return ERR_PTR(err);
}

void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
{
        gru_dbg(grudev, "gms %p, refcnt %d, released %d\n", gms,
                atomic_read(&gms->ms_refcnt), gms->ms_released);
        if (atomic_dec_return(&gms->ms_refcnt) == 0) {
                if (!gms->ms_released)
                        mmu_notifier_unregister(&gms->ms_notifier, current->mm);
                kfree(gms);
                STAT(gms_free);
        }
}

/*
 * Setup TGH parameters. There are:
 *      - 24 TGH handles per GRU chiplet
 *      - a portion (MAX_LOCAL_TGH) of the handles are reserved for
 *        use by blade-local cpus
 *      - the rest are used by off-blade cpus. This usage is
 *        less frequent than blade-local usage.
 *
 * For now, use 16 handles for local flushes, 8 for remote flushes. If the blade
 * has 16 or fewer cpus, each cpu has a unique handle that it can use.
 */
#define MAX_LOCAL_TGH   16

void gru_tgh_flush_init(struct gru_state *gru)
{
        int cpus, shift = 0, n;

        cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);

        /* n = cpus rounded up to next power of 2 */
        if (cpus) {
                n = 1 << fls(cpus - 1);

                /*
                 * shift count for converting local cpu# to TGH index
                 *      0 if cpus <= MAX_LOCAL_TGH,
                 *      1 if cpus <= 2*MAX_LOCAL_TGH,
                 *      etc
                 */
                shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
        }
        gru->gs_tgh_local_shift = shift;

        /* first starting TGH index to use for remote purges */
        gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
}

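/*
 * Worked examples for gru_tgh_flush_init() (illustrative, not in the original
 * source):
 *      cpus = 12:  n = 16, shift = 0, gs_tgh_first_remote = 12
 *      cpus = 32:  n = 32, shift = 1, gs_tgh_first_remote = 16
 *      cpus = 48:  n = 64, shift = 2, gs_tgh_first_remote = 12
 * With more than MAX_LOCAL_TGH cpus, neighbouring blade-local cpus share a
 * local TGH and the remaining handles up to GRU_NUM_TGH - 1 are used for
 * off-blade flushes.
 */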