/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

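/* Single-address variant of flush_tsb_user(): evict one user virtual
 * address from the base (8K indexed) TSB and, if one has been
 * allocated, from the huge page TSB as well.  As noted above, only the
 * cpu doing the address space modification needs to do this.
 */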
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

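	/* On sun4v there is no TSB base register for software to program
	 * directly; the TSB must instead be described to the hypervisor.
	 * The hv_tsb_descr filled in below is handed over by
	 * tsb_context_switch() whenever this address space is installed.
	 */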
	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

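/* Turn a TSB size in bytes into the RSS value at which tsb_grow() will
 * move to the next size.  With the default sysctl_tsb_ratio of -2 this
 * is num_ents - num_ents/4, i.e. we grow once the RSS reaches 3/4 of
 * the TSB's capacity; a positive ratio lets the RSS exceed the number
 * of TSB entries before a grow is triggered.
 */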
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));


	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

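	/* When growing an existing TSB, carry the valid entries over into
	 * the new table so the address space does not immediately take a
	 * TSB miss for every hot translation.  copy_tsb() re-hashes each
	 * entry for the new table size and, per the comment above, skips
	 * entries that are locked by concurrent TSB miss handling.
	 */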
	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}