/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
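/* Illustrative example (not part of the original source): with 8K base
 * pages and a 512-entry TSB, vaddr 0x12345000 hashes to slot
 * (0x12345000 >> 13) & (512 - 1) = 0x1a2, while the stored tag must
 * equal 0x12345000 >> 22 = 0x48, so a hit requires agreement on both
 * the hashed slot and the high-order address bits.
 */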
/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++) {
		unsigned long v = tb->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
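/* Worked example (illustrative, assumed values): for a 128KB TSB
 * (8192 << 4) the switch above selects tsb_reg = 0x4UL and a 512K
 * page_sz, the smallest page that covers the whole TSB with a single
 * locked TLB entry.  On cheetah_plus/hypervisor the TSB is size-aligned,
 * so OR'ing tsb_paddr into tsb_reg leaves the size field intact in the
 * low bits of the physical address.
 */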
struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}
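/* Worked example (illustrative): with the default sysctl_tsb_ratio of
 * -2, an 8KB TSB holds 8192 / sizeof(struct tsb) = 512 entries, and
 * tsb_size_to_rss_limit() returns 512 - (512 >> 2) = 384.  Growth
 * therefore triggers once the RSS reaches 3/4 of the TSB's capacity.
 */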
/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
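/* Usage sketch (simplified, for illustration only; the real check lives
 * in do_sparc64_fault()): a fault-path caller grows the TSB roughly like
 * this once the RSS crosses the current limit:
 *
 *	unsigned long mm_rss = get_mm_rss(mm);
 *	if (unlikely(mm_rss >=
 *		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 *		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 */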
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}
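/* Illustrative note (assumed example values, not from the original
 * source): destroy_context() treats mmu_context_bmap as an array of
 * 64-bit words, so for a context number nr = 130 it clears bit
 * 130 & 63 = 2 in word 130 >> 6 = 2, releasing that context number
 * for later reallocation.
 */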