/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *   _______________________________________________
 *  |       |       |       |       |       |       |
 *  |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *  |_______|_______|_______|_______|_______|_______|
 *    63-48   47-39   38-30   29-21   20-12   11-00
 *
 *            mask        page size
 *
 *   Lv0: FF8000000000       --
 *   Lv1:   7FC0000000       1G
 *   Lv2:     3FE00000       2M
 *   Lv3:       1FF000       4K
 *   off:          FFF
 */
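/*
 * As a concrete illustration of the split above (address picked arbitrarily),
 * the virtual address 0x40201000 decodes to:
 *
 *   Lv0 index = (0x40201000 >> 39) & 0x1FF = 0
 *   Lv1 index = (0x40201000 >> 30) & 0x1FF = 1
 *   Lv2 index = (0x40201000 >> 21) & 0x1FF = 1
 *   Lv3 index = (0x40201000 >> 12) & 0x1FF = 1
 *   offset    =  0x40201000 & 0xFFF         = 0
 *
 * which is exactly the walk performed by find_pte() below.
 */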
/*
 * Build the TCR value for exception level <el>. The IPS field and the
 * number of virtual address bits are derived from the highest address in
 * mem_map and optionally reported through <pips> and <pva_bits>.
 */
static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide; every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}
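/*
 * Illustrative example (addresses picked arbitrarily): a mem_map entry with
 * base 0x80000000 and size 0x40000000 is 1 GiB aligned, so add_map() below
 * covers it with a single level-1 block descriptor. An entry with base
 * 0x80200000 and size 0x00200000 instead needs a level-1 table entry
 * pointing to a level-2 table that holds one 2 MiB block.
 */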
/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 addr = map->base;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(addr, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for addr 0x%llx\n", addr);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(addr, level);
			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for addr=%llx size=%llx "
			      "blocksize=%llx\n", addr, size, blocksize);
			if (size >= blocksize && !(addr & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block addr=%llx\n",
				      pte, addr);
				*pte = addr | attrs;
				addr += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for addr 0x%llx "
				      "blksize=%llx\n", addr, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			}
		}
	}
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This function is called recursively to count the number of page tables we
 * need to cover a particular PTE range. If you call it with level = -1 you
 * basically get the full 48 bit coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->base;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
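/*
 * A note on table memory (derived from the code below): gd->arch.tlb_addr
 * points at a region of gd->arch.tlb_size bytes, and create_table() hands
 * out MAX_PTE_ENTRIES * sizeof(u64) byte chunks of it through the
 * gd->arch.tlb_fillptr bump pointer. setup_all_pgtables() fills the region
 * twice: once for the primary page tables and once for the emergency set
 * used while the primary tables are being modified.
 */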
static void setup_pgtables(void)
{
	int i;

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);

	/* Create the same thing once more for our emergency page table */
	create_table();
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
}

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inlined to avoid using the stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

void dcache_enable(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		*pte &= ~PMD_ATTRINDX_MASK;
		*pte |= attrs;
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
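/*
 * Typical (illustrative) use of the function below from board or driver
 * code, e.g. to map a DMA or frame buffer region as uncached:
 *
 *	mmu_set_region_dcache_behaviour(buf_base, buf_size, DCACHE_OFF);
 *
 * buf_base and buf_size are placeholders; the region should be covered by
 * mem_map and be at least 4 KiB page aligned.
 */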
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			r = set_one_region(start, size, attrs, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}

#else	/* CONFIG_SYS_DCACHE_OFF */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}