/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 */

#include <common.h>
#include <physmem.h>
#include <asm/cpu.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/* Large pages are 2MB. */
#define LARGE_PAGE_SIZE ((1 << 20) * 2)

/*
 * Paging data structures.
 */

struct pdpe {
	uint64_t p:1;
	uint64_t mbz_0:2;
	uint64_t pwt:1;
	uint64_t pcd:1;
	uint64_t mbz_1:4;
	uint64_t avl:3;
	uint64_t base:40;
	uint64_t mbz_2:12;
};

typedef struct pdpe pdpt_t[512];

struct pde {
	uint64_t p:1;		/* present */
	uint64_t rw:1;		/* read/write */
	uint64_t us:1;		/* user/supervisor */
	uint64_t pwt:1;		/* page-level writethrough */
	uint64_t pcd:1;		/* page-level cache disable */
	uint64_t a:1;		/* accessed */
	uint64_t d:1;		/* dirty */
	uint64_t ps:1;		/* page size */
	uint64_t g:1;		/* global page */
	uint64_t avl:3;		/* available to software */
	uint64_t pat:1;		/* page-attribute table */
	uint64_t mbz_0:8;	/* must be zero */
	uint64_t base:31;	/* base address */
};

typedef struct pde pdt_t[512];

static pdpt_t pdpt __aligned(4096);
static pdt_t pdts[4] __aligned(4096);

/*
 * Map a virtual address to a physical address and optionally invalidate any
 * old mapping.
 *
 * @param virt	The virtual address to use.
 * @param phys	The physical address to use.
 * @param invlpg	Whether to use invlpg to clear any old mappings.
 */
static void x86_phys_map_page(uintptr_t virt, phys_addr_t phys, int invlpg)
{
	/* Extract the two-bit PDPT index and the 9-bit PDT index. */
	uintptr_t pdpt_idx = (virt >> 30) & 0x3;
	uintptr_t pdt_idx = (virt >> 21) & 0x1ff;

	/* Set up a handy pointer to the appropriate PDE. */
	struct pde *pde = &(pdts[pdpt_idx][pdt_idx]);

	memset(pde, 0, sizeof(struct pde));
	pde->p = 1;
	pde->rw = 1;
	pde->us = 1;
	pde->ps = 1;
	pde->base = phys >> 21;

	if (invlpg) {
		/* Flush any stale mapping out of the TLBs. */
		__asm__ __volatile__(
			"invlpg %0\n\t"
			:
			: "m" (*(uint8_t *)virt)
		);
	}
}

/* Identity map the lower 4GB and turn on paging with PAE. */
static void x86_phys_enter_paging(void)
{
	phys_addr_t page_addr;
	unsigned i;

	/* Zero out the page tables. */
	memset(pdpt, 0, sizeof(pdpt));
	memset(pdts, 0, sizeof(pdts));

	/* Set up the PDPT. */
	for (i = 0; i < ARRAY_SIZE(pdts); i++) {
		pdpt[i].p = 1;
		pdpt[i].base = ((uintptr_t)&pdts[i]) >> 12;
	}

	/* Identity map everything up to 4GB. */
	for (page_addr = 0; page_addr < (1ULL << 32);
			page_addr += LARGE_PAGE_SIZE) {
		/* There's no reason to invalidate the TLB with paging off. */
		x86_phys_map_page(page_addr, page_addr, 0);
	}

	cpu_enable_paging_pae((ulong)pdpt);
}

/* Disable paging and PAE mode. */
static void x86_phys_exit_paging(void)
{
	cpu_disable_paging_pae();
}

/*
 * Set physical memory to a particular value when the whole region fits on one
 * page.
 *
 * @param map_addr	The address that starts the physical page.
 * @param offset	How far into that page to start setting a value.
 * @param c		The value to set memory to.
 * @param size	The size in bytes of the area to set.
 */
static void x86_phys_memset_page(phys_addr_t map_addr, uintptr_t offset, int c,
				 unsigned size)
{
	/*
	 * U-Boot should be far away from the beginning of memory, so that's a
	 * good place to map our window on top of.
	 */
	const uintptr_t window = LARGE_PAGE_SIZE;

	/* Make sure the window is below U-Boot. */
	assert(window + LARGE_PAGE_SIZE <
	       gd->relocaddr - CONFIG_SYS_MALLOC_LEN - CONFIG_SYS_STACK_SIZE);
	/* Map the page into the window and then memset the appropriate part. */
	x86_phys_map_page(window, map_addr, 1);
	memset((void *)(window + offset), c, size);
}

/*
 * A physical memory analogue to memset with matching parameters and return
 * value.
 */
phys_addr_t arch_phys_memset(phys_addr_t start, int c, phys_size_t size)
{
	const phys_addr_t max_addr = (phys_addr_t)~(uintptr_t)0;
	const phys_addr_t orig_start = start;

	if (!size)
		return orig_start;

	/* Handle memory below 4GB. */
	if (start <= max_addr) {
		phys_size_t low_size = min(max_addr + 1 - start, size);
		void *start_ptr = (void *)(uintptr_t)start;

		assert(((phys_addr_t)(uintptr_t)start) == start);
		memset(start_ptr, c, low_size);
		start += low_size;
		size -= low_size;
	}

	/* Use paging and PAE to handle memory above 4GB up to 64GB. */
	if (size) {
		phys_addr_t map_addr = start & ~(LARGE_PAGE_SIZE - 1);
		phys_addr_t offset = start - map_addr;

		x86_phys_enter_paging();

		/* Handle the first partial page. */
		if (offset) {
			phys_addr_t end =
				min(map_addr + LARGE_PAGE_SIZE, start + size);
			phys_size_t cur_size = end - start;
			x86_phys_memset_page(map_addr, offset, c, cur_size);
			size -= cur_size;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the complete pages. */
		while (size > LARGE_PAGE_SIZE) {
			x86_phys_memset_page(map_addr, 0, c, LARGE_PAGE_SIZE);
			size -= LARGE_PAGE_SIZE;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the last partial page. */
		if (size)
			x86_phys_memset_page(map_addr, 0, c, size);

		x86_phys_exit_paging();
	}
	return orig_start;
}
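
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller might exercise arch_phys_memset() on memory above the 32-bit
 * boundary. The function name and addresses below are hypothetical, and the
 * sketch assumes a build where phys_addr_t is 64 bits wide (e.g. with
 * CONFIG_PHYS_64BIT); only arch_phys_memset() itself is defined above.
 */
#if 0
static void example_memset_above_4gb(void)
{
	/* 16MB starting at 5GB, i.e. entirely above the 32-bit limit. */
	const phys_addr_t base = 0x140000000ULL;
	const phys_size_t len = 16 << 20;

	/* Like memset(), the function returns the start address passed in. */
	phys_addr_t ret = arch_phys_memset(base, 0, len);

	assert(ret == base);
}
#endif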