/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 */

#include <common.h>
#include <physmem.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/* Large pages are 2MB. */
#define LARGE_PAGE_SIZE	((1 << 20) * 2)

/*
 * Paging data structures.
 */

struct pdpe {
	uint64_t p:1;
	uint64_t mbz_0:2;
	uint64_t pwt:1;
	uint64_t pcd:1;
	uint64_t mbz_1:4;
	uint64_t avl:3;
	uint64_t base:40;
	uint64_t mbz_2:12;
};

typedef struct pdpe pdpt_t[512];

struct pde {
	uint64_t p:1;		/* present */
	uint64_t rw:1;		/* read/write */
	uint64_t us:1;		/* user/supervisor */
	uint64_t pwt:1;		/* page-level writethrough */
	uint64_t pcd:1;		/* page-level cache disable */
	uint64_t a:1;		/* accessed */
	uint64_t d:1;		/* dirty */
	uint64_t ps:1;		/* page size */
	uint64_t g:1;		/* global page */
	uint64_t avl:3;		/* available to software */
	uint64_t pat:1;		/* page-attribute table */
	uint64_t mbz_0:8;	/* must be zero */
	uint64_t base:31;	/* base address */
};

typedef struct pde pdt_t[512];

static pdpt_t pdpt __aligned(4096);
static pdt_t pdts[4] __aligned(4096);

/*
 * Map a virtual address to a physical address and optionally invalidate any
 * old mapping.
 *
 * @param virt		The virtual address to use.
 * @param phys		The physical address to use.
 * @param invlpg	Whether to use invlpg to clear any old mappings.
 */
static void x86_phys_map_page(uintptr_t virt, phys_addr_t phys, int invlpg)
{
	/* Extract the 2-bit PDPT index and the 9-bit PDT index. */
	uintptr_t pdpt_idx = (virt >> 30) & 0x3;
	uintptr_t pdt_idx = (virt >> 21) & 0x1ff;

	/* Set up a handy pointer to the appropriate PDE. */
	struct pde *pde = &(pdts[pdpt_idx][pdt_idx]);

	memset(pde, 0, sizeof(struct pde));
	pde->p = 1;
	pde->rw = 1;
	pde->us = 1;
	pde->ps = 1;
	pde->base = phys >> 21;

	if (invlpg) {
		/* Flush any stale mapping out of the TLBs. */
		__asm__ __volatile__(
			"invlpg %0\n\t"
			:
			: "m" (*(uint8_t *)virt)
		);
	}
}
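/*
 * Illustrative example (not in the original source): with 2MB pages, a
 * 32-bit virtual address splits into a 2-bit PDPT index (bits 31:30), a
 * 9-bit PDT index (bits 29:21) and a 21-bit page offset (bits 20:0).
 * For virt = 0x80400000:
 *
 *	pdpt_idx = (0x80400000 >> 30) & 0x3   = 2
 *	pdt_idx  = (0x80400000 >> 21) & 0x1ff = 2
 *
 * so x86_phys_map_page() fills in pdts[2][2], which covers the 2MB
 * region 0x80400000-0x805fffff.
 */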
/* Identity map the lower 4GB and turn on paging with PAE. */
static void x86_phys_enter_paging(void)
{
	phys_addr_t page_addr;
	unsigned i;

	/* Zero out the page tables. */
	memset(pdpt, 0, sizeof(pdpt));
	memset(pdts, 0, sizeof(pdts));

	/* Set up the PDPT. */
	for (i = 0; i < ARRAY_SIZE(pdts); i++) {
		pdpt[i].p = 1;
		pdpt[i].base = ((uintptr_t)&pdts[i]) >> 12;
	}

	/* Identity map everything up to 4GB. */
	for (page_addr = 0; page_addr < (1ULL << 32);
			page_addr += LARGE_PAGE_SIZE) {
		/* There's no reason to invalidate the TLB with paging off. */
		x86_phys_map_page(page_addr, page_addr, 0);
	}

	/* Turn on paging */
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n\t"
		/* Enable pae */
		"movl	%%cr4, %%eax\n\t"
		"orl	$0x00000020, %%eax\n\t"
		"movl	%%eax, %%cr4\n\t"
		/* Enable paging */
		"movl	%%cr0, %%eax\n\t"
		"orl	$0x80000000, %%eax\n\t"
		"movl	%%eax, %%cr0\n\t"
		:
		: "r" (pdpt)
		: "eax"
	);
}

/* Disable paging and PAE mode. */
static void x86_phys_exit_paging(void)
{
	/* Turn off paging */
	__asm__ __volatile__(
		/* Disable paging */
		"movl	%%cr0, %%eax\n\t"
		"andl	$0x7fffffff, %%eax\n\t"
		"movl	%%eax, %%cr0\n\t"
		/* Disable pae */
		"movl	%%cr4, %%eax\n\t"
		"andl	$0xffffffdf, %%eax\n\t"
		"movl	%%eax, %%cr4\n\t"
		:
		:
		: "eax"
	);
}

/*
 * Set physical memory to a particular value when the whole region fits on one
 * page.
 *
 * @param map_addr	The address that starts the physical page.
 * @param offset	How far into that page to start setting a value.
 * @param c		The value to set memory to.
 * @param size		The size in bytes of the area to set.
 */
static void x86_phys_memset_page(phys_addr_t map_addr, uintptr_t offset, int c,
				 unsigned size)
{
	/*
	 * U-Boot should be far away from the beginning of memory, so that's a
	 * good place to map our window on top of.
	 */
	const uintptr_t window = LARGE_PAGE_SIZE;

	/* Make sure the window is below U-Boot. */
	assert(window + LARGE_PAGE_SIZE <
	       gd->relocaddr - CONFIG_SYS_MALLOC_LEN - CONFIG_SYS_STACK_SIZE);
	/* Map the page into the window and then memset the appropriate part. */
	x86_phys_map_page(window, map_addr, 1);
	memset((void *)(window + offset), c, size);
}

/*
 * A physical memory analogue to memset with matching parameters and return
 * value.
 */
phys_addr_t arch_phys_memset(phys_addr_t start, int c, phys_size_t size)
{
	const phys_addr_t max_addr = (phys_addr_t)~(uintptr_t)0;
	const phys_addr_t orig_start = start;

	if (!size)
		return orig_start;

	/* Handle memory below 4GB. */
	if (start <= max_addr) {
		phys_size_t low_size = MIN(max_addr + 1 - start, size);
		void *start_ptr = (void *)(uintptr_t)start;

		assert(((phys_addr_t)(uintptr_t)start) == start);
		memset(start_ptr, c, low_size);
		start += low_size;
		size -= low_size;
	}

	/* Use paging and PAE to handle memory above 4GB up to 64GB. */
	if (size) {
		phys_addr_t map_addr = start & ~(LARGE_PAGE_SIZE - 1);
		phys_addr_t offset = start - map_addr;

		x86_phys_enter_paging();

		/* Handle the first partial page. */
		if (offset) {
			phys_addr_t end =
				MIN(map_addr + LARGE_PAGE_SIZE, start + size);
			phys_size_t cur_size = end - start;
			x86_phys_memset_page(map_addr, offset, c, cur_size);
			size -= cur_size;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the complete pages. */
		while (size > LARGE_PAGE_SIZE) {
			x86_phys_memset_page(map_addr, 0, c, LARGE_PAGE_SIZE);
			size -= LARGE_PAGE_SIZE;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the last partial page. */
		if (size)
			x86_phys_memset_page(map_addr, 0, c, size);

		x86_phys_exit_paging();
	}
	return orig_start;
}
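/*
 * Usage sketch (illustrative only; the address and size are made-up
 * example values, not taken from any board configuration):
 *
 *	arch_phys_memset(0x140000000ULL, 0, 0x100000);
 *
 * zeroes 1MB starting at the 5GB physical mark. Because that region lies
 * above 4GB, the call transparently enters PAE paging, writes through the
 * 2MB window at LARGE_PAGE_SIZE, and exits paging before returning.
 */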