/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 */

#include <common.h>
#include <physmem.h>
#include <asm/cpu.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/* Large pages are 2MB. */
#define LARGE_PAGE_SIZE ((1 << 20) * 2)

/*
 * Paging data structures.
 */

struct pdpe {
	uint64_t p:1;
	uint64_t mbz_0:2;
	uint64_t pwt:1;
	uint64_t pcd:1;
	uint64_t mbz_1:4;
	uint64_t avl:3;
	uint64_t base:40;
	uint64_t mbz_2:12;
};

typedef struct pdpe pdpt_t[512];

struct pde {
	uint64_t p:1;		/* present */
	uint64_t rw:1;		/* read/write */
	uint64_t us:1;		/* user/supervisor */
	uint64_t pwt:1;		/* page-level writethrough */
	uint64_t pcd:1;		/* page-level cache disable */
	uint64_t a:1;		/* accessed */
	uint64_t d:1;		/* dirty */
	uint64_t ps:1;		/* page size */
	uint64_t g:1;		/* global page */
	uint64_t avl:3;		/* available to software */
	uint64_t pat:1;		/* page-attribute table */
	uint64_t mbz_0:8;	/* must be zero */
	uint64_t base:31;	/* base address */
};

typedef struct pde pdt_t[512];

static pdpt_t pdpt __aligned(4096);
static pdt_t pdts[4] __aligned(4096);

/*
 * Map a virtual address to a physical address and optionally invalidate any
 * old mapping.
 *
 * @param virt		The virtual address to use.
 * @param phys		The physical address to use.
 * @param invlpg	Whether to use invlpg to clear any old mappings.
 */
static void x86_phys_map_page(uintptr_t virt, phys_addr_t phys, int invlpg)
{
	/* Extract the two bit PDPT index and the 9 bit PDT index. */
	uintptr_t pdpt_idx = (virt >> 30) & 0x3;
	uintptr_t pdt_idx = (virt >> 21) & 0x1ff;

	/* Set up a handy pointer to the appropriate PDE. */
	struct pde *pde = &(pdts[pdpt_idx][pdt_idx]);

	memset(pde, 0, sizeof(struct pde));
	pde->p = 1;
	pde->rw = 1;
	pde->us = 1;
	pde->ps = 1;
	pde->base = phys >> 21;

	if (invlpg) {
		/* Flush any stale mapping out of the TLBs. */
		__asm__ __volatile__(
			"invlpg %0\n\t"
			:
			: "m" (*(uint8_t *)virt)
		);
	}
}

/* Identity map the lower 4GB and turn on paging with PAE. */
static void x86_phys_enter_paging(void)
{
	phys_addr_t page_addr;
	unsigned i;

	/* Zero out the page tables. */
	memset(pdpt, 0, sizeof(pdpt));
	memset(pdts, 0, sizeof(pdts));

	/* Set up the PDPT. */
	for (i = 0; i < ARRAY_SIZE(pdts); i++) {
		pdpt[i].p = 1;
		pdpt[i].base = ((uintptr_t)&pdts[i]) >> 12;
	}

	/* Identity map everything up to 4GB. */
	for (page_addr = 0; page_addr < (1ULL << 32);
			page_addr += LARGE_PAGE_SIZE) {
		/* There's no reason to invalidate the TLB with paging off. */
		x86_phys_map_page(page_addr, page_addr, 0);
	}

	cpu_enable_paging_pae((ulong)pdpt);
}

/* Disable paging and PAE mode. */
static void x86_phys_exit_paging(void)
{
	cpu_disable_paging_pae();
}

/*
 * Set physical memory to a particular value when the whole region fits on one
 * page.
 *
 * @param map_addr	The address that starts the physical page.
 * @param offset	How far into that page to start setting a value.
 * @param c		The value to set memory to.
 * @param size		The size in bytes of the area to set.
 */
static void x86_phys_memset_page(phys_addr_t map_addr, uintptr_t offset, int c,
				 unsigned size)
{
	/*
	 * U-Boot should be far away from the beginning of memory, so that's a
	 * good place to map our window on top of.
	 */
	const uintptr_t window = LARGE_PAGE_SIZE;

	/* Make sure the window is below U-Boot. */
	assert(window + LARGE_PAGE_SIZE <
	       gd->relocaddr - CONFIG_SYS_MALLOC_LEN - CONFIG_SYS_STACK_SIZE);
	/* Map the page into the window and then memset the appropriate part. */
	x86_phys_map_page(window, map_addr, 1);
	memset((void *)(window + offset), c, size);
}

/*
 * A physical memory analogue to memset with matching parameters and return
 * value.
 */
phys_addr_t arch_phys_memset(phys_addr_t start, int c, phys_size_t size)
{
	const phys_addr_t max_addr = (phys_addr_t)~(uintptr_t)0;
	const phys_addr_t orig_start = start;

	if (!size)
		return orig_start;

	/* Handle memory below 4GB. */
	if (start <= max_addr) {
		phys_size_t low_size = min(max_addr + 1 - start, size);
		void *start_ptr = (void *)(uintptr_t)start;

		assert(((phys_addr_t)(uintptr_t)start) == start);
		memset(start_ptr, c, low_size);
		start += low_size;
		size -= low_size;
	}

	/* Use paging and PAE to handle memory above 4GB up to 64GB. */
	if (size) {
		phys_addr_t map_addr = start & ~(LARGE_PAGE_SIZE - 1);
		phys_addr_t offset = start - map_addr;

		x86_phys_enter_paging();

		/* Handle the first partial page. */
		if (offset) {
			phys_addr_t end =
				min(map_addr + LARGE_PAGE_SIZE, start + size);
			phys_size_t cur_size = end - start;
			x86_phys_memset_page(map_addr, offset, c, cur_size);
			size -= cur_size;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the complete pages. */
		while (size > LARGE_PAGE_SIZE) {
			x86_phys_memset_page(map_addr, 0, c, LARGE_PAGE_SIZE);
			size -= LARGE_PAGE_SIZE;
			map_addr += LARGE_PAGE_SIZE;
		}
		/* Handle the last partial page. */
		if (size)
			x86_phys_memset_page(map_addr, 0, c, size);

		x86_phys_exit_paging();
	}
	return orig_start;
}
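
/*
 * Worked example, added for illustration only (the addresses are invented,
 * not taken from the original file): when x86_phys_memset_page() maps an
 * example physical page at 0x123400000 (just above 4GB) into the 2MB window
 * at virtual address 0x200000, x86_phys_map_page() decomposes the addresses
 * as:
 *
 *	pdpt_idx  = (0x200000 >> 30) & 0x3   = 0	(bits 31:30 of virt)
 *	pdt_idx   = (0x200000 >> 21) & 0x1ff = 1	(bits 29:21 of virt)
 *	pde->base = 0x123400000 >> 21        = 0x91a	(2MB-aligned frame)
 *
 * so the entry written is pdts[0][1], and bits 20:0 of the virtual address
 * pass through untranslated as the offset inside the 2MB page.
 */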
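
/*
 * Illustrative usage sketch, not part of the original file: the caller below
 * is hypothetical (the function name and addresses are made up); only
 * arch_phys_memset() itself is defined here. Guarded out so it is never
 * built.
 */
#if 0	/* example only */
static void example_wipe_high_memory(void)
{
	/* Zero 16MB of physical memory starting at the 4GB boundary; this
	 * goes entirely through the temporary PAE mapping. */
	arch_phys_memset(1ULL << 32, 0, 16 << 20);

	/* A 2MB region straddling 4GB: the low half is set directly, the
	 * high half through the 2MB paging window. */
	arch_phys_memset((1ULL << 32) - (1 << 20), 0xff, 2 << 20);
}
#endif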