/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

/*
 * Point the entire KASAN shadow region at the single shared
 * kasan_early_shadow_page so shadow accesses are valid before the real
 * shadow memory is allocated by populate()/kasan_init().
 */
void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	int i;

	/* Map every early shadow PTE to the shared early shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL));

	/* Install that single PTE table into every PMD of the shadow range. */
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
	early_trap_init();
}

/*
 * Allocate real, writable shadow pages for the shadow address range
 * [start, end) and hook them into the page tables, replacing the shared
 * early mapping, then zero the new shadow.
 *
 * NOTE(review): the whole-PMD loops below assume start/end are
 * PMD-aligned shadow addresses — confirm against the caller in
 * kasan_init().
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	/* One PTE per shadow page, allocated as one contiguous block. */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Back every shadow PTE with a freshly allocated physical page. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Point each PMD covering [start, end) at its new PTE table. */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/* Drop stale translations of the old early shadow mapping. */
	local_flush_tlb_all();
	memset(start, 0, end - start);
}
/*
 * Build the real shadow map for the vmalloc/KSEG range and enable KASAN
 * error reporting.
 */
void __init kasan_init(void)
{
	int i;

	/* Sanity-check the shadow layout constants at compile time. */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}