// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM.
 *
 * Copyright (c) 2018 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 * Author: Linus Walleij <linus.walleij@linaro.org>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/proc-fns.h>

#include "mm.h"

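/*
 * Temporary page tables used by kasan_init(): swapper_pg_dir is copied
 * into tmp_pgd_table (and, on LPAE, the PMD covering the shadow region
 * into tmp_pmd_table) so that the early shadow stays mapped while the
 * proper shadow memory is being installed.
 */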
static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;

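/*
 * Allocate a naturally aligned block of shadow memory from memblock,
 * above the DMA zone and without kmemleak tracking. Returns NULL on
 * failure; callers panic on a NULL return.
 */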
static __init void *kasan_alloc_block(size_t size)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
}

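/*
 * Populate the PTEs covering the shadow of [addr, end). In the early
 * pass every PTE is pointed at the shared kasan_early_shadow_page; in
 * the late pass each PTE gets its own page, initialized to
 * KASAN_SHADOW_INIT.
 */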
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pte_t *ptep = pte_offset_kernel(pmdp, addr);

	do {
		pte_t entry;
		void *p;

		next = addr + PAGE_SIZE;

		if (!early) {
			if (!pte_none(READ_ONCE(*ptep)))
				continue;

			p = kasan_alloc_block(PAGE_SIZE);
			if (!p) {
				panic("%s failed to allocate shadow page for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
			entry = pfn_pte(virt_to_pfn(p),
					__pgprot(pgprot_val(PAGE_KERNEL)));
		} else if (pte_none(READ_ONCE(*ptep))) {
			/*
			 * The early shadow maps every KASan access to one
			 * and the same page, kasan_early_shadow_page, so
			 * that the instrumentation works on a scratch area
			 * until the proper KASan shadow memory can be set
			 * up.
			 */
			entry = pfn_pte(virt_to_pfn(kasan_early_shadow_page),
					__pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY | L_PTE_XN));
		} else {
			/*
			 * Early shadow mappings are PMD_SIZE aligned, so if
			 * the first entry is already set, they must all be
			 * set.
			 */
			return;
		}

		set_pte_at(&init_mm, addr, ptep, entry);
	} while (ptep++, addr = next, addr != end);
}

/*
 * The pmd (page middle directory) is only used on LPAE.
 */
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, bool early)
{
	unsigned long next;
	pmd_t *pmdp = pmd_offset(pudp, addr);

	do {
		if (pmd_none(*pmdp)) {
			/*
			 * Populate this PMD entry with a PTE table if it
			 * isn't already set: the shared early shadow PTE
			 * table in the early pass, a freshly allocated
			 * block otherwise.
			 */
			void *p = early ? kasan_early_shadow_pte :
				kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pmd_populate_kernel(&init_mm, pmdp, p);
			flush_pmd_entry(pmdp);
		}

		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, early);
	} while (pmdp++, addr = next, addr != end);
}

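/*
 * Walk the top-level entries covering [addr, end), allocating
 * intermediate tables in the late pass where needed, and hand each
 * range down to kasan_pmd_populate().
 */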
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      bool early)
{
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;

	pgdp = pgd_offset_k(addr);

	do {
		/*
		 * Allocate and populate the next-level table for this PGD
		 * entry if it doesn't already exist; with p4d and pud
		 * folded into the pgd, this populates the PMD level.
		 */
		if (!early && pgd_none(*pgdp)) {
			void *p = kasan_alloc_block(PAGE_SIZE);

			if (!p) {
				panic("%s failed to allocate shadow block for address 0x%lx\n",
				      __func__, addr);
				return;
			}
			pgd_populate(&init_mm, pgdp, p);
		}

		next = pgd_addr_end(addr, end);
		/*
		 * We just immediately jump over the p4d and pud page
		 * directories since we believe ARM32 will never gain
		 * four- or five-level page tables.
		 */
		p4dp = p4d_offset(pgdp, addr);
		pudp = pud_offset(p4dp, addr);

		kasan_pmd_populate(pudp, addr, next, early);
	} while (pgdp++, addr = next, addr != end);
}

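/*
 * kasan_early_init() runs before the processor vector is set up, but
 * set_pte_at() resolves to processor.set_pte_ext() on MULTI_CPU
 * kernels, so the processor has to be looked up here first.
 */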
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init kasan_early_init(void)
{
	struct proc_info_list *list;

	/*
	 * Locate the processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S.
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (list) {
#ifdef MULTI_CPU
		processor = *list->proc;
#endif
	}

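	/*
	 * Every byte of shadow covers 8 bytes of address space, so the
	 * shadow of the full 4 GiB space is 1 << 29 bytes ending at
	 * KASAN_SHADOW_END. With shadow(addr) = (addr >> 3) +
	 * KASAN_SHADOW_OFFSET, the offset must therefore equal
	 * KASAN_SHADOW_END - (1 << 29).
	 */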
	BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
	/*
	 * We walk the page table and set all of the shadow memory to point
	 * to the scratch page.
	 */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, true);
}

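/*
 * Unmap the early shadow by clearing the PMD entries covering it; the
 * shadow region is PMD_SIZE aligned, so stepping one PMD at a time
 * covers it completely.
 */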
static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	for (; start && start < end; start += PMD_SIZE)
		pmd_clear(pmd_off_k(start));
}

static int __init create_mapping(void *start, void *end)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = kasan_mem_to_shadow(end);

	pr_info("Mapping kernel virtual memory block: %px-%px at shadow: %px-%px\n",
		start, end, shadow_start, shadow_end);

	kasan_pgd_populate((unsigned long)shadow_start & PAGE_MASK,
			   PAGE_ALIGN((unsigned long)shadow_end), false);
	return 0;
}

void __init kasan_init(void)
{
	phys_addr_t pa_start, pa_end;
	u64 i;

	/*
	 * We are going to perform proper setup of shadow memory.
	 *
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory.
	 *
	 * To keep the early shadow memory MMU tables around while setting up
	 * the proper shadow memory, we copy swapper_pg_dir (the initial page
	 * table) to tmp_pgd_table and use that to keep the early shadow memory
	 * mapped until the full shadow setup is finished. Then we swap back
	 * to the proper swapper_pg_dir.
	 */

	memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
#ifdef CONFIG_ARM_LPAE
	/* We need to be in the same PGD or this won't work */
	BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) !=
		     pgd_index(KASAN_SHADOW_END));
	memcpy(tmp_pmd_table,
	       (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
	       sizeof(tmp_pmd_table));
	set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
		__pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
#endif
	cpu_switch_mm(tmp_pgd_table, &init_mm);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

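	/*
	 * Point the shadow of the vmalloc area and of everything above
	 * VMALLOC_END at the zero shadow page. With CONFIG_KASAN_VMALLOC
	 * the vmalloc range itself is instead shadowed on demand by
	 * kasan_populate_vmalloc().
	 */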
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
					    kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
				    kasan_mem_to_shadow((void *)-1UL) + 1);

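	/* Allocate real shadow memory for every lowmem memblock region. */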
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = __va(pa_start);
		void *end = __va(pa_end);

		/* Do not attempt to shadow highmem */
		if (pa_start >= arm_lowmem_limit) {
			pr_info("Skip highmem block at %pa-%pa\n", &pa_start, &pa_end);
			continue;
		}
		if (pa_end > arm_lowmem_limit) {
			pr_info("Truncating shadow for memory block at %pa-%pa to lowmem region at %pa\n",
				&pa_start, &pa_end, &arm_lowmem_limit);
			end = __va(arm_lowmem_limit);
		}
		if (start >= end) {
			pr_info("Skipping invalid memory block %pa-%pa (virtual %p-%p)\n",
				&pa_start, &pa_end, start, end);
			continue;
		}

		create_mapping(start, end);
	}

	/*
	 * 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
	 *    so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
	 *    VMALLOC support KASAN will manage this region dynamically,
	 *    refer to kasan_populate_vmalloc() and ARM's implementation of
	 *    module_alloc().
	 * 2. The shadow of PKMAP_BASE ~ PKMAP_BASE + PMD_SIZE and the shadow
	 *    of MODULES_VADDR ~ MODULES_END can share the same PMD, so we
	 *    can't use kasan_populate_zero_shadow() here.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
		create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
	create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));

	/*
	 * KASan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte_at(&init_mm, KASAN_SHADOW_START + i * PAGE_SIZE,
			   &kasan_early_shadow_pte[i],
			   pfn_pte(virt_to_pfn(kasan_early_shadow_page),
				   __pgprot(pgprot_val(PAGE_KERNEL)
					    | L_PTE_RDONLY)));

	cpu_switch_mm(swapper_pg_dir, &init_mm);
	local_flush_tlb_all();

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	pr_info("Kernel address sanitizer initialized\n");
	init_task.kasan_depth = 0;
}