// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>

int __init kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur = k_start;
	int k_size = k_end - k_start;
	/* Largest power-of-two block that fits in the shadow area. */
	int k_size_base = 1 << (fls(k_size) - 1);
	int ret;
	void *block;

	block = memblock_alloc(k_size, k_size_base);

	/*
	 * If the shadow area is big enough and suitably aligned, cover as
	 * much of it as possible with BATs (minimum BAT size is 128K) and
	 * leave only the remainder to page tables.
	 */
	if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
		int k_size_more = 1 << (fls(k_size - k_size_base) - 1);

		setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
		if (k_size_more >= SZ_128K)
			setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
			       k_size_more, PAGE_KERNEL);

		/* Skip past whatever the BATs actually cover. */
		if (v_block_mapped(k_start))
			k_cur = k_start + k_size_base;
		if (v_block_mapped(k_start + k_size_base))
			k_cur = k_start + k_size_base + k_size_more;

		update_bats();
	}

	/* Fall back to a page-aligned allocation if the aligned one failed. */
	if (!block)
		block = memblock_alloc(k_size, PAGE_SIZE);
	if (!block)
		return -ENOMEM;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	/* Drop the early shadow mapping over the range now covered by BATs. */
	kasan_update_early_region(k_start, k_cur, __pte(0));

	/* Map the rest of the shadow area page by page. */
	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		void *va = block + k_cur - k_start;
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}