// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}
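
/*
 * Illustrative userspace sketch (not part of this file): the hook above is
 * what ultimately gates read() on /dev/mem, via read_mem() in
 * drivers/char/mem.c, which fails with -EFAULT when this check rejects the
 * range. The snippet assumes root, CONFIG_DEVMEM=y and no STRICT_DEVMEM
 * filtering; the physical address used is an arbitrary example, not a real
 * platform address.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[64];
 *		int fd = open("/dev/mem", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// pread() at a physical address: succeeds only while the
 *		// whole range lies in mapped (non-NOMAP) memblock memory.
 *		if (pread(fd, buf, sizeof(buf), 0x80000000UL) < 0)
 *			perror("pread");
 *		close(fd);
 *		return 0;
 *	}
 */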
/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	return 0;
}
arch_initcall(adjust_protection_map);

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
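
/*
 * Illustrative userspace sketch (not part of this file): how the vm_flags
 * combinations handled above typically arise. PROT_EXEC without PROT_READ
 * exercises the VM_EXEC entry that adjust_protection_map() switches to
 * PAGE_EXECONLY on EPAN-capable CPUs, and PROT_MTE sets VM_MTE, which
 * vm_get_page_prot() maps to the MT_NORMAL_TAGGED attribute index. The
 * PROT_MTE fallback definition mirrors the arm64 uapi value; whether either
 * mapping succeeds depends on the running kernel and CPU.
 *
 *	#include <sys/mman.h>
 *
 *	#ifndef PROT_MTE
 *	#define PROT_MTE	0x20	// arm64 uapi <asm/mman.h> value
 *	#endif
 *
 *	int main(void)
 *	{
 *		// Execute-only mapping: with EPAN, neither userspace reads
 *		// nor kernel uaccess can read it, only instruction fetch.
 *		void *text = mmap(NULL, 4096, PROT_EXEC,
 *				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// Tagged anonymous memory: PROT_MTE is recorded as VM_MTE
 *		// and yields Normal Tagged memory via the function above.
 *		void *tagged = mmap(NULL, 4096,
 *				    PROT_READ | PROT_WRITE | PROT_MTE,
 *				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		return (text == MAP_FAILED || tagged == MAP_FAILED);
 *	}
 */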