// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/*
 * Transition a kernel PTE from protection 'old' to 'new': clear only the
 * bits that are in 'old' but not in 'new', and set only the bits that are
 * in 'new' but not in 'old'.
 */
static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
				    unsigned long old, unsigned long new)
{
	return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
}

/*
 * Updates the attributes of a page atomically.
 *
 * This sequence is safe against concurrent updates, and also allows updating the
 * attributes of a page currently being executed or accessed.
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;

	/* modify the PTE bits as desired */
	switch (action) {
	case SET_MEMORY_RO:
		/* Don't clear DIRTY bit */
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_RW:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
		break;
	case SET_MEMORY_NX:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_X:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
		break;
	case SET_MEMORY_NP:
		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
		break;
	case SET_MEMORY_P:
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}
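
/*
 * Usage sketch (not part of this file): change_memory_attr() is the backend
 * that the generic set_memory_*() helpers are expected to reach on powerpc.
 * Assuming the usual static-inline wrappers in the architecture's
 * asm/set_memory.h header, a caller marking a page-aligned buffer read-only
 * and non-executable would look roughly like this:
 *
 *	static inline int set_memory_ro(unsigned long addr, int numpages)
 *	{
 *		return change_memory_attr(addr, numpages, SET_MEMORY_RO);
 *	}
 *
 *	// e.g. protect one page of 'buf' once it has been initialised
 *	err = set_memory_ro((unsigned long)buf, 1);
 *	if (!err)
 *		err = set_memory_nx((unsigned long)buf, 1);
 *
 * numpages is a count of whole pages; change_memory_attr() aligns addr down
 * to a page boundary and applies change_page_attr() to each PTE in the range
 * via apply_to_existing_page_range().
 */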