// SPDX-License-Identifier: GPL-2.0-only

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>

/* Walk the VMAs with MTE enabled; a no-op when the CPU lacks MTE support. */
#define for_each_mte_vma(vmi, vma)					\
	if (system_supports_mte())					\
		for_each_vma(vmi, vma)					\
			if (vma->vm_flags & VM_MTE)

static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
}

/* Derived from dump_user_range(); start/end must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	int ret = 1;
	unsigned long addr;
	void *tags = NULL;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!page_mte_tagged(page)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/* Allocate the tag buffer lazily and reuse it for each page. */
		if (!tags) {
			tags = mte_allocate_tag_storage();
			if (!tags) {
				put_page(page);
				ret = 0;
				break;
			}
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		/*
		 * The tags buffer is freed exactly once after the loop;
		 * freeing it here as well would be a double-free.
		 */
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
			ret = 0;
			break;
		}
	}

	if (tags)
		mte_free_tag_storage(tags);

	return ret;
}

/*
 * The elf_core_extra_*() hooks below are declared in <linux/elfcore.h> and
 * called by the ELF coredump code: one PT_AARCH64_MEMTAG_MTE program header
 * is written per tagged VMA, followed by the tag data for those VMAs.
 */
Elf_Half elf_core_extra_phdrs(void)
{
	struct vm_area_struct *vma;
	int vma_count = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		vma_count++;

	return vma_count;
}

int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		struct elf_phdr phdr;

		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}

size_t elf_core_extra_data_size(void)
{
	struct vm_area_struct *vma;
	size_t data_size = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		data_size += mte_vma_tag_dump_size(vma);

	return data_size;
}

int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		/* These VMAs got a zero-sized phdr above; skip their data. */
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
			return 0;
	}

	return 1;
}
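/*
 * For illustration only, not part of the kernel file above: a minimal
 * standalone userspace sketch of how a core-file consumer could locate
 * the PT_AARCH64_MEMTAG_MTE segments emitted by the hooks above. It
 * assumes a 64-bit little-endian core file and elides most error
 * handling; the fallback PT_AARCH64_MEMTAG_MTE definition mirrors
 * arch/arm64/include/asm/elf.h.
 */
#include <elf.h>
#include <stdio.h>

#ifndef PT_AARCH64_MEMTAG_MTE
#define PT_AARCH64_MEMTAG_MTE	0x70000002
#endif

int main(int argc, char **argv)
{
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr;
	FILE *f;
	int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <core-file>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1)
		return 1;

	for (i = 0; i < ehdr.e_phnum; i++) {
		if (fseek(f, (long)(ehdr.e_phoff + (Elf64_Off)i * ehdr.e_phentsize),
			  SEEK_SET) ||
		    fread(&phdr, sizeof(phdr), 1, f) != 1)
			return 1;

		if (phdr.p_type != PT_AARCH64_MEMTAG_MTE)
			continue;

		/*
		 * p_vaddr/p_memsz describe the tagged VMA; p_filesz is the
		 * tag data at p_offset (MTE_PAGE_TAG_STORAGE bytes per page,
		 * i.e. 128 bytes per 4K page), or 0 when the VMA was marked
		 * VM_DONTDUMP.
		 */
		printf("MTE tags for [0x%llx, 0x%llx): %llu bytes at offset 0x%llx\n",
		       (unsigned long long)phdr.p_vaddr,
		       (unsigned long long)(phdr.p_vaddr + phdr.p_memsz),
		       (unsigned long long)phdr.p_filesz,
		       (unsigned long long)phdr.p_offset);
	}

	fclose(f);
	return 0;
}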