// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/uio.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
        pte_t old_pte = READ_ONCE(*ptep);

        if (check_swap && is_swap_pte(old_pte)) {
                swp_entry_t entry = pte_to_swp_entry(old_pte);

                if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
                        return;
        }

        mte_clear_page_tags(page_address(page));
}

void mte_sync_tags(pte_t *ptep, pte_t pte)
{
        struct page *page = pte_page(pte);
        long i, nr_pages = compound_nr(page);
        bool check_swap = nr_pages == 1;

        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
                if (!test_and_set_bit(PG_mte_tagged, &page->flags))
                        mte_sync_page_tags(page, ptep, check_swap);
        }
}

int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = page_address(page1);
        addr2 = page_address(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);

        if (!system_supports_mte() || ret)
                return ret;

        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
         * pages is tagged, set_pte_at() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (test_bit(PG_mte_tagged, &page1->flags) ||
            test_bit(PG_mte_tagged, &page2->flags))
                return addr1 != addr2;

        return ret;
}

static void update_sctlr_el1_tcf0(u64 tcf0)
{
        /* ISB required for the kernel uaccess routines */
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
        isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
        /*
         * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
         * optimisation. Disable preemption so that it does not see
         * the variable update before the SCTLR_EL1.TCF0 one.
         */
        preempt_disable();
        current->thread.sctlr_tcf0 = tcf0;
        update_sctlr_el1_tcf0(tcf0);
        preempt_enable();
}

static void update_gcr_el1_excl(u64 incl)
{
        u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;

        /*
         * Note that 'incl' is an include mask (controlled by the user via
         * prctl()) while GCR_EL1 accepts an exclude mask.
         * No need for ISB since this only affects EL0 currently, implicit
         * with ERET.
         */
        sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 incl)
{
        current->thread.gcr_user_incl = incl;
        update_gcr_el1_excl(incl);
}
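/*
 * Illustrative worked example of the include -> exclude conversion in
 * update_gcr_el1_excl() above; not part of the original file. It assumes
 * the architectural 16-bit exclude field, i.e. SYS_GCR_EL1_EXCL_MASK is
 * 0xffff. A user include mask of 0x0003 (allow tags 0 and 1) becomes:
 *
 *      excl = ~0x0003 & 0xffff = 0xfffc
 *
 * i.e. every GCR_EL1.Exclude bit other than 0 and 1 is set, so the IRG
 * instruction at EL0 may only generate tags 0 and 1.
 */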
void flush_mte_state(void)
{
        if (!system_supports_mte())
                return;

        /* clear any pending asynchronous tag fault */
        dsb(ish);
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking */
        set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
        /* reset tag generation mask */
        set_gcr_el1_excl(0);
}

void mte_thread_switch(struct task_struct *next)
{
        if (!system_supports_mte())
                return;

        /* avoid expensive SCTLR_EL1 accesses if no change */
        if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
                update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
        update_gcr_el1_excl(next->thread.gcr_user_incl);
}

void mte_suspend_exit(void)
{
        if (!system_supports_mte())
                return;

        update_gcr_el1_excl(current->thread.gcr_user_incl);
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        u64 tcf0;
        u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;

        if (!system_supports_mte())
                return 0;

        switch (arg & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
                tcf0 = SCTLR_EL1_TCF0_NONE;
                break;
        case PR_MTE_TCF_SYNC:
                tcf0 = SCTLR_EL1_TCF0_SYNC;
                break;
        case PR_MTE_TCF_ASYNC:
                tcf0 = SCTLR_EL1_TCF0_ASYNC;
                break;
        default:
                return -EINVAL;
        }

        if (task != current) {
                task->thread.sctlr_tcf0 = tcf0;
                task->thread.gcr_user_incl = gcr_incl;
        } else {
                set_sctlr_el1_tcf0(tcf0);
                set_gcr_el1_excl(gcr_incl);
        }

        return 0;
}

long get_mte_ctrl(struct task_struct *task)
{
        unsigned long ret;

        if (!system_supports_mte())
                return 0;

        ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;

        switch (task->thread.sctlr_tcf0) {
        case SCTLR_EL1_TCF0_NONE:
                return PR_MTE_TCF_NONE;
        case SCTLR_EL1_TCF0_SYNC:
                ret |= PR_MTE_TCF_SYNC;
                break;
        case SCTLR_EL1_TCF0_ASYNC:
                ret |= PR_MTE_TCF_ASYNC;
                break;
        }

        return ret;
}
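/*
 * Hedged userspace sketch of the prctl() interface serviced by
 * set_mte_ctrl()/get_mte_ctrl() above; not part of this file. It assumes
 * the PR_* constants from <linux/prctl.h> and the arm64-specific PROT_MTE
 * mmap protection flag; error handling is elided.
 *
 *      #include <sys/mman.h>
 *      #include <sys/prctl.h>
 *
 *      // Enable the tagged address ABI with synchronous tag check
 *      // faults, and allow IRG to generate tags 1..15 (the include
 *      // mask 0xfffe leaves tag 0 out).
 *      prctl(PR_SET_TAGGED_ADDR_CTRL,
 *            PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *            (0xfffeUL << PR_MTE_TAG_SHIFT),
 *            0, 0, 0);
 *
 *      // Tags are only valid on PROT_MTE mappings (PG_mte_tagged set).
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */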
/*
 * Access MTE tags in another process' address space as given in mm. Update
 * kiov->iov_len with the number of tags copied. Return 0 if any tags were
 * copied, an error otherwise. Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                                struct iovec *kiov, unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        void __user *buf = kiov->iov_base;
        size_t len = kiov->iov_len;
        int ret = 0;    /* not otherwise set if the requested length is 0 */
        int write = gup_flags & FOLL_WRITE;

        if (!access_ok(buf, len))
                return -EFAULT;

        if (mmap_read_lock_killable(mm))
                return -EIO;

        while (len) {
                unsigned long tags, offset;
                void *maddr;
                struct page *page = NULL;

                ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
                                            &vma, NULL);
                if (ret <= 0)
                        break;

                /*
                 * Only copy tags if the page has been mapped as PROT_MTE
                 * (PG_mte_tagged set). Otherwise the tags are not valid and
                 * not accessible to userspace. Moreover, an mprotect(PROT_MTE)
                 * would cause the existing tags to be cleared if the page
                 * was never mapped with PROT_MTE.
                 */
                if (!test_bit(PG_mte_tagged, &page->flags)) {
                        ret = -EOPNOTSUPP;
                        put_page(page);
                        break;
                }

                /* limit access to the end of the page */
                offset = offset_in_page(addr);
                tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

                maddr = page_address(page);
                if (write) {
                        tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
                        set_page_dirty_lock(page);
                } else {
                        tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
                }
                put_page(page);

                /* error accessing the tracer's buffer */
                if (!tags)
                        break;

                len -= tags;
                buf += tags;
                addr += tags * MTE_GRANULE_SIZE;
        }
        mmap_read_unlock(mm);

        /* return an error if no tags copied */
        kiov->iov_len = buf - kiov->iov_base;
        if (!kiov->iov_len) {
                /* check for error accessing the tracee's address space */
                if (ret <= 0)
                        return -EIO;
                else
                        return -EFAULT;
        }

        return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
                              struct iovec *kiov, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return -EPERM;

        if (!tsk->ptrace || (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return -EPERM;
        }

        ret = __access_remote_tags(mm, addr, kiov, gup_flags);
        mmput(mm);

        return ret;
}

int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data)
{
        int ret;
        struct iovec kiov;
        struct iovec __user *uiov = (void __user *)data;
        unsigned int gup_flags = FOLL_FORCE;

        if (!system_supports_mte())
                return -EIO;

        if (get_user(kiov.iov_base, &uiov->iov_base) ||
            get_user(kiov.iov_len, &uiov->iov_len))
                return -EFAULT;

        if (request == PTRACE_POKEMTETAGS)
                gup_flags |= FOLL_WRITE;

        /* align addr to the MTE tag granule */
        addr &= MTE_GRANULE_MASK;

        ret = access_remote_tags(child, addr, &kiov, gup_flags);
        if (!ret)
                ret = put_user(kiov.iov_len, &uiov->iov_len);

        return ret;
}
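/*
 * Hedged tracer-side sketch of the PTRACE_PEEKMTETAGS request serviced by
 * mte_ptrace_copy_tags() above; not part of this file. Each byte of the
 * iovec buffer carries one tag per MTE_GRANULE_SIZE (16-byte) granule, and
 * the tracee's iov_len is updated with the number of tags actually copied.
 * 'pid' and 'addr' (a PROT_MTE address in the tracee) are assumed to have
 * been set up elsewhere; error handling is elided.
 *
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *
 *      unsigned char tags[16];
 *      struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *      // Fails with EOPNOTSUPP if 'addr' does not point into a mapping
 *      // with PG_mte_tagged set (i.e. one mapped with PROT_MTE).
 *      ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 */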