xref: /openbmc/linux/arch/arm64/kernel/mte.c (revision 6cd70754)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

u64 gcr_kernel_excl __ro_after_init;

static bool report_fault_once = true;

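/*
 * Initialise the tags of a page about to become user-accessible: restore
 * them from swap if a saved copy exists, otherwise clear them to zero.
 */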
static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
	pte_t old_pte = READ_ONCE(*ptep);

	if (check_swap && is_swap_pte(old_pte)) {
		swp_entry_t entry = pte_to_swp_entry(old_pte);

		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
			return;
	}

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() between setting the flags and clearing the
	 * tags because, while another thread that reads page->flags and
	 * builds a tagged address out of it has an address dependency
	 * ordering its memory access, on the current thread there is
	 * otherwise no guarantee of ordering between the new page->flags
	 * becoming visible and the tags being updated.
	 */
	smp_wmb();
	mte_clear_page_tags(page_address(page));
}

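/*
 * Called via set_pte_at() when a tagged (PROT_MTE) mapping is installed,
 * so that the tags of each page are initialised exactly once, guarded by
 * PG_mte_tagged.
 */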
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
	struct page *page = pte_page(pte);
	long i, nr_pages = compound_nr(page);
	bool check_swap = nr_pages == 1;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_sync_page_tags(page, ptep, check_swap);
	}
}

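/*
 * arm64 page comparison helper, used among others by KSM when deciding
 * whether two pages can be merged.
 */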
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (test_bit(PG_mte_tagged, &page1->flags) ||
	    test_bit(PG_mte_tagged, &page2->flags))
		return addr1 != addr2;

	return ret;
}

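/*
 * Worked example, assuming the HW tag-based KASAN caller passes
 * KASAN_TAG_MAX (0xFD): FIELD_GET() extracts the MTE tag 0xD, so
 * incl = GENMASK(13, 0) = 0x3fff and gcr_kernel_excl = 0xc000, i.e.
 * IRG at EL1 never generates the tags 0xE and 0xF.
 */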
void mte_init_tags(u64 max_tag)
{
	static bool gcr_kernel_excl_initialized;

	if (!gcr_kernel_excl_initialized) {
		/*
		 * KASAN tags are 8-bit (mask 0xFF) while MTE tags are
		 * 4-bit (mask 0xF). This conversion extracts the MTE tag
		 * from a KASAN tag.
		 */
		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
					     max_tag), 0);

		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
		gcr_kernel_excl_initialized = true;
	}

	/* Enable the kernel exclude mask for random tag generation. */
	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}

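/*
 * Enable in-kernel tag checking; reached on each CPU from the HW tag-based
 * KASAN initialisation path via arch_enable_tagging().
 */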
void mte_enable_kernel(void)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
	isb();
}

void mte_set_report_once(bool state)
{
	WRITE_ONCE(report_fault_once, state);
}

bool mte_report_once(void)
{
	return READ_ONCE(report_fault_once);
}

static void update_sctlr_el1_tcf0(u64 tcf0)
{
	/* ISB required for the kernel uaccess routines */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
	isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
	/*
	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
	 * optimisation. Disable preemption so that it does not see
	 * the variable update before the SCTLR_EL1.TCF0 one.
	 */
	preempt_disable();
	current->thread.sctlr_tcf0 = tcf0;
	update_sctlr_el1_tcf0(tcf0);
	preempt_enable();
}

static void update_gcr_el1_excl(u64 excl)
{
	/*
	 * Note that the mask controlled by the user via prctl() is an
	 * include mask while GCR_EL1 accepts an exclude mask.
	 * No ISB is needed since this only affects EL0 currently; it is
	 * implicit with ERET.
	 */
	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 excl)
{
	current->thread.gcr_user_excl = excl;

	/*
	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl by
	 * mte_set_user_gcr() in kernel_exit.
	 */
}

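/*
 * Reset the MTE state of the current task; called from flush_thread() on
 * exec().
 */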
void flush_mte_state(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking */
	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
	/* reset tag generation mask */
	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}

void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
}

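/*
 * GCR_EL1 is not preserved across a CPU suspend/resume cycle, so the
 * kernel exclude mask has to be restored when coming out of suspend.
 */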
void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	update_gcr_el1_excl(gcr_kernel_excl);
}

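/*
 * Backend for the PR_SET_TAGGED_ADDR_CTRL prctl(). A sketch of the
 * corresponding userspace call, using the flag values from
 * include/uapi/linux/prctl.h (see also
 * Documentation/arm64/memory-tagging-extension.rst); the 0xfffe include
 * mask asks for random tags in the range 1-15:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfffe << PR_MTE_TAG_SHIFT),
 *	      0, 0, 0);
 */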
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 tcf0;
	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
		       SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	switch (arg & PR_MTE_TCF_MASK) {
	case PR_MTE_TCF_NONE:
		tcf0 = SCTLR_EL1_TCF0_NONE;
		break;
	case PR_MTE_TCF_SYNC:
		tcf0 = SCTLR_EL1_TCF0_SYNC;
		break;
	case PR_MTE_TCF_ASYNC:
		tcf0 = SCTLR_EL1_TCF0_ASYNC;
		break;
	default:
		return -EINVAL;
	}

	if (task != current) {
		task->thread.sctlr_tcf0 = tcf0;
		task->thread.gcr_user_excl = gcr_excl;
	} else {
		set_sctlr_el1_tcf0(tcf0);
		set_gcr_el1_excl(gcr_excl);
	}

	return 0;
}

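/*
 * Backend for the PR_GET_TAGGED_ADDR_CTRL prctl(): reports the current
 * tag check fault mode and the tag inclusion mask.
 */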
long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;

	switch (task->thread.sctlr_tcf0) {
	case SCTLR_EL1_TCF0_NONE:
		ret |= PR_MTE_TCF_NONE;
		break;
	case SCTLR_EL1_TCF0_SYNC:
		ret |= PR_MTE_TCF_SYNC;
		break;
	case SCTLR_EL1_TCF0_ASYNC:
		ret |= PR_MTE_TCF_ASYNC;
		break;
	}

	return ret;
}

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags were copied, an error code
 * otherwise. Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	struct vm_area_struct *vma;
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int ret = 0;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		unsigned long tags, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
					    &vma, NULL);
		if (ret <= 0)
			break;

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to the user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			ret = -EOPNOTSUPP;
			put_page(page);
			break;
		}
		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags were copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (ret <= 0)
			return -EIO;
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from the
 * tracer's iovec buffer. Return 0 on success. Inspired by
 * ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

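	/*
	 * The tracee must be ptrace-attached to the caller and either be
	 * dumpable or the caller must be ptracer-capable in the tracee's
	 * user namespace, similar to the checks in ptrace_access_vm().
	 */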
	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}

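/*
 * Implements the PTRACE_PEEKMTETAGS and PTRACE_POKEMTETAGS requests. A
 * sketch of the tracer side, assuming a 'tags' byte array with one tag
 * per MTE_GRANULE_SIZE granule (see
 * Documentation/arm64/memory-tagging-extension.rst):
 *
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 *
 * On return, iov.iov_len holds the number of tags actually transferred.
 */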
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}
395