// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping the effects of functions like memset() inside instrumented code.
 */

void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ctx->allow_reporting = false;
}

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs, instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}
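
/*
 * Illustrative example (hypothetical code, not part of this file): in an
 * instrumented kernel, reading a freshly allocated, non-zeroed slab object
 * is reported, while a __GFP_ZERO allocation is not:
 *
 *	int *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	int *q = kzalloc(sizeof(*q), GFP_KERNEL);
 *
 *	if (*p)		// KMSAN: uninit-value (poisoned by kmsan_slab_alloc())
 *		;
 *	if (*q)		// no report: __GFP_ZERO memory was unpoisoned
 *		;
 */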

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

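/*
 * Note: KMSAN keeps two kinds of metadata for every piece of kernel memory:
 * shadow of the same size as the data (a set shadow bit means the
 * corresponding data bit is uninitialized) and a 4-byte origin ID per
 * 4 bytes of data, describing where the uninitialized value was created.
 * The helpers below resolve the addresses of these two metadata regions for
 * a vmalloc address via kmsan_get_metadata().
 */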
static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow = NULL, *origin = NULL;
	unsigned long off = 0;
	int nr, err = 0, clean = 0, mapped;

	if (!kmsan_enabled || kmsan_in_runtime())
		return 0;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
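	/*
	 * Note: "clean" counts loop iterations that completed in full; on
	 * failure it is the number of pages whose shadow and origin mappings
	 * need to be torn down at the "ret" label below.
	 */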
	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		if (!shadow || !origin) {
			err = -ENOMEM;
			goto ret;
		}
		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			goto ret;
		}
		shadow = NULL;
		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
		if (mapped) {
			__vunmap_range_noflush(
				vmalloc_shadow(start + off),
				vmalloc_shadow(start + off + PAGE_SIZE));
			err = mapped;
			goto ret;
		}
		origin = NULL;
	}
	/* Page mapping loop finished normally, nothing to clean up. */
	clean = 0;

ret:
	/*
	 * Something may have gone wrong. Free the shadow/origin pages
	 * allocated on the failing loop iteration (both are NULL on success),
	 * then delete the mappings created during the previous iterations.
	 */
	if (shadow)
		__free_pages(shadow, 1);
	if (origin)
		__free_pages(origin, 1);
	if (clean > 0) {
		__vunmap_range_noflush(
			vmalloc_shadow(start),
			vmalloc_shadow(start + clean * PAGE_SIZE));
		__vunmap_range_noflush(
			vmalloc_origin(start),
			vmalloc_origin(start + clean * PAGE_SIZE));
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
	return err;
}

void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point the memory has already been copied. It's hard to check
	 * it before copying, as the size of the actually copied buffer is
	 * unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if ((u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when
		 * a compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything, just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);
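
/*
 * Illustrative example (hypothetical code, not part of this file): the hook
 * above is what turns a usercopy of partially initialized kernel data into an
 * infoleak report. Assuming a struct with padding or unwritten fields:
 *
 *	struct foo f;
 *
 *	f.field = 42;				// the rest of f stays poisoned
 *	copy_to_user(ubuf, &f, sizeof(f));	// KMSAN: kernel-infoleak report
 */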

/* Helper function to check a URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ 0, REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);
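
/*
 * Illustrative note: the USB core calls this hook when a URB is submitted.
 * For an OUT URB the transfer buffer is about to be read by the host
 * controller, so it must be fully initialized; for an IN URB the controller
 * will overwrite the buffer, so its contents are marked initialized instead.
 */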

static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_TO_DEVICE:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		break;
	case DMA_FROM_DEVICE:
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_NONE:
		break;
	}
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/*
	 * The kernel may occasionally give us adjacent DMA pages not belonging
	 * to the same allocation. Process them separately to avoid triggering
	 * internal KMSAN checks.
	 */
	while (size > 0) {
		page_offset = offset_in_page(addr);
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}
EXPORT_SYMBOL_GPL(kmsan_handle_dma);
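
/*
 * Illustrative example (hypothetical code, not part of this file): the DMA
 * mapping API calls the hook above, so mapping an uninitialized buffer
 * towards a device is reported, while a buffer mapped for reception is
 * unpoisoned because the device will overwrite it:
 *
 *	dma_map_single(dev, buf, len, DMA_TO_DEVICE);	// buf is checked
 *	dma_map_single(dev, buf, len, DMA_FROM_DEVICE);	// buf is unpoisoned
 */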

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}

/* Functions from kmsan-checks.h follow. */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	kmsan_enter_runtime();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
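
/*
 * Illustrative example (hypothetical code, not part of this file): a driver
 * that fills a buffer through a channel KMSAN cannot instrument (e.g. port
 * I/O) can declare the bytes initialized by hand; read_from_hw() is a
 * hypothetical helper:
 *
 *	char buf[64];
 *
 *	read_from_hw(buf, sizeof(buf));
 *	kmsan_unpoison_memory(buf, sizeof(buf));  // suppress false positives
 */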

/*
 * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
 * runtime.
 *
 * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
 * code. Those regs need to be unpoisoned, otherwise using them will result in
 * false positives.
 * Using kmsan_unpoison_memory() is not an option in entry code, because the
 * return value of in_task() is inconsistent - as a result, certain calls to
 * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures
 * that the registers are unpoisoned even if kmsan_in_runtime() is true in the
 * early entry code.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}

void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	return kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					   REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
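
/*
 * Illustrative example (hypothetical code, not part of this file): assert
 * that a request structure is fully initialized before handing it to code
 * KMSAN cannot see (e.g. firmware); any poisoned byte triggers a report:
 *
 *	kmsan_check_memory(req, sizeof(*req));
 */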