// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * User copy functions with cache flushing, for persistent memory
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
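
/*
 * Illustrative sketch, not part of the build: because the loop above
 * rounds @addr down and walks in cache-line strides, every line the
 * range touches is written back, not just the requested bytes. The
 * helper name and buffer below are made up for illustration.
 */
#if 0	/* example only, never compiled */
static void clean_cache_range_example(void)
{
	char buf[128];

	/* 8 bytes at an unaligned offset: writes back the whole line(s) */
	clean_cache_range(buf + 5, 8);
}
#endif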

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	stac();
	rc = __copy_user_nocache(dst, src, size, 0);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
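
/*
 * Illustrative sketch, not part of the build: how the alignment rules
 * above play out for a few calls. Assumes pmem is cache-line aligned;
 * all names and sizes are made up for illustration.
 */
#if 0	/* example only, never compiled */
static void copy_user_flushcache_example(const void __user *src, char *pmem)
{
	/* aligned dest, size a multiple of 8: no manual flush needed */
	__copy_user_flushcache(pmem, src, 64);
	/* 4 bytes to a 4-byte-aligned dest: also purely non-temporal */
	__copy_user_flushcache(pmem + 4, src, 4);
	/* unaligned dest: head (and here tail) lines flushed manually */
	__copy_user_flushcache(pmem + 1, src, 64);
}
#endif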

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
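
/*
 * Illustrative sketch, not part of the build: movnti stores bypass the
 * CPU cache, so only the unaligned head and tail, which go through a
 * plain memcpy(), need an explicit clean_cache_range(). Assumes
 * pmem_dst is 8-byte aligned; all names are made up for illustration.
 */
#if 0	/* example only, never compiled */
static void memcpy_flushcache_example(void *pmem_dst, const void *src)
{
	/* 41 bytes: one 32-byte chunk, one 8-byte chunk, 1-byte tail */
	__memcpy_flushcache(pmem_dst, src, 41);
}
#endif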

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
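
/*
 * Illustrative sketch, not part of the build: copying from a page into
 * a persistent-memory buffer via the kmap_atomic() mapping above. The
 * name pmem_buf and the offsets are made up for illustration.
 */
#if 0	/* example only, never compiled */
static void memcpy_page_flushcache_example(char *pmem_buf, struct page *page)
{
	/* copy 512 bytes starting 64 bytes into the page */
	memcpy_page_flushcache(pmem_buf, page, 64, 512);
}
#endif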
#endif