xref: /openbmc/linux/arch/x86/lib/usercopy_64.c (revision 06ba8020)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Cache-flushing copy routines, used for persistent memory
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
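
/*
 * Worked example (illustrative, not part of the original file; assumes
 * the common 64-byte cache line, i.e. boot_cpu_data.x86_clflush_size
 * == 64): clean_cache_range((void *)0x1003, 2) rounds the start down
 * to 0x1000 and issues a single CLWB for that line, since both bytes
 * fall inside it. A range that straddles a line boundary gets one CLWB
 * per line touched.
 */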

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
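
/*
 * Usage sketch (illustrative only; vaddr and len are placeholders):
 * a caller such as a persistent-memory driver that has written len
 * bytes through a kernel mapping at vaddr can push the data toward
 * the persistence domain with
 *
 *	arch_wb_cache_pmem(vaddr, len);
 *	wmb();		// order the CLWBs before any "data valid" flag
 *
 * The fence is the caller's responsibility; arch_wb_cache_pmem() only
 * issues the write-backs.
 */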

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	/* SMAP: temporarily allow kernel access to user memory for the copy */
	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
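
/*
 * Worked example (illustrative, assuming 64-byte cache lines): with
 * dst == 0x1000 and size == 10, the 8-byte-aligned head is stored
 * non-temporally, but the 2-byte tail goes through the cache, so the
 * final clean_cache_range(dst + size - 1, 1) writes back the line
 * holding it. An unaligned head (e.g. dst == 0x1002) is likewise
 * copied through the cache and flushed by the first branch of the
 * else clause above.
 */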

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

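	/*
	 * The movnti stores above are non-temporal, so their data
	 * bypasses the CPU cache and needs no CLWB; callers order them
	 * with a store fence. Only the cached copies done via memcpy()
	 * are written back explicitly with clean_cache_range().
	 */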
	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
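
/*
 * Usage sketch (illustrative only; pmem_addr, src and len are
 * placeholders): callers normally go through the memcpy_flushcache()
 * wrapper rather than calling this function directly, e.g.
 *
 *	memcpy_flushcache(pmem_addr, src, len);
 *	wmb();		// drain/order the non-temporal stores (sfence)
 *
 * The fence after the copy is the caller's responsibility.
 */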
#endif