xref: /openbmc/linux/arch/x86/lib/usercopy_64.c (revision d9565bf4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/*
	 * No "memory" clobber is needed: the asm below does not modify any
	 * memory that gcc knows about. stac()/clac() open and close the
	 * user-space access window (SMAP).
	 */
	stac();
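	/*
	 * The asm below zeroes the buffer in two passes: a quadword loop
	 * that stores eight zero bytes per iteration (%ecx holds size/8),
	 * followed by a byte loop for the 0-7 byte tail (size & 7). If a
	 * store faults, the exception table entries transfer control to
	 * label 2: with the number of bytes still to be cleared (remaining
	 * quadwords are converted to bytes for the first loop), and that
	 * count becomes the return value.
	 */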
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"

		_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN8, %[size1])
		_ASM_EXTABLE_UA(1b, 2b)

		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
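
/*
 * Example (not part of this file): a typical caller only cares whether
 * anything was left unzeroed and maps that to -EFAULT, for instance in a
 * hypothetical ioctl handler:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * clear_user() performs the access_ok() check itself; __clear_user() is
 * for callers that have already validated the range.
 */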

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
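
/*
 * Illustrative example (assuming a 64-byte cache line): a call such as
 * clean_cache_range(addr, 1) with addr == 0x1005 masks addr down to
 * 0x1000 and issues a single clwb covering 0x1000-0x103f, i.e. the
 * requested range is expanded to whole cache lines on both ends.
 */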

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
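
/*
 * Worked example (assuming a 64-byte cache line): for dst == 0x1003 and
 * size == 100, the destination is not 8-byte aligned, so the cache line
 * holding the head (0x1000) is flushed and dest is rounded up to 0x1040.
 * flushed is then 61 bytes; the remaining 39 bytes past that boundary are
 * not a multiple of 8, so the line containing the last byte (also 0x1040)
 * is flushed as well. A fully 8-byte-aligned transfer needs no flushing.
 */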

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
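
/*
 * Example (not from this file): a persistent-memory write path would
 * typically pair this with a store fence so the data is durable before
 * completion is reported, e.g. in a hypothetical driver:
 *
 *	memcpy_flushcache(pmem_addr, buf, len);
 *	wmb();
 *
 * The movnti loops above use non-temporal stores, which bypass the cache
 * and are weakly ordered, so only the cached head/tail copies need an
 * explicit clwb, while the copy as a whole needs a fence before it can be
 * considered persistent.
 */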

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif