xref: /openbmc/linux/arch/x86/lib/usercopy_64.c (revision 0aed55af88345b5d673240f90e671d79662fb01e)
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc
	   knows about */
	stac();
	asm volatile(
		/* clear 8 bytes at a time while whole quadwords remain */
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		/* then clear the trailing 0..7 bytes one at a time */
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		/* fault fixup: report how many bytes were left unzeroed */
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
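
/*
 * Illustrative use (hypothetical caller, not part of this file): a driver
 * zeroing out a user buffer treats any nonzero return, i.e. bytes left
 * unzeroed, as a fault:
 *
 *	if (clear_user(ubuf, count))
 *		return -EFAULT;
 */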

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * there is no need to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}
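
/*
 * Illustration of the contract (hypothetical fixup caller): a fast copy
 * routine that faulted after copying `done' bytes finishes up with
 *
 *	return copy_user_handle_tail(to + done, from + done, len - done);
 *
 * so the result keeps the usual copy_*_user() semantics: 0 on full
 * success, otherwise the number of bytes that could not be copied.
 */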

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
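
/*
 * Example of the rounding (assuming a 64-byte cache line and a
 * line-aligned p):
 *
 *	clean_cache_range(p + 60, 8);
 *
 * starts the loop at p and issues two CLWBs, because the 8-byte range
 * straddles the line boundary at p + 64.
 */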

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			/* flush the full extent in case it straddles a
			   cache line boundary */
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
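
/*
 * Usage sketch (hypothetical, pmem-style write path): a caller copying
 * user data into persistent memory checks the leftover-byte count and
 * then orders the non-temporal stores with a write barrier:
 *
 *	if (__copy_user_flushcache(pmem_addr, ubuf, len))
 *		return -EFAULT;
 *	wmb();
 */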

void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
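
/*
 * Usage sketch (illustrative): since movnti stores are weakly ordered,
 * persistent-memory callers pair the copy with a write barrier before
 * relying on the ordering of the preceding writes:
 *
 *	memcpy_flushcache(pmem_dst, buf, len);
 *	wmb();
 */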

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
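
/*
 * Illustrative use (hypothetical): copying out of a page-cache page into
 * a persistent-memory buffer:
 *
 *	memcpy_page_flushcache(pmem_dst, page, offset, bytes);
 *
 * The kmap_atomic()/kunmap_atomic() pair above keeps this correct even on
 * highmem configurations, although x86-64 has no highmem.
 */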
#endif