xref: /openbmc/linux/arch/x86/lib/csum-wrappers_64.c (revision 82df5b73)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
4  *
5  * Wrappers of assembly checksum functions for x86-64.
6  */
7 #include <asm/checksum.h>
8 #include <linux/export.h>
9 #include <linux/uaccess.h>
10 #include <asm/smap.h>
11 
/**
 * csum_and_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 *
 * On any fault the destination buffer is zeroed for @len bytes and
 * *@errp is set to -EFAULT; the returned sum is then meaningless.
 */
__wsum
csum_and_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	/* Open the user access window (STAC on x86); paired with
	 * user_access_end() on every exit path below. */
	if (!user_access_begin(src, len))
		goto out_err;

	/*
	 * Why 6, not 7? To handle odd addresses aligned we
	 * would need to do considerable complications to fix the
	 * checksum which is defined as an 16bit accumulator. The
	 * fix alignment code is primarily for performance
	 * compatibility with 32bit and that will handle odd
	 * addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
		/* Copy 16-bit chunks until src is 8-byte aligned (or len
		 * runs out), folding each half-word into the running sum. */
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			/* Faults jump to the "out" label below. */
			unsafe_get_user(val16, (const __u16 __user *)src, out);

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
	/* Bulk copy + checksum in assembly; a source fault sets *errp. */
	isum = csum_partial_copy_generic((__force const void *)src,
				dst, len, isum, errp, NULL);
	user_access_end();
	if (unlikely(*errp))
		goto out_err;

	return isum;

out:
	/* unsafe_get_user() faulted inside the access window: close it
	 * before falling through to the common error path. */
	user_access_end();
out_err:
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
72 
/**
 * csum_and_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_and_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	/* Open the user access window; paired with user_access_end()
	 * on both the success and fault paths below. */
	if (!user_access_begin(dst, len)) {
		*errp = -EFAULT;
		return 0;
	}

	/* Same 8-byte alignment fixup as the from_user variant: copy
	 * half-words (checksumming each) until dst is aligned. See the
	 * "Why 6, not 7?" comment in csum_and_copy_from_user(). */
	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			/* Faults jump to the "out" label below. */
			unsafe_put_user(val16, (__u16 __user *)dst, out);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	/* Bulk copy + checksum in assembly; a destination fault sets *errp. */
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	user_access_end();
	return ret;
out:
	/* unsafe_put_user() faulted: close the access window and report. */
	user_access_end();
	*errp = -EFAULT;
	return isum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
121 
122 /**
123  * csum_partial_copy_nocheck - Copy and checksum.
124  * @src: source address
125  * @dst: destination address
126  * @len: number of bytes to be copied.
127  * @sum: initial sum that is added into the result (32bit unfolded)
128  *
129  * Returns an 32bit unfolded checksum of the buffer.
130  */
131 __wsum
132 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
133 {
134 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
135 }
136 EXPORT_SYMBOL(csum_partial_copy_nocheck);
137 
/*
 * csum_ipv6_magic - Compute the IPv6 pseudo-header checksum.
 *
 * Folds both 128-bit addresses, the (network-order) length, the
 * protocol number and an initial sum into a folded 16-bit checksum.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	__u64 rest, sum64;

	/* Pre-sum the scalar pseudo-header fields in 64 bits; the
	 * widening additions cannot carry out of 64 bits here. */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/* Add the four 64-bit halves of both addresses with carry
	 * propagation, then fold the final carry back in with adcq $0. */
	asm("	addq (%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq $0,%[sum]\n"

	    /* "[sum]" matching constraint: rest is the initial value of
	     * the sum64 output register. */
	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Fold 64 -> 32 bits (with carry), then 32 -> 16 via csum_fold(). */
	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
160