/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) \
	((get_fs().seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size)			\
({							\
	__chk_user_ptr(addr);				\
	__access_ok(((unsigned long)(addr)), (size));	\
})
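
/*
 * Worked example of the check above.  USER_DS comes from this file; the
 * example addresses are arbitrary.  With USER_DS, get_fs().seg is
 * -0x40000000000UL = 0xfffffc0000000000, i.e. every bit at or above
 * bit 42 is set:
 *
 *	addr = 0x0000012345678000, size = 0x100
 *	addr | size | (addr+size) = 0x0000012345678100
 *	seg & 0x0000012345678100  = 0		-> access allowed
 *
 *	addr = 0xfffffc0000310000 (a kernel address)
 *	seg & addr		 != 0		-> access rejected
 *
 * Folding addr+size into the OR also rejects a size that would carry the
 * range past the top of user space, and with KERNEL_DS the mask is zero,
 * so every address passes.
 */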

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
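
/*
 * A minimal usage sketch (illustrative only; the function below is made
 * up and compiled out).  get_user()/put_user() do their own range check
 * and return 0 or -EFAULT; the __-prefixed forms rely on the caller
 * having done access_ok() first.  VERIFY_WRITE is the historical
 * constant from <linux/uaccess.h>; the type argument is ignored by
 * access_ok() above.
 */
#if 0
static long example_bump_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))		/* checked read */
		return -EFAULT;

	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;

	return __put_user(val + 1, uptr);	/* unchecked write after access_ok() */
}
#endif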

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)		\
({							\
	long __gu_err = 0;				\
	unsigned long __gu_val;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1: __get_user_8(ptr); break;		\
	case 2: __get_user_16(ptr); break;		\
	case 4: __get_user_32(ptr); break;		\
	case 8: __get_user_64(ptr); break;		\
	default: __get_user_unknown(); break;		\
	}						\
	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
	__gu_err;					\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		case 1: __get_user_8(__gu_addr); break;		\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2: ldq_u %1,1(%3)\n"						\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err = 0;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1: __put_user_8(x, ptr); break;		\
	case 2: __put_user_16(x, ptr); break;		\
	case 4: __put_user_32(x, ptr); break;		\
	case 8: __put_user_64(x, ptr); break;		\
	default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		case 1: __put_user_8(x, __pu_addr); break;	\
		case 2: __put_user_16(x, __pu_addr); break;	\
		case 4: __put_user_32(x, __pu_addr); break;	\
		case 8: __put_user_64(x, __pu_addr); break;	\
		default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)					\
	__asm__ __volatile__("1: stq %r2,%1\n"			\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
	__asm__ __volatile__("1: stl %r2,%1\n"			\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)					\
	__asm__ __volatile__("1: stw %r2,%1\n"			\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
	__asm__ __volatile__("1: stb %r2,%1\n"			\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
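
/*
 * How the non-BWX sequences above work: without byte/word memory
 * instructions, a sub-word access is done on the aligned quadword(s)
 * containing it.  For __put_user_8, ldq_u fetches the enclosing
 * quadword, insbl shifts the new byte into its lane, mskbl clears that
 * lane in the loaded value, "or" merges the two, and stq_u writes the
 * quadword back.  The 16-bit variants touch the quadwords at offsets 0
 * and 1 because an unaligned word may straddle a quadword boundary;
 * extwl/extwh (loads) and inswl/inswh with mskwl/mskwh (stores) split
 * and recombine the two halves.  Each instruction that can fault
 * carries an EXC() entry, so a bad user pointer resumes at the final
 * label with the error register set to -EFAULT instead of oopsing.
 */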

/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}

extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */