/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>

void debug_user_asce(int exit);

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}

int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));

/*
 * Operand-access control (OAC) for the MVCOS instruction, passed in
 * general register 0: oac1 (bits 0-15) controls operand 1, the
 * destination; oac2 (bits 16-31) controls operand 2, the source.
 * "key" is the storage access key, "as" the address-space control;
 * the k and a bits mark the key and as fields as valid.
 */
union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

/*
 * Load the OAC specification into register 0, then move the data with
 * MVCOS.  On a fault the extable fixup branches to label 2 and sets rc.
 */
#define __put_get_user_asm(to, from, size, oac_spec)			\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc])	\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (oac_spec.val)				\
		: "cc", "0");						\
	__rc;								\
})

#define __put_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac1.as = PSW_BITS_AS_SECONDARY,		\
		.oac1.a = 1					\
	}))

#define __get_user_asm(to, from, size)				\
	__put_get_user_asm(to, from, size, ((union oac) {	\
		.oac2.as = PSW_BITS_AS_SECONDARY,		\
		.oac2.a = 1					\
	}))

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}
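
/*
 * Illustrative sketch, not part of the original header: __put_user_fn()
 * above and __get_user_fn() below are the size-dispatching backends
 * behind __put_user()/__get_user().  Assuming a hypothetical user
 * pointer "uptr", a single store boils down to:
 *
 *	unsigned long val = 42;
 *	int rc;
 *
 *	rc = __put_user_fn(&val, uptr, sizeof(val));
 *	if (rc)		// extable fixup turned the fault into -EFAULT
 *		return rc;
 */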

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 2: {							\
		unsigned short __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 4: {							\
		unsigned int __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 8: {							\
		unsigned long long __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
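
/*
 * Usage sketch, not part of the original header: get_user()/put_user()
 * follow the generic kernel API and return 0 on success or -EFAULT on
 * fault.  "uptr" and FLAG are assumed names used only for illustration:
 *
 *	u32 arg;
 *
 *	if (get_user(arg, (u32 __user *)uptr))
 *		return -EFAULT;
 *	arg |= FLAG;
 *	if (put_user(arg, (u32 __user *)uptr))
 *		return -EFAULT;
 */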

/*
 * Copy a null-terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero userspace.
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %2,%1\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0)		\
		: "=d" (__rc), "+Q" (*(to))				\
		: "d" (val)						\
		: "cc");						\
	__rc;								\
})

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc");	\
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth");	\
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg");	\
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %1,%2\n"				\
		"1:	xr	%0,%0\n"				\
		"2:\n"							\
		EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0)		\
		: "=d" (__rc), "+d" (val)				\
		: "Q" (*(from))						\
		: "cc");						\
	__rc;								\
})

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	}								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)

#endif /* __S390_UACCESS_H */