/* SPDX-License-Identifier: GPL-2.0 */
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>

/* Sparc is not segmented, but we need to be able to fool access_ok()
 * when legitimately doing system calls from kernel mode.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { -1 })

#define get_fs() (current->thread.current_ds)
#define set_fs(val) ((current->thread.current_ds) = (val))

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this
 * test can be fairly lightweight.  Nobody can reach kernel space from
 * userland by passing a large size and an address close to PAGE_OFFSET:
 * the access will fault before it can do any harm.
 */
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (uaccess_kernel())
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), size)

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
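/* Illustrative usage sketch, not part of the original header: get_user()
 * returns the error code as its value and delivers the datum through its
 * first argument.  'uaddr' is a hypothetical u32 __user pointer.
 *
 *	u32 val;
 *	int err;
 *
 *	err = get_user(val, uaddr);
 *	if (!err)
 *		err = put_user(val + 1, uaddr);
 */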
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(x, , addr, __pu_ret); break; \
	case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})
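/* Illustrative pattern for the unchecked variants, not part of the original
 * header: validate the whole range once with access_ok(), then use
 * __put_user() for the individual stores.  'uptr' and 'n' are hypothetical.
 *
 *	u32 __user *uptr;
 *	unsigned long i, n;
 *
 *	if (!access_ok(uptr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(i, uptr + i))
 *			return -EFAULT;
 */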
*/\n" \ 115 "1:\t" "st"#size " %1, %2\n\t" \ 116 "clr %0\n" \ 117 "2:\n\n\t" \ 118 ".section .fixup,#alloc,#execinstr\n\t" \ 119 ".align 4\n" \ 120 "3:\n\t" \ 121 "b 2b\n\t" \ 122 " mov %3, %0\n\t" \ 123 ".previous\n\n\t" \ 124 ".section __ex_table,#alloc\n\t" \ 125 ".align 4\n\t" \ 126 ".word 1b, 3b\n\t" \ 127 ".previous\n\n\t" \ 128 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ 129 "i" (-EFAULT)) 130 131 int __put_user_bad(void); 132 133 #define __get_user_check(x, addr, size, type) ({ \ 134 register int __gu_ret; \ 135 register unsigned long __gu_val; \ 136 if (__access_ok(addr, size)) { \ 137 switch (size) { \ 138 case 1: \ 139 __get_user_asm(__gu_val, ub, addr, __gu_ret); \ 140 break; \ 141 case 2: \ 142 __get_user_asm(__gu_val, uh, addr, __gu_ret); \ 143 break; \ 144 case 4: \ 145 __get_user_asm(__gu_val, , addr, __gu_ret); \ 146 break; \ 147 case 8: \ 148 __get_user_asm(__gu_val, d, addr, __gu_ret); \ 149 break; \ 150 default: \ 151 __gu_val = 0; \ 152 __gu_ret = __get_user_bad(); \ 153 break; \ 154 } \ 155 } else { \ 156 __gu_val = 0; \ 157 __gu_ret = -EFAULT; \ 158 } \ 159 x = (__force type) __gu_val; \ 160 __gu_ret; \ 161 }) 162 163 #define __get_user_nocheck(x, addr, size, type) ({ \ 164 register int __gu_ret; \ 165 register unsigned long __gu_val; \ 166 switch (size) { \ 167 case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ 168 case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ 169 case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \ 170 case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \ 171 default: \ 172 __gu_val = 0; \ 173 __gu_ret = __get_user_bad(); \ 174 break; \ 175 } \ 176 x = (__force type) __gu_val; \ 177 __gu_ret; \ 178 }) 179 180 #define __get_user_asm(x, size, addr, ret) \ 181 __asm__ __volatile__( \ 182 "/* Get user asm, inline. */\n" \ 183 "1:\t" "ld"#size " %2, %1\n\t" \ 184 "clr %0\n" \ 185 "2:\n\n\t" \ 186 ".section .fixup,#alloc,#execinstr\n\t" \ 187 ".align 4\n" \ 188 "3:\n\t" \ 189 "clr %1\n\t" \ 190 "b 2b\n\t" \ 191 " mov %3, %0\n\n\t" \ 192 ".previous\n\t" \ 193 ".section __ex_table,#alloc\n\t" \ 194 ".align 4\n\t" \ 195 ".word 1b, 3b\n\n\t" \ 196 ".previous\n\t" \ 197 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ 198 "i" (-EFAULT)) 199 200 int __get_user_bad(void); 201 202 unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size); 203 204 static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) 205 { 206 return __copy_user(to, (__force void __user *) from, n); 207 } 208 209 static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 210 { 211 return __copy_user((__force void __user *) to, from, n); 212 } 213 214 #define INLINE_COPY_FROM_USER 215 #define INLINE_COPY_TO_USER 216 217 static inline unsigned long __clear_user(void __user *addr, unsigned long size) 218 { 219 unsigned long ret; 220 221 __asm__ __volatile__ ( 222 "mov %2, %%o1\n" 223 "call __bzero\n\t" 224 " mov %1, %%o0\n\t" 225 "mov %%o0, %0\n" 226 : "=r" (ret) : "r" (addr), "r" (size) : 227 "o0", "o1", "o2", "o3", "o4", "o5", "o7", 228 "g1", "g2", "g3", "g4", "g5", "g7", "cc"); 229 230 return ret; 231 } 232 233 static inline unsigned long clear_user(void __user *addr, unsigned long n) 234 { 235 if (n && __access_ok((unsigned long) addr, n)) 236 return __clear_user(addr, n); 237 else 238 return n; 239 } 240 241 __must_check long strnlen_user(const char __user *str, long n); 242 243 #endif /* _ASM_UACCESS_H */ 244