#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds()	(KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while (0)
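/*
 * Illustrative sketch, not part of the original header: the classic
 * pattern for temporarily widening the "segment" so a helper that
 * normally takes a __user pointer can be fed a kernel buffer.  The
 * helper name and variables here are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	long err;
 *
 *	set_fs(KERNEL_DS);
 *	err = example_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * On sparc64 this is more than bookkeeping: set_fs() rewrites %asi, so
 * the ASI-tagged loads and stores used by get_user()/put_user() below
 * immediately start targeting the primary (kernel) address space
 * instead of the as-if-user secondary one.
 */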
/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* Arbitrary sizes?  Be careful, addr + size may overflow. */
	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({						\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({						\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({				\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;		\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;		\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;		\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;		\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"sethi	%%hi(2b), %0\n\t"					\
	"jmpl	%0 + %%lo(2b), %%g0\n\t"				\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"sethi	%%hi(2b), %0\n\t"					\
	"clr	%1\n\t"							\
	"jmpl	%0 + %%lo(2b), %%g0\n\t"				\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __get_user_bad(void);
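/*
 * Illustrative sketch, not part of the original header: typical use of
 * the single-value accessors.  Both macros expand to an expression that
 * evaluates to 0 on success and -EFAULT if the access faults; on a
 * faulting get_user() the fixup code above also zeroes the destination.
 * "example_bump_u32" and "uptr" are hypothetical names.
 *
 *	static int example_bump_u32(u32 __user *uptr)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 */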
unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	check_object_size(to, size, false);

	return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	check_object_size(from, size, true);

	return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */
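/*
 * Illustrative usage note, not part of the original header: the bulk
 * copy routines follow the usual kernel convention of returning the
 * number of bytes left uncopied, so zero means complete success.  A
 * hypothetical caller:
 *
 *	static int example_read_from_user(void *kbuf,
 *					  const void __user *ubuf,
 *					  unsigned long len)
 *	{
 *		if (copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */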