#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset
 * to the address of the instruction that is allowed to fault, and the second
 * is the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with the lowest bit set) for which the fault handler in fixup_exception()
 * will load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
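
/*
 * Illustrative sketch (not part of this header): "entry" below is a
 * hypothetical struct exception_table_entry pointer. With relative
 * offsets, the fixup code recovers the absolute addresses by adding
 * each offset to the address of its own field, roughly:
 *
 *	unsigned long insn  = (unsigned long)&entry->insn  + entry->insn;
 *	unsigned long fixup = (unsigned long)&entry->fixup + entry->fixup;
 *
 * For an _EFAULT entry, the lowest bit of the fixup address serves as
 * the marker and is masked off before branching to the fixup routine.
 */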

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS
 * which is 0), or with the current value of %%sr3 to access user space
 * (USER_DS) memory. The "or,=" below nullifies the following mfsp when
 * the addr_limit value is zero, so %0 keeps the value 0 in the KERNEL_DS
 * case. The following __get_user_asm() and __put_user_asm() functions
 * have %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break;	\
	case 4: __get_user_asm(val, "ldw", ptr); break;	\
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(x, ptr)			\
({							\
	register long __pu_err __asm__ ("r8") = 0;	\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __put_user_asm("stb", __x, ptr); break;	\
	case 2: __put_user_asm("sth", __x, ptr); break;	\
	case 4: __put_user_asm("stw", __x, ptr); break;	\
	case 8: STD_USER(__x, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__pu_err;					\
})

#define __put_user(x, ptr)				\
({							\
	load_sr2();					\
	__put_user_internal(x, ptr);			\
})
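
/*
 * Usage sketch (illustrative only; "uptr" is a hypothetical int __user
 * pointer): both macros evaluate to 0 on success and to -EFAULT on a
 * faulting access, and a faulting get_user() zeroes its destination:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */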

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */