/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
#else
#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset
 * to the address of the instruction that is allowed to fault, and the second
 * is the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
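
/*
 * Illustrative sketch, not part of this header: with
 * ARCH_HAS_RELATIVE_EXTABLE, the generic extable code recovers the
 * absolute addresses by adding each field's own address to the offset
 * stored in it, roughly:
 *
 *	insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *	fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 *
 * The __ex_table entry and the code it refers to both live in the
 * kernel image, within a signed 32-bit offset of each other, which is
 * why 32-bit fields suffice even on a 64-bit kernel.
 */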

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with the lowest bit set) for which the fault handler in fixup_exception()
 * loads -EFAULT into %r29 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_REG	29
#define ASM_EXCEPTIONTABLE_VAR(__variable)		\
	register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

#define __get_user_internal(sr, val, ptr)		\
({							\
	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
	case 8: LDD_USER(sr, val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	__get_user_internal("%%sr3,", val, ptr);	\
})

#define __get_user_asm(sr, val, ldx, ptr)		\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(" sr "%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "+r"(__gu_err)	\
		: "r"(ptr));				\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z;					\
	long __err;					\
	__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
	if (unlikely(__err))				\
		goto err_label;				\
	else						\
		*(type *)(dst) = __z;			\
}


#if !defined(CONFIG_64BIT)

#define __get_user_asm64(sr, val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(" sr "%2),%0\n"		\
		"2: ldw 4(" sr "%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "+r"(__gu_err)	\
		: "r"(ptr));				\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(sr, x, ptr)				\
({								\
	ASM_EXCEPTIONTABLE_VAR(__pu_err);			\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm(sr, "stb", x, ptr); break;	\
	case 2: __put_user_asm(sr, "sth", x, ptr); break;	\
	case 4: __put_user_asm(sr, "stw", x, ptr); break;	\
	case 8: STD_USER(sr, x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	__typeof__(&*(ptr)) __ptr = ptr;			\
	__typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x);	\
	__put_user_internal("%%sr3,", __x, __ptr);		\
})

#define __put_kernel_nofault(dst, src, type, err_label)		\
{								\
	type __z = *(type *)(src);				\
	long __err;						\
	__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
	if (unlikely(__err))					\
		goto err_label;					\
}
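
/*
 * Illustrative usage sketch, not part of this header: a hypothetical
 * caller uses get_user()/put_user() exactly as on other architectures.
 * Both evaluate to 0 on success or to the -EFAULT that the fixup
 * handler placed in %r29:
 *
 *	int __user *uptr;	// assumed to point into user space
 *	int val;
 *
 *	if (get_user(val, uptr))	// a faulting load also zeroes val
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */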

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
 * is already listed as an input and output register.
 */

#define __put_user_asm(sr, stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"1: " stx " %2,0(" sr "%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		: "+r"(__pu_err)				\
		: "r"(ptr), "r"(x))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(sr, __val, ptr) do {			\
	__asm__ __volatile__ (					\
		"1: stw %2,0(" sr "%1)\n"			\
		"2: stw %R2,4(" sr "%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
		: "+r"(__pu_err)				\
		: "r"(ptr), "r"(__val));			\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern __must_check unsigned lclear_user(void __user *, unsigned long);
extern __must_check long strnlen_user(const char __user *src, long n);

/*
 * Complex access routines -- macros
 */

#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					      unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */
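
/*
 * Illustrative sketch, not part of this header: the raw copy routines
 * above return the number of bytes that could NOT be copied (0 means
 * complete success). Callers normally go through copy_from_user()/
 * copy_to_user(), which INLINE_COPY_{FROM,TO}_USER turns into inline
 * wrappers around the raw routines. A hypothetical caller:
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, ubuf, sizeof(buf)))
 *		return -EFAULT;
 */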