/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
#else
#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset
 * to the address of the instruction that is allowed to fault, and the second
 * is the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
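/*
 * Illustrative sketch only, not part of the kernel API: with the relative
 * encoding above, an absolute address is recovered by adding the stored
 * offset to the address of the field that holds it. The real lookup lives
 * in the generic extable code; these hypothetical helpers just show the
 * arithmetic.
 */
static inline unsigned long extable_insn_addr(const struct exception_table_entry *x)
{
	/* offset is relative to the location of the 'insn' field itself */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup_addr(const struct exception_table_entry *x)
{
	/* likewise, relative to the 'fixup' field */
	return (unsigned long)&x->fixup + x->fixup;
}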
/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r29 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_REG	29
#define ASM_EXCEPTIONTABLE_VAR(__variable)		\
	register long __variable __asm__ ("r29") = 0

#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

#define __get_user_internal(sr, val, ptr)		\
({							\
	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
	case 8: LDD_USER(sr, val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	__get_user_internal("%%sr3,", val, ptr);	\
})

#define __get_user_asm(sr, val, ldx, ptr)		\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(" sr "%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z;					\
	long __err;					\
	__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
	if (unlikely(__err))				\
		goto err_label;				\
	else						\
		*(type *)(dst) = __z;			\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(sr, val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(" sr "%2),%0\n"		\
		"2: ldw 4(" sr "%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */

#define __put_user_internal(sr, x, ptr)			\
({							\
	ASM_EXCEPTIONTABLE_VAR(__pu_err);		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
	case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
	case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
	case 8: STD_USER(sr, __x, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__pu_err;					\
})

#define __put_user(x, ptr)				\
({							\
	__put_user_internal("%%sr3,", x, ptr);		\
})

#define __put_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z = *(type *)(src);			\
	long __err;					\
	__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
	if (unlikely(__err))				\
		goto err_label;				\
}
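/*
 * Usage sketch (illustration only; function and variable names below are
 * hypothetical): __get_user()/__put_user() return 0 on success or -EFAULT
 * on a fault, while the *_nofault() forms branch to a caller-supplied
 * error label instead.
 *
 *	int read_word_from_user(unsigned int __user *uptr, unsigned int *out)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uptr))	// 0 on success, -EFAULT on fault
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 *
 *	long peek_kernel_word(unsigned long *kaddr, unsigned long *out)
 *	{
 *		__get_kernel_nofault(out, kaddr, unsigned long, fault);
 *		return 0;
 *	fault:
 *		return -EFAULT;
 *	}
 */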
/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
 * is already listed as an input and output register.
 */

#define __put_user_asm(sr, stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(" sr "%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))

#if !defined(CONFIG_64BIT)

#define __put_user_asm64(sr, __val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(" sr "%1)\n"		\
		"2: stw %R2,4(" sr "%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern __must_check unsigned lclear_user(void __user *, unsigned long);
extern __must_check long strnlen_user(const char __user *src, long n);

/*
 * Complex access routines -- macros
 */

#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					      unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */
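/*
 * Sketch of the fixup path (illustration only; the real implementation
 * lives in the parisc fault handling code): fixup_exception() looks up
 * the faulting address in __ex_table. For entries created with
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() the stored fixup offset has its
 * lowest bit set, so the handler roughly does:
 *
 *	if (fix->fixup & 1) {
 *		regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;	// %r29
 *		// on a get_user() read fault, also zero the target register
 *	}
 *	regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~3;
 *
 * Field names above are simplified; the low bits are masked off before
 * resuming at the fixup routine.
 */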