/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user trick us
 * into accessing kernel memory that would normally be inaccessible.
 * This code is also fairly performance sensitive, so we want to spend
 * as little time doing safety checks as possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what to
 * do: if get_fs()==USER_DS, checking is performed; if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated and, if so, sets r8 to -EFAULT, clears r9, and then
 * resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/extable.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * When accessing user memory, we need to make sure the entire area really is
 * in user-level space.  In order to do this efficiently, we make sure that the
 * page at address TASK_SIZE is never valid.  We also need to make sure that
 * the address doesn't point inside the virtually mapped linear page table.
 */
static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	unsigned long seg = get_fs().seg;
	return likely(addr <= seg) &&
	 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
}
#define access_ok(addr, size)	__access_ok((addr), (size))
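/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the address limit so that the checking accessors
 * accept kernel addresses, per the get_fs()/set_fs() description above.
 * example_kernel_read() and kbuf are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);			// access_ok() now always passes
 *	ret = example_kernel_read(kbuf, count);	// may use get_user() et al.
 *	set_fs(old_fs);				// always restore the old limit
 */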
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)						\
do {											\
	register long __gu_r8 asm ("r8") = 0;						\
	register long __gu_r9 asm ("r9");						\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"					\
	     "[1:]"									\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));		\
	(err) = __gu_r8;								\
	(val) = __gu_r9;								\
} while (0)

/*
 * The "__put_user_size()" macro tells gcc that it reads from memory rather
 * than writing to it.  This is safe because these stores do not go to any
 * memory gcc knows about, so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)						\
do {											\
	register long __pu_r8 asm ("r8") = 0;						\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"				\
		      "[1:]"								\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));	\
	(err) = __pu_r8;								\
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)				\
do {									\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,	\
		  (__force unsigned long) (val));			\
	(err) = ia64_getreg(_IA64_REG_R8);				\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);
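/*
 * Illustrative sketch (not part of this header): typical use of the
 * checking accessors defined above.  example_peek() and its arguments
 * are hypothetical names.
 *
 *	long example_peek(int __user *uaddr, int __user *uresult)
 *	{
 *		int val;
 *
 *		if (get_user(val, uaddr))	// 0 on success, -EFAULT on fault
 *			return -EFAULT;
 *		return put_user(val + 1, uresult);
 *	}
 */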
/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls,
 * which could clobber r8 and r9 (among others).  Thus, be careful not to
 * evaluate them while using r8/r9.
 */
#define __do_get_user(check, x, ptr, size)						\
({											\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
	__typeof__ (size) __gu_size = (size);						\
	long __gu_err = -EFAULT;							\
	unsigned long __gu_val = 0;							\
	if (!check || __access_ok(__gu_ptr, __gu_size))					\
		switch (__gu_size) {							\
		case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
		case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
		case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
		case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
		default: __get_user_unknown(); break;					\
		}									\
	(x) = (__force __typeof__(*(__gu_ptr))) __gu_val;				\
	__gu_err;									\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size)
#define __get_user_check(x, ptr, size)		__do_get_user(1, x, ptr, size)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls,
 * which could clobber r8 (among others).  Thus, be careful not to evaluate
 * them while using r8.
 */
#define __do_put_user(check, x, ptr, size)						\
({											\
	__typeof__ (x) __pu_x = (x);							\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
	__typeof__ (size) __pu_size = (size);						\
	long __pu_err = -EFAULT;							\
											\
	if (!check || __access_ok(__pu_ptr, __pu_size))					\
		switch (__pu_size) {							\
		case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;		\
		case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;		\
		case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;		\
		case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;		\
		default: __put_user_unknown(); break;					\
		}									\
	__pu_err;									\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size)
#define __put_user_check(x, ptr, size)		__do_put_user(1, x, ptr, size)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long count)
{
	return __copy_user((__force void __user *) to, from, count);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len))				\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})

/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer
 * filled, else strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user *__sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0))					\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
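/*
 * Illustrative sketch (not part of this header): typical use of
 * strncpy_from_user() and its return convention documented above.
 * example_fetch_name() is a hypothetical name.
 *
 *	long example_fetch_name(char *dst, const char __user *src, long len)
 *	{
 *		long n = strncpy_from_user(dst, src, len);
 *
 *		if (n < 0)		// -EFAULT: fault or failed __access_ok()
 *			return n;
 *		if (n == len)		// buffer filled before a NUL was seen
 *			return -ENAMETOOLONG;
 *		return n;		// otherwise n == strlen of the result
 *	}
 */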
/*
 * Returns: 0 if an exception occurs before the NUL or the supplied limit (N)
 * is reached, a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0))				\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})

#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p)
{
	struct page *page;
	void *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ void *
xlate_dev_kmem_ptr(void *p)
{
	struct page *page;
	void *ptr;

	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */
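/*
 * Illustrative sketch (not part of this header): how a /dev/mem-style
 * driver might use xlate_dev_mem_ptr() above to read a physical page
 * through the appropriate (cached or uncached) kernel mapping.
 * example_read_phys() and its arguments are hypothetical names.
 *
 *	static void example_read_phys(phys_addr_t p, void *buf, size_t sz)
 *	{
 *		void *ptr = xlate_dev_mem_ptr(p);	// __va() or uncached alias
 *		memcpy(buf, ptr, sz);
 *	}
 */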