#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 */

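/*
 * As a minimal sketch (assuming the generic relative-extable convention
 * rather than anything specific to this header), such an entry is resolved
 * by adding each field to its own address:
 *
 *	insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *	fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 */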
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

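/*
 * A minimal usage sketch (assuming a context where temporarily changing the
 * address limit is safe) of the pattern that load_sr2() keys off; old_fs,
 * val, err and kernel_ptr are illustrative names only:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	(load_sr2() then loads 0 into %sr2)
 *	err = __get_user(val, kernel_ptr);
 *	set_fs(old_fs);		(back to USER_DS: %sr2 gets the %sr3 value)
 */
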
#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break; \
	case 4: __get_user_asm(val, "ldw", ptr); break; \
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;			\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */

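/*
 * A minimal usage sketch of the macros above: get_user()/__get_user()
 * evaluate to 0 on success or to -EFAULT on a faulting access (set up by
 * the EFAULT exception-table entries), and the fetched value is written to
 * the first argument. uptr is an illustrative name only:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 */
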
#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})


/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

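/*
 * A minimal usage sketch, assuming the usual kernel conventions these
 * helpers follow: clear_user() returns the number of bytes that could not
 * be cleared (0 on success), and strnlen_user() returns the string length
 * including the terminating NUL, or 0 on a fault. ubuf, ustr and len are
 * illustrative names only:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *	len = strnlen_user(ustr, PAGE_SIZE);
 *	if (!len)
 *		return -EFAULT;
 */
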
unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					  unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
					  unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */