#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

static inline long access_ok(int type, const void __user * addr,
		unsigned long size)
{
	return 1;
}

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr)		BUILD_BUG()
#define LDD_USER(ptr)		BUILD_BUG()
#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#define ASM_WORD_INSN		".word\t"
#else
#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd", ptr)
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_KERNEL(x, ptr)	__put_kernel_asm("std", x, ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#define ASM_WORD_INSN		".dword\t"
#endif

/*
 * The exception table contains two values: the first is the relative
 * address of an instruction that is allowed to fault, and the second is
 * the relative address of the fixup routine. Even on a 64-bit kernel we
 * can use a 32-bit (unsigned int) offset here.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
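
/*
 * Sketch (not part of this interface): with ARCH_HAS_RELATIVE_EXTABLE the
 * absolute addresses are recovered by adding each field's value to the
 * address of the field itself.  The helper names below are made up for
 * illustration; the generic exception-table code does the equivalent:
 *
 *	static inline unsigned long ex_insn_addr(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 *	static inline unsigned long ex_fixup_addr(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */
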
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_space;
	unsigned long fault_addr;
};

#define __get_user(x, ptr)                               \
({                                                       \
	register long __gu_err __asm__ ("r8") = 0;       \
	register long __gu_val __asm__ ("r9") = 0;       \
							 \
	if (segment_eq(get_fs(), KERNEL_DS)) {           \
	    switch (sizeof(*(ptr))) {                    \
	    case 1: __get_kernel_asm("ldb", ptr); break; \
	    case 2: __get_kernel_asm("ldh", ptr); break; \
	    case 4: __get_kernel_asm("ldw", ptr); break; \
	    case 8: LDD_KERNEL(ptr); break;              \
	    default: BUILD_BUG(); break;                 \
	    }                                            \
	}                                                \
	else {                                           \
	    switch (sizeof(*(ptr))) {                    \
	    case 1: __get_user_asm("ldb", ptr); break;   \
	    case 2: __get_user_asm("ldh", ptr); break;   \
	    case 4: __get_user_asm("ldw", ptr); break;   \
	    case 8: LDD_USER(ptr); break;                \
	    default: BUILD_BUG(); break;                 \
	    }                                            \
	}                                                \
							 \
	(x) = (__force __typeof__(*(ptr))) __gu_val;     \
	__gu_err;                                        \
})
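
/*
 * Usage sketch (illustrative only): get_user()/__get_user() evaluate to 0
 * on success; on a fault the fixup routine leaves -EFAULT in r8 (__gu_err),
 * so a caller typically does:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */
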
#define __get_kernel_asm(ldx, ptr)                      \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"           \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");

#define __get_user_asm(ldx, ptr)                        \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"     \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");

#define __put_user(x, ptr)                                      \
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	if (segment_eq(get_fs(), KERNEL_DS)) {                  \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_kernel_asm("stb", __x, ptr); break;   \
	    case 2: __put_kernel_asm("sth", __x, ptr); break;   \
	    case 4: __put_kernel_asm("stw", __x, ptr); break;   \
	    case 8: STD_KERNEL(__x, ptr); break;		\
	    default: BUILD_BUG(); break;			\
	    }                                                   \
	}                                                       \
	else {                                                  \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_user_asm("stb", __x, ptr); break;     \
	    case 2: __put_user_asm("sth", __x, ptr); break;     \
	    case 4: __put_user_asm("stw", __x, ptr); break;     \
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	    }                                                   \
	}                                                       \
								\
	__pu_err;						\
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */
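
/*
 * For illustration only (a hand-written sketch, not compiler output), the
 * user-space store case roughly expands to the following; the register
 * numbers are placeholders for whatever gcc allocates:
 *
 *	1:	stw	%rX,0(%sr3,%rY)
 *		.section __ex_table,"aw"
 *		.word (1b - .), (fixup_put_user_skip_1 - .)
 *		.previous
 *
 * If the store at 1: faults, the handler branches to fixup_put_user_skip_1,
 * which sets r8 (__pu_err) to -EFAULT and resumes after the faulting store.
 */
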
#define __put_kernel_asm(stx, x, ptr)                       \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%1)\n\t"               \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)           \
		: "r1")

#define __put_user_asm(stx, x, ptr)                         \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"         \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)           \
		: "r1")


#if !defined(CONFIG_64BIT)

#define __put_kernel_asm64(__val, ptr) do {                 \
	__asm__ __volatile__ (                              \
		"\n1:\tstw %2,0(%1)"                        \
		"\n2:\tstw %R2,4(%1)\n\t"                   \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err)       \
		: "r1");                                    \
} while (0)

#define __put_user_asm64(__val, ptr) do {                   \
	__asm__ __volatile__ (                              \
		"\n1:\tstw %2,0(%%sr3,%1)"                  \
		"\n2:\tstw %R2,4(%%sr3,%1)\n\t"             \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err)       \
		: "r1");                                    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
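
/*
 * Usage sketch (illustrative only): as with the generic kernel interface,
 * the copy routines below return the number of bytes that could NOT be
 * copied, so 0 means complete success:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * copy_from_user() additionally performs a compile-time buffer-size check
 * via __compiletime_object_size() before calling __copy_from_user().
 */
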
unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
	__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);
	int ret = -EFAULT;

	if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
		ret = __copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return ret;
}

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */