1*deae26bfSKyle McMartin #ifndef __PARISC_UACCESS_H 2*deae26bfSKyle McMartin #define __PARISC_UACCESS_H 3*deae26bfSKyle McMartin 4*deae26bfSKyle McMartin /* 5*deae26bfSKyle McMartin * User space memory access functions 6*deae26bfSKyle McMartin */ 7*deae26bfSKyle McMartin #include <asm/page.h> 8*deae26bfSKyle McMartin #include <asm/system.h> 9*deae26bfSKyle McMartin #include <asm/cache.h> 10*deae26bfSKyle McMartin #include <asm-generic/uaccess.h> 11*deae26bfSKyle McMartin 12*deae26bfSKyle McMartin #define VERIFY_READ 0 13*deae26bfSKyle McMartin #define VERIFY_WRITE 1 14*deae26bfSKyle McMartin 15*deae26bfSKyle McMartin #define KERNEL_DS ((mm_segment_t){0}) 16*deae26bfSKyle McMartin #define USER_DS ((mm_segment_t){1}) 17*deae26bfSKyle McMartin 18*deae26bfSKyle McMartin #define segment_eq(a,b) ((a).seg == (b).seg) 19*deae26bfSKyle McMartin 20*deae26bfSKyle McMartin #define get_ds() (KERNEL_DS) 21*deae26bfSKyle McMartin #define get_fs() (current_thread_info()->addr_limit) 22*deae26bfSKyle McMartin #define set_fs(x) (current_thread_info()->addr_limit = (x)) 23*deae26bfSKyle McMartin 24*deae26bfSKyle McMartin /* 25*deae26bfSKyle McMartin * Note that since kernel addresses are in a separate address space on 26*deae26bfSKyle McMartin * parisc, we don't need to do anything for access_ok(). 27*deae26bfSKyle McMartin * We just let the page fault handler do the right thing. This also means 28*deae26bfSKyle McMartin * that put_user is the same as __put_user, etc. 29*deae26bfSKyle McMartin */ 30*deae26bfSKyle McMartin 31*deae26bfSKyle McMartin extern int __get_kernel_bad(void); 32*deae26bfSKyle McMartin extern int __get_user_bad(void); 33*deae26bfSKyle McMartin extern int __put_kernel_bad(void); 34*deae26bfSKyle McMartin extern int __put_user_bad(void); 35*deae26bfSKyle McMartin 36*deae26bfSKyle McMartin static inline long access_ok(int type, const void __user * addr, 37*deae26bfSKyle McMartin unsigned long size) 38*deae26bfSKyle McMartin { 39*deae26bfSKyle McMartin return 1; 40*deae26bfSKyle McMartin } 41*deae26bfSKyle McMartin 42*deae26bfSKyle McMartin #define put_user __put_user 43*deae26bfSKyle McMartin #define get_user __get_user 44*deae26bfSKyle McMartin 45*deae26bfSKyle McMartin #if !defined(CONFIG_64BIT) 46*deae26bfSKyle McMartin #define LDD_KERNEL(ptr) __get_kernel_bad(); 47*deae26bfSKyle McMartin #define LDD_USER(ptr) __get_user_bad(); 48*deae26bfSKyle McMartin #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) 49*deae26bfSKyle McMartin #define STD_USER(x, ptr) __put_user_asm64(x,ptr) 50*deae26bfSKyle McMartin #define ASM_WORD_INSN ".word\t" 51*deae26bfSKyle McMartin #else 52*deae26bfSKyle McMartin #define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr) 53*deae26bfSKyle McMartin #define LDD_USER(ptr) __get_user_asm("ldd",ptr) 54*deae26bfSKyle McMartin #define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr) 55*deae26bfSKyle McMartin #define STD_USER(x, ptr) __put_user_asm("std",x,ptr) 56*deae26bfSKyle McMartin #define ASM_WORD_INSN ".dword\t" 57*deae26bfSKyle McMartin #endif 58*deae26bfSKyle McMartin 59*deae26bfSKyle McMartin /* 60*deae26bfSKyle McMartin * The exception table contains two values: the first is an address 61*deae26bfSKyle McMartin * for an instruction that is allowed to fault, and the second is 62*deae26bfSKyle McMartin * the address to the fixup routine. 63*deae26bfSKyle McMartin */ 64*deae26bfSKyle McMartin 65*deae26bfSKyle McMartin struct exception_table_entry { 66*deae26bfSKyle McMartin unsigned long insn; /* address of insn that is allowed to fault. */ 67*deae26bfSKyle McMartin long fixup; /* fixup routine */ 68*deae26bfSKyle McMartin }; 69*deae26bfSKyle McMartin 70*deae26bfSKyle McMartin #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\ 71*deae26bfSKyle McMartin ".section __ex_table,\"aw\"\n" \ 72*deae26bfSKyle McMartin ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \ 73*deae26bfSKyle McMartin ".previous\n" 74*deae26bfSKyle McMartin 75*deae26bfSKyle McMartin /* 76*deae26bfSKyle McMartin * The page fault handler stores, in a per-cpu area, the following information 77*deae26bfSKyle McMartin * if a fixup routine is available. 78*deae26bfSKyle McMartin */ 79*deae26bfSKyle McMartin struct exception_data { 80*deae26bfSKyle McMartin unsigned long fault_ip; 81*deae26bfSKyle McMartin unsigned long fault_space; 82*deae26bfSKyle McMartin unsigned long fault_addr; 83*deae26bfSKyle McMartin }; 84*deae26bfSKyle McMartin 85*deae26bfSKyle McMartin #define __get_user(x,ptr) \ 86*deae26bfSKyle McMartin ({ \ 87*deae26bfSKyle McMartin register long __gu_err __asm__ ("r8") = 0; \ 88*deae26bfSKyle McMartin register long __gu_val __asm__ ("r9") = 0; \ 89*deae26bfSKyle McMartin \ 90*deae26bfSKyle McMartin if (segment_eq(get_fs(),KERNEL_DS)) { \ 91*deae26bfSKyle McMartin switch (sizeof(*(ptr))) { \ 92*deae26bfSKyle McMartin case 1: __get_kernel_asm("ldb",ptr); break; \ 93*deae26bfSKyle McMartin case 2: __get_kernel_asm("ldh",ptr); break; \ 94*deae26bfSKyle McMartin case 4: __get_kernel_asm("ldw",ptr); break; \ 95*deae26bfSKyle McMartin case 8: LDD_KERNEL(ptr); break; \ 96*deae26bfSKyle McMartin default: __get_kernel_bad(); break; \ 97*deae26bfSKyle McMartin } \ 98*deae26bfSKyle McMartin } \ 99*deae26bfSKyle McMartin else { \ 100*deae26bfSKyle McMartin switch (sizeof(*(ptr))) { \ 101*deae26bfSKyle McMartin case 1: __get_user_asm("ldb",ptr); break; \ 102*deae26bfSKyle McMartin case 2: __get_user_asm("ldh",ptr); break; \ 103*deae26bfSKyle McMartin case 4: __get_user_asm("ldw",ptr); break; \ 104*deae26bfSKyle McMartin case 8: LDD_USER(ptr); break; \ 105*deae26bfSKyle McMartin default: __get_user_bad(); break; \ 106*deae26bfSKyle McMartin } \ 107*deae26bfSKyle McMartin } \ 108*deae26bfSKyle McMartin \ 109*deae26bfSKyle McMartin (x) = (__typeof__(*(ptr))) __gu_val; \ 110*deae26bfSKyle McMartin __gu_err; \ 111*deae26bfSKyle McMartin }) 112*deae26bfSKyle McMartin 113*deae26bfSKyle McMartin #define __get_kernel_asm(ldx,ptr) \ 114*deae26bfSKyle McMartin __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ 115*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 116*deae26bfSKyle McMartin : "=r"(__gu_val), "=r"(__gu_err) \ 117*deae26bfSKyle McMartin : "r"(ptr), "1"(__gu_err) \ 118*deae26bfSKyle McMartin : "r1"); 119*deae26bfSKyle McMartin 120*deae26bfSKyle McMartin #define __get_user_asm(ldx,ptr) \ 121*deae26bfSKyle McMartin __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ 122*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\ 123*deae26bfSKyle McMartin : "=r"(__gu_val), "=r"(__gu_err) \ 124*deae26bfSKyle McMartin : "r"(ptr), "1"(__gu_err) \ 125*deae26bfSKyle McMartin : "r1"); 126*deae26bfSKyle McMartin 127*deae26bfSKyle McMartin #define __put_user(x,ptr) \ 128*deae26bfSKyle McMartin ({ \ 129*deae26bfSKyle McMartin register long __pu_err __asm__ ("r8") = 0; \ 130*deae26bfSKyle McMartin __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ 131*deae26bfSKyle McMartin \ 132*deae26bfSKyle McMartin if (segment_eq(get_fs(),KERNEL_DS)) { \ 133*deae26bfSKyle McMartin switch (sizeof(*(ptr))) { \ 134*deae26bfSKyle McMartin case 1: __put_kernel_asm("stb",__x,ptr); break; \ 135*deae26bfSKyle McMartin case 2: __put_kernel_asm("sth",__x,ptr); break; \ 136*deae26bfSKyle McMartin case 4: __put_kernel_asm("stw",__x,ptr); break; \ 137*deae26bfSKyle McMartin case 8: STD_KERNEL(__x,ptr); break; \ 138*deae26bfSKyle McMartin default: __put_kernel_bad(); break; \ 139*deae26bfSKyle McMartin } \ 140*deae26bfSKyle McMartin } \ 141*deae26bfSKyle McMartin else { \ 142*deae26bfSKyle McMartin switch (sizeof(*(ptr))) { \ 143*deae26bfSKyle McMartin case 1: __put_user_asm("stb",__x,ptr); break; \ 144*deae26bfSKyle McMartin case 2: __put_user_asm("sth",__x,ptr); break; \ 145*deae26bfSKyle McMartin case 4: __put_user_asm("stw",__x,ptr); break; \ 146*deae26bfSKyle McMartin case 8: STD_USER(__x,ptr); break; \ 147*deae26bfSKyle McMartin default: __put_user_bad(); break; \ 148*deae26bfSKyle McMartin } \ 149*deae26bfSKyle McMartin } \ 150*deae26bfSKyle McMartin \ 151*deae26bfSKyle McMartin __pu_err; \ 152*deae26bfSKyle McMartin }) 153*deae26bfSKyle McMartin 154*deae26bfSKyle McMartin /* 155*deae26bfSKyle McMartin * The "__put_user/kernel_asm()" macros tell gcc they read from memory 156*deae26bfSKyle McMartin * instead of writing. This is because they do not write to any memory 157*deae26bfSKyle McMartin * gcc knows about, so there are no aliasing issues. These macros must 158*deae26bfSKyle McMartin * also be aware that "fixup_put_user_skip_[12]" are executed in the 159*deae26bfSKyle McMartin * context of the fault, and any registers used there must be listed 160*deae26bfSKyle McMartin * as clobbers. In this case only "r1" is used by the current routines. 161*deae26bfSKyle McMartin * r8/r9 are already listed as err/val. 162*deae26bfSKyle McMartin */ 163*deae26bfSKyle McMartin 164*deae26bfSKyle McMartin #define __put_kernel_asm(stx,x,ptr) \ 165*deae26bfSKyle McMartin __asm__ __volatile__ ( \ 166*deae26bfSKyle McMartin "\n1:\t" stx "\t%2,0(%1)\n\t" \ 167*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ 168*deae26bfSKyle McMartin : "=r"(__pu_err) \ 169*deae26bfSKyle McMartin : "r"(ptr), "r"(x), "0"(__pu_err) \ 170*deae26bfSKyle McMartin : "r1") 171*deae26bfSKyle McMartin 172*deae26bfSKyle McMartin #define __put_user_asm(stx,x,ptr) \ 173*deae26bfSKyle McMartin __asm__ __volatile__ ( \ 174*deae26bfSKyle McMartin "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ 175*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ 176*deae26bfSKyle McMartin : "=r"(__pu_err) \ 177*deae26bfSKyle McMartin : "r"(ptr), "r"(x), "0"(__pu_err) \ 178*deae26bfSKyle McMartin : "r1") 179*deae26bfSKyle McMartin 180*deae26bfSKyle McMartin 181*deae26bfSKyle McMartin #if !defined(CONFIG_64BIT) 182*deae26bfSKyle McMartin 183*deae26bfSKyle McMartin #define __put_kernel_asm64(__val,ptr) do { \ 184*deae26bfSKyle McMartin u64 __val64 = (u64)(__val); \ 185*deae26bfSKyle McMartin u32 hi = (__val64) >> 32; \ 186*deae26bfSKyle McMartin u32 lo = (__val64) & 0xffffffff; \ 187*deae26bfSKyle McMartin __asm__ __volatile__ ( \ 188*deae26bfSKyle McMartin "\n1:\tstw %2,0(%1)" \ 189*deae26bfSKyle McMartin "\n2:\tstw %3,4(%1)\n\t" \ 190*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ 191*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ 192*deae26bfSKyle McMartin : "=r"(__pu_err) \ 193*deae26bfSKyle McMartin : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ 194*deae26bfSKyle McMartin : "r1"); \ 195*deae26bfSKyle McMartin } while (0) 196*deae26bfSKyle McMartin 197*deae26bfSKyle McMartin #define __put_user_asm64(__val,ptr) do { \ 198*deae26bfSKyle McMartin u64 __val64 = (u64)(__val); \ 199*deae26bfSKyle McMartin u32 hi = (__val64) >> 32; \ 200*deae26bfSKyle McMartin u32 lo = (__val64) & 0xffffffff; \ 201*deae26bfSKyle McMartin __asm__ __volatile__ ( \ 202*deae26bfSKyle McMartin "\n1:\tstw %2,0(%%sr3,%1)" \ 203*deae26bfSKyle McMartin "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ 204*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ 205*deae26bfSKyle McMartin ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ 206*deae26bfSKyle McMartin : "=r"(__pu_err) \ 207*deae26bfSKyle McMartin : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ 208*deae26bfSKyle McMartin : "r1"); \ 209*deae26bfSKyle McMartin } while (0) 210*deae26bfSKyle McMartin 211*deae26bfSKyle McMartin #endif /* !defined(CONFIG_64BIT) */ 212*deae26bfSKyle McMartin 213*deae26bfSKyle McMartin 214*deae26bfSKyle McMartin /* 215*deae26bfSKyle McMartin * Complex access routines -- external declarations 216*deae26bfSKyle McMartin */ 217*deae26bfSKyle McMartin 218*deae26bfSKyle McMartin extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); 219*deae26bfSKyle McMartin extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); 220*deae26bfSKyle McMartin extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); 221*deae26bfSKyle McMartin extern long lstrncpy_from_user(char *, const char __user *, long); 222*deae26bfSKyle McMartin extern unsigned lclear_user(void __user *,unsigned long); 223*deae26bfSKyle McMartin extern long lstrnlen_user(const char __user *,long); 224*deae26bfSKyle McMartin 225*deae26bfSKyle McMartin /* 226*deae26bfSKyle McMartin * Complex access routines -- macros 227*deae26bfSKyle McMartin */ 228*deae26bfSKyle McMartin 229*deae26bfSKyle McMartin #define strncpy_from_user lstrncpy_from_user 230*deae26bfSKyle McMartin #define strnlen_user lstrnlen_user 231*deae26bfSKyle McMartin #define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) 232*deae26bfSKyle McMartin #define clear_user lclear_user 233*deae26bfSKyle McMartin #define __clear_user lclear_user 234*deae26bfSKyle McMartin 235*deae26bfSKyle McMartin unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len); 236*deae26bfSKyle McMartin #define __copy_to_user copy_to_user 237*deae26bfSKyle McMartin unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len); 238*deae26bfSKyle McMartin #define __copy_from_user copy_from_user 239*deae26bfSKyle McMartin unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len); 240*deae26bfSKyle McMartin #define __copy_in_user copy_in_user 241*deae26bfSKyle McMartin #define __copy_to_user_inatomic __copy_to_user 242*deae26bfSKyle McMartin #define __copy_from_user_inatomic __copy_from_user 243*deae26bfSKyle McMartin 244*deae26bfSKyle McMartin #endif /* __PARISC_UACCESS_H */ 245