/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel.  This header file provides both the assembly
 * macros and the C macros/functions for validating and performing
 * user-space accesses.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>
#include <asm/types.h>

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#ifdef __ASSEMBLY__

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>

/*
 * These assembly macros mirror the C macros that follow below.  They
 * should always have identical functionality.  See
 * arch/xtensa/kernel/sys.S for usage.
 */

/* Must match the C-side mm_segment_t values below (KERNEL_DS == 0 is
 * relied upon by the kernel_ok macro's beqz test). */
#define KERNEL_DS	0
#define USER_DS		1

#define get_ds	(KERNEL_DS)

/*
 * get_fs reads current->thread.current_ds into a register.
 * On Entry:
 *	<ad>	anything
 *	<sp>	stack
 * On Exit:
 *	<ad>	contains current->thread.current_ds
 */
	.macro	get_fs	ad, sp
	GET_CURRENT(\ad,\sp)
	l32i	\ad, \ad, THREAD_CURRENT_DS
	.endm

/*
 * set_fs sets current->thread.current_ds to some value.
 * On Entry:
 *	<at>	anything (temp register)
 *	<av>	value to write
 *	<sp>	stack
 * On Exit:
 *	<at>	destroyed (actually, current)
 *	<av>	preserved, value to write
 */
	.macro	set_fs	at, av, sp
	GET_CURRENT(\at,\sp)
	s32i	\av, \at, THREAD_CURRENT_DS
	.endm

/*
 * kernel_ok determines whether we should bypass addr/size checking.
 * See the equivalent C-macro version below for clarity.
 * On success, kernel_ok branches to a label indicated by parameter
 * <success>.  This implies that the macro falls through to the next
 * instruction on an error.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on error).
 *
 * On Entry:
 *	<at>		anything (temp register)
 *	<success>	label to branch to on success; implies
 *			fall-through macro on error
 *	<sp>		stack pointer
 * On Exit:
 *	<at>		destroyed (actually, current->thread.current_ds)
 */

/* The beqz below encodes "current_ds == KERNEL_DS"; guard against
 * anyone changing the numeric values above without updating it. */
#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
	.macro	kernel_ok  at, sp, success
	get_fs	\at, \sp
	beqz	\at, \success
	.endm

/*
 * user_ok determines whether the access to user-space memory is allowed.
 * See the equivalent C-macro version below for clarity.
 *
 * On error, user_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on success).
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed (actually, (TASK_SIZE + 1 - size))
 */
	.macro	user_ok	aa, as, at, error
	/* Reject size >= TASK_SIZE, then reject addr >= TASK_SIZE - size;
	 * mirrors the C __user_ok() check below. */
	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
	bgeu	\as, \at, \error
	sub	\at, \at, \as
	bgeu	\aa, \at, \error
	.endm

/*
 * access_ok determines whether a memory access is allowed.  See the
 * equivalent C-macro version below for clarity.
 *
 * On error, access_ok branches to a label indicated by parameter
 * <error>.  This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that we assume success is the common case, and we optimize the
 * branch fall-through case on success.
 *
 * On Entry:
 *	<aa>	register containing memory address
 *	<as>	register containing memory size
 *	<at>	temp register
 *	<sp>	stack pointer (needed to locate current)
 *	<error>	label to branch to on error; implies fall-through
 *		macro on success
 * On Exit:
 *	<aa>	preserved
 *	<as>	preserved
 *	<at>	destroyed
 */
	.macro	access_ok  aa, as, at, sp, error
	/* KERNEL_DS bypasses the range check entirely. */
	kernel_ok  \at, \sp, .Laccess_ok_\@
	user_ok    \aa, \as, \at, \error
.Laccess_ok_\@:
	.endm

#else /* __ASSEMBLY__ not defined */

#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed, with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a,b)	((a).seg == (b).seg)

/* Range check: the region [addr, addr+size) must lie entirely below
 * TASK_SIZE.  Size is checked first so that TASK_SIZE-(size) cannot
 * underflow.  KERNEL_DS bypasses the check; the access_ok() `type'
 * argument (VERIFY_READ/VERIFY_WRITE) is deliberately unused on this
 * architecture. */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
/* Both evaluate to 0 on success or -EFAULT on fault. */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))


/* Link-time error trap for unsupported transfer sizes. */
extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

/* Dispatch on transfer size; 8-byte stores have no single store insn
 * and go through __copy_to_user instead. */
#define __put_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break;	\
	case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break;	\
	case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break;	\
	case 8: {							\
		     __typeof__(*ptr) __v64 = x;			\
		     retval = __copy_to_user(ptr,&__v64,8);		\
		     break;						\
	        }							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider a case of a user single load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

/* On misalignment (low bits of the address operand %3 set), store
 * -EFAULT (%4) into the error output (%0) and jump past the access. */
#define __check_align_1  ""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 *
 * Operands: %0 = err (in/out), %1 = cb (fixup scratch), %2 = value,
 * %3 = user address, %4 = -EFAULT.  The __ex_table entry maps a fault
 * at label 1 to the fixup at label 5, which sets err and resumes at 2.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
   __asm__ __volatile__(				\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long	1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (cb)				\
	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);\
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})

#define __get_user_check(x,ptr,size)				\
({								\
	long __gu_err = -EFAULT, __gu_val = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (access_ok(VERIFY_READ,__gu_addr,size))		\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/* Link-time error trap for unsupported transfer sizes. */
extern long __get_user_bad(void);

/* Dispatch on transfer size; 8-byte loads have no single load insn
 * and go through __copy_from_user instead. */
#define __get_user_size(x,ptr,size,retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break;	\
	case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break;	\
	case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break;	\
	case 8: retval = __copy_from_user(&x,ptr,8);    break;	\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 *
 * Operands: %0 = err (in/out), %1 = cb (fixup scratch), %2 = result
 * value (output), %3 = user address, %4 = -EFAULT.  On a fault the
 * fixup at label 5 zeroes the result and sets err before resuming.
 */
#define __get_user_asm(x, addr, err, align, insn, cb) \
   __asm__ __volatile__(			\
	__check_align_##align			\
	"1: "insn"  %2, %3, 0		\n"	\
	"2:				\n"	\
	"   .section  .fixup,\"ax\"	\n"	\
	"   .align 4			\n"	\
	"4:				\n"	\
	"   .long  2b			\n"	\
	"5:				\n"	\
	"   l32r   %1, 4b		\n"	\
	"   movi   %2, 0		\n"	\
	"   movi   %0, %4		\n"	\
	"   jx     %1			\n"	\
	"   .previous			\n"	\
	"   .section  __ex_table,\"a\"	\n"	\
	"   .long	1b, 5b		\n"	\
	"   .previous"				\
	:"=r" (err), "=r" (cb), "=r" (x)	\
	:"r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

/*
 * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
 * architecture would cause heavy code bloat if we tried to inline
 * these functions and provide __constant_copy_* equivalents like the
 * i386 versions.  __xtensa_copy_user is quite efficient.  See the
 * .fixup section of __xtensa_copy_user for a discussion on the
 * X_zeroing equivalents for Xtensa.
 */

/* Returns the number of bytes NOT copied (0 on complete success). */
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)


/* Unchecked variants: caller must have done access_ok() already. */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

/* Checked copy to user space; returns bytes not copied (n if the
 * destination range fails access_ok). */
static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	prefetch(from);
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to,from,n);
	return n;
}

/* Checked copy from user space; on a rejected source range the
 * destination is zero-filled (required so callers never see
 * uninitialized kernel memory) and n is returned. */
static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user(to,from,n);
	else
		memset(to, 0, n);
	return n;
}

#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user


/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, return no memory cleared.  Otherwise, zero for
 * success.
 *
 * NOTE(review): this relies on the arch-private memset() returning
 * NULL on a faulting access (unlike the C standard memset, which
 * always returns its first argument) -- confirm against the xtensa
 * memset implementation before reusing this pattern.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if ( ! memset(addr, 0, size) )
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	/* NOTE(review): -EFAULT here is returned through an unsigned
	 * long, so callers see a large positive value rather than the
	 * usual "bytes not cleared" count -- longstanding behavior,
	 * kept as-is. */
	return size ? -EFAULT : 0;
}

#define __clear_user  __xtensa_clear_user


extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user

/* Only the first byte of src is range-checked here; the low-level
 * routine relies on the exception tables for the rest of the string. */
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}


#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	/* With KERNEL_DS any address is acceptable; otherwise the
	 * string must start below TASK_SIZE. */
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}


struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */

extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);

/* Returns the new pc; map_reg and pc are unused on this architecture. */
#define fixup_exception(map_reg, fixup_unit, pc)		\
({								\
	fixup_unit;						\
})

#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_UACCESS_H */