/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User memory access support for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
/*
 * User space memory access functions
 */
#include <asm/sections.h>

/*
 * When a kernel-mode page fault is taken, the faulting instruction
 * address is checked against a table of exception_table_entries.
 * Each entry is a tuple of the address of an instruction that may
 * be authorized to fault, and the address at which execution should
 * be resumed instead of the faulting instruction, so as to effect
 * a workaround.  (An illustrative sketch of the entry layout appears
 * at the end of this file.)
 */

/* Somewhat-optimized assembly copy routines */
unsigned long raw_copy_from_user(void *to, const void __user *from,
				 unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
			       unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))

#include <asm-generic/uaccess.h>

#endif
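
/*
 * Illustrative sketch only, not part of the original header, assuming
 * Hexagon uses the asm-generic exception table layout: each entry is
 * the pair of addresses described in the comment above, roughly
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	(address permitted to fault)
 *		unsigned long fixup;	(address to resume at instead)
 *	};
 *
 * On a kernel-mode fault, the fault handler can look up the faulting
 * address with search_exception_tables() and, on a match, redirect
 * execution to entry->fixup rather than treating the fault as fatal.
 */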
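
/*
 * Usage sketch with a hypothetical caller (ubuf and struct foo are
 * invented for illustration): raw_copy_from_user()/raw_copy_to_user()
 * return the number of bytes that could NOT be copied, and the generic
 * copy_from_user()/copy_to_user() and clear_user() wrappers keep that
 * convention, so a nonzero return means a partial copy:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, ubuf, sizeof(karg)))
 *		return -EFAULT;
 *
 *	if (clear_user(ubuf, sizeof(struct foo)))
 *		return -EFAULT;
 */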