/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <kern_util.h>
#include <os.h>

/* Walk the page tables and return the pte mapping addr, or NULL. */
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == NULL)
		return NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

/*
 * Return the pte mapping virt, faulting the page in (and making it
 * writable when is_write is set) if it is not already usable.
 */
static pte_t *maybe_map(unsigned long virt, int is_write)
{
	pte_t *pte = virt_to_pte(current->mm, virt);
	int err, dummy_code;

	if ((pte == NULL) || !pte_present(*pte) ||
	    (is_write && !pte_write(*pte))) {
		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
		if (err)
			return NULL;
		pte = virt_to_pte(current->mm, virt);
	}
	if (!pte_present(*pte))
		pte = NULL;

	return pte;
}

/*
 * kmap the page backing addr and run op on the mapped address.  Faults
 * raised while op runs are caught via the thread's fault_catcher
 * jmp_buf and reported as -1.
 */
static int do_op_one_page(unsigned long addr, int len, int is_write,
		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
	jmp_buf buf;
	struct page *page;
	pte_t *pte;
	int n, faulted;

	pte = maybe_map(addr, is_write);
	if (pte == NULL)
		return -1;

	page = pte_page(*pte);
	addr = (unsigned long) kmap_atomic(page) +
		(addr & ~PAGE_MASK);

	current->thread.fault_catcher = &buf;

	faulted = UML_SETJMP(&buf);
	if (faulted == 0)
		n = (*op)(addr, len, arg);
	else
		n = -1;

	current->thread.fault_catcher = NULL;

	kunmap_atomic((void *)addr);

	return n;
}

/*
 * Apply op to [addr, addr + len) one page at a time.  Returns 0 when
 * everything was processed (or op stopped early with a positive
 * return), otherwise the number of bytes remaining.
 */
static long buffer_op(unsigned long addr, int len, int is_write,
		      int (*op)(unsigned long, int, void *), void *arg)
{
	long size, remain, n;

	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
	remain = len;

	n = do_op_one_page(addr, size, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	addr += size;
	remain -= size;
	if (remain == 0)
		goto out;

	while (addr < ((addr + remain) & PAGE_MASK)) {
		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
		if (n != 0) {
			remain = (n < 0 ? remain : 0);
			goto out;
		}

		addr += PAGE_SIZE;
		remain -= PAGE_SIZE;
	}
	if (remain == 0)
		goto out;

	n = do_op_one_page(addr, remain, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	return 0;
 out:
	return remain;
}

static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}

long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (__force void*)from, n);
		return 0;
	}

	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
EXPORT_SYMBOL(__copy_from_user);

static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
	unsigned long *from_ptr = arg, from = *from_ptr;

	memcpy((void *) to, (void *) from, len);
	*from_ptr += len;
	return 0;
}

long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((__force void *) to, from, n);
		return 0;
	}

	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
EXPORT_SYMBOL(__copy_to_user);

static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
	char **to_ptr = arg, *to = *to_ptr;
	int n;

	strncpy(to, (void *) from, len);
	n = strnlen(to, len);
	*to_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long n;
	char *ptr = dst;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		strncpy(dst, (__force void *) src, count);
		return strnlen(dst, count);
	}

	n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
		      &ptr);
	if (n != 0)
		return -EFAULT;
	return strnlen(dst, count);
}
EXPORT_SYMBOL(__strncpy_from_user);

static int clear_chunk(unsigned long addr, int len, void *unused)
{
	memset((void *) addr, 0, len);
	return 0;
}

unsigned long __clear_user(void __user *mem, unsigned long len)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((__force void*)mem, 0, len);
		return 0;
	}

	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
EXPORT_SYMBOL(__clear_user);

static int strnlen_chunk(unsigned long str, int len, void *arg)
{
	int *len_ptr = arg, n;

	n = strnlen((void *) str, len);
	*len_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

long __strnlen_user(const void __user *str, long len)
{
	int count = 0, n;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((__force char*)str, len) + 1;

	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
	if (n == 0)
		return count + 1;
	return 0;
}
EXPORT_SYMBOL(__strnlen_user);