/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			if (copy > iov_iter_count(iter))
				copy = iov_iter_count(iter);
			copied = iov_iter_copy_from_user(page, iter,
							 offset, copy);
			iov_iter_advance(iter, copied);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/* Get the pages we're interested in */
		down_read(&mm->mmap_sem);
		pages = get_user_pages(task, mm, pa, pages,
				       vm_write, 0, process_pages, NULL);
		up_read(&mm->mmap_sem);

		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
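		/* Only the first batch can start part-way into a page */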
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);
	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
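	/*
	 * CHECK_IOVEC_ONLY: only the lengths of the remote iovec are
	 * sanity checked here; the addresses belong to the target
	 * process, so they are not access_ok() checked against us.
	 */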
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif
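/*
 * Usage sketch (userspace, illustrative only, not part of this file):
 * a debugger-style helper that pulls one buffer out of another process.
 * It assumes the glibc wrapper for process_vm_readv() in <sys/uio.h>;
 * the pid and remote address would come from whatever tool drives it,
 * and error handling is left to the caller via the returned byte count.
 *
 *	#include <sys/uio.h>
 *
 *	static ssize_t peek_remote(pid_t pid, const void *remote_addr,
 *				   void *local_buf, size_t len)
 *	{
 *		struct iovec local  = { .iov_base = local_buf, .iov_len = len };
 *		struct iovec remote = { .iov_base = (void *)remote_addr,
 *					.iov_len = len };
 *
 *		return process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	}
 */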