/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum size, in bytes, of the buffer kmalloc'd to hold struct page
   pointers during the copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
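/*
 * Illustrative sizing (assuming a typical 64-bit configuration with
 * 4 KiB pages and 8-byte pointers): two pages' worth of pointers is
 * 1024 entries, so each get_user_pages_remote() batch below can pin
 * at most 1024 pages, i.e. 4 MiB of the remote address space at a time.
 */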
/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct page pointer area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
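	/*
	 * e.g. with 4 KiB pages, addr = 0x1ff0 and len = 0x20 gives
	 * (0x200f >> 12) - (0x1ff0 >> 12) + 1 = 2 - 1 + 1 = 2 pages,
	 * since the range straddles a page boundary.
	 */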

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		down_read(&mm->mmap_sem);
		pages = get_user_pages_remote(task, mm, pa, pages, flags,
					      process_pages, NULL, &locked);
		if (locked)
			up_read(&mm->mmap_sem);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for the process pages array,
   which lives on the stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages' worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif
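
/*
 * Illustrative userspace usage (a minimal sketch, not part of this file's
 * build; assumes glibc >= 2.15, which exposes the process_vm_readv()
 * wrapper via <sys/uio.h> under _GNU_SOURCE). It reads 64 bytes from the
 * given pid at the given remote address; the caller needs the same
 * permission a ptrace attach would require (PTRACE_MODE_ATTACH_REALCREDS):
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/uio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char buf[64];
 *		struct iovec local = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct iovec remote = { .iov_len = sizeof(buf) };
 *		ssize_t n;
 *
 *		if (argc != 3)
 *			return 1;
 *		remote.iov_base = (void *)strtoul(argv[2], NULL, 0);
 *
 *		// may report fewer bytes than requested on a partial copy
 *		n = process_vm_readv(atoi(argv[1]), &local, 1, &remote, 1, 0);
 *		if (n < 0)
 *			perror("process_vm_readv");
 *		else
 *			printf("copied %zd bytes\n", n);
 *		return n < 0;
 *	}
 */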