// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write)
			copied = copy_page_from_iter(page, offset, copy, iter);
		else
			copied = copy_page_to_iter(page, offset, copy, iter);

		len -= copied;
		/* A short copy with local iov space remaining means the copy itself faulted */
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of bytes kmalloc'd to hold struct page pointers during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct page pointer array that can store at least
 *	nr_pages_to_copy entries
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pinned_pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		mmap_read_lock(mm);
		pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
						     flags, process_pages,
						     NULL, &locked);
		/* pin_user_pages_remote() may have dropped the mmap lock itself */
		if (locked)
			mmap_read_unlock(mm);
		if (pinned_pages <= 0)
			return -EFAULT;

		bytes = pinned_pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pinned_pages;
		pa += pinned_pages * PAGE_SIZE;

		/* If vm_write is set, the pages need to be made dirty: */
		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
					    vm_write);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or an error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or an error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif
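
/*
 * Illustrative usage (userspace, not part of this kernel file): a minimal
 * sketch of calling process_vm_readv() through the glibc wrapper to copy a
 * buffer out of another process.  The helper name read_remote() and its
 * arguments are hypothetical; the caller needs ptrace-attach permission on
 * the target (PTRACE_MODE_ATTACH_REALCREDS, as checked by mm_access()
 * above), and flags must be 0 or the syscall fails with EINVAL.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/uio.h>
 *
 *	static ssize_t read_remote(pid_t pid, void *local, void *remote,
 *				   size_t len)
 *	{
 *		struct iovec lvec = { .iov_base = local,  .iov_len = len };
 *		struct iovec rvec = { .iov_base = remote, .iov_len = len };
 *
 *		return process_vm_readv(pid, &lvec, 1, &rvec, 1, 0);
 *	}
 *
 * As documented for process_vm_rw_core(), a return value smaller than len
 * means the copy stopped partway (for example at an unmapped remote page),
 * so callers should check the returned byte count rather than assume the
 * full length was transferred.
 */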