xref: /openbmc/linux/mm/process_vm_access.c (revision 0edbfea5)
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum size (two pages' worth) kmalloc'd to hold struct page pointers */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct page pointer array that can store at least
 *  nr_pages_to_copy entries
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
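	/* Number of page pointers that fit in PVM_MAX_KMALLOC_PAGES bytes */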
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
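	/* Number of target pages spanned by [addr, addr + len) */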
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * add FOLL_REMOTE because task/mm might not be
		 * current/current->mm
		 */
		pages = __get_user_pages_unlocked(task, mm, pa, pages,
						  vm_write, 0, process_pages,
						  FOLL_REMOTE);
		if (pages <= 0)
			return -EFAULT;

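		/* Bytes available in the pages just pinned, capped to len below */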
		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
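		/* Drop the references taken by __get_user_pages_unlocked() */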
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or an error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many struct page pointers we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
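			/*
			 * Worst-case number of pages this iovec can span;
			 * only the largest count matters since the same
			 * page array is reused for every iovec.
			 */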
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

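	/*
	 * Copy each remote iovec in turn until the local iovecs are
	 * exhausted or an error occurs.
	 */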
	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or an error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);

	return rc;
}

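/*
 * Userspace usage sketch (illustrative only; buf, remote_addr and pid
 * are placeholder names):
 *
 *	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *
 * flags must be 0; on a partial transfer the number of bytes copied so
 * far is returned rather than an error.
 */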
SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt,	unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt,	unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif