/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <asm/current.h>
#include <asm/page.h>

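/*
 * Pin down the page containing a given userspace address so that it can
 * be written to directly with memcpy()/memset().  On success, returns 1
 * with *ptep pointing at the mapped PTE and *ptlp holding its lock;
 * returns 0 if the page is absent or not writable in place.
 */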
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
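	/*
	 * The PTE must be present, young, writable and dirty: only then
	 * can we write through the kernel mapping without triggering a
	 * fault or losing an accessed/dirty bit update.
	 */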
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

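	/*
	 * With the address limit set to KERNEL_DS, the "user" pointer
	 * is really a kernel address and a plain memcpy() is safe.
	 */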
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

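		/*
		 * If the page can't be pinned, poke it with __put_user():
		 * this faults the page in and marks its PTE young and
		 * dirty, or fails if the address is genuinely invalid,
		 * in which case we bail out with n bytes left uncopied.
		 */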
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

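		/* copy no further than the end of the current page */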
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low: it avoids dumping a large
	 * set of registers on the stack just to reload them right away.
	 * With the frame pointer disabled, tail call optimization kicks
	 * in as well, making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

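		/*
		 * As in __copy_to_user_memcpy(): fault the page in and
		 * dirty it with __put_user(), or give up if the address
		 * is invalid.
		 */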
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

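		/* clear no further than the end of the current page */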
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined threshold
 * would imply some overhead (small, but still there), and so far the
 * measurements on the targets of interest didn't show a variation
 * worth that cost.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to be meaningful.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
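	/* remap the destination page with user-style (write-only)
	   permissions so it can stand in for a userspace buffer */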
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}
subsys_initcall(test_size_threshold);

#endif