xref: /openbmc/linux/net/ceph/pagevec.c (revision 95e9fd10)
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const char __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/*
	 * Pin the user pages.  get_user_pages() may pin fewer pages than
	 * requested, so keep calling it until the whole range is covered
	 * or it reports an error.
	 */
	down_read(&current->mm->mmap_sem);
	while (got < num_pages) {
		rc = get_user_pages(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got, NULL);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	/* drop whatever we managed to pin before the failure */
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);

/*
 * Release the pages in the vector (marking them dirty first if
 * requested) and free the vector itself.
 */
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
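
/*
 * Illustrative sketch, not part of the original file: a caller doing
 * O_DIRECT-style I/O on a user buffer might pair the two helpers above
 * roughly as follows.  "udata", "len" and "do_read" are hypothetical
 * names introduced only for this example; calc_pages_for() is the
 * helper from <linux/ceph/libceph.h>.
 *
 *	int num_pages = calc_pages_for((unsigned long)udata, len);
 *	struct page **pages;
 *	int ret;
 *
 *	pages = ceph_get_direct_page_vector(udata, num_pages, true);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ret = do_read(pages, len);                 (hypothetical I/O step)
 *	ceph_put_page_vector(pages, num_pages, ret > 0);
 */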

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			/* free the pages allocated so far, then the vector */
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
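
/*
 * Illustrative sketch, not part of the original file: allocating a
 * scratch page vector, filling it from a kernel buffer with one of the
 * copy helpers below, and releasing it again.  "buf" and "buf_len" are
 * hypothetical names introduced only for this example.
 *
 *	int num_pages = calc_pages_for(0, buf_len);
 *	struct page **pages;
 *
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ceph_copy_to_page_vector(pages, buf, 0, buf_len);
 *	... hand the pages to the messenger / OSD request ...
 *	ceph_release_page_vector(pages, num_pages);
 */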

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
					 const char __user *data,
					 loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;	/* offset within the first page */
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		/* on a partial fault, advance past what did copy and retry */
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
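
/*
 * Illustrative sketch, not part of the original file: filling an
 * existing page vector from a user buffer, e.g. on a write path.  The
 * copy helpers index from page 0 and only use the low bits of "off",
 * so the offset is expected to fall within the first page.  "udata"
 * and "len" are hypothetical names for this example.
 *
 *	int ret = ceph_copy_user_to_page_vector(pages, udata, off, len);
 *
 *	if (ret < 0)
 *		return ret;                 (-EFAULT on a bad user address)
 */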

/*
 * copy kernel data into a page vector
 */
int ceph_copy_to_page_vector(struct page **pages,
				    const char *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

/*
 * copy data from a page vector into a kernel buffer
 */
int ceph_copy_from_page_vector(struct page **pages,
				    char *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);

/*
 * copy data from a page vector into a user buffer
 */
int ceph_copy_page_vector_to_user(struct page **pages,
					 char __user *data,
					 loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
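
/*
 * Illustrative sketch, not part of the original file: after a read
 * completes into a page vector, the result might be copied back to the
 * caller's buffer roughly like this.  "udata", "num_pages" and "got"
 * (bytes actually read) are hypothetical names for this example.
 *
 *	int ret = ceph_copy_page_vector_to_user(pages, udata, 0, got);
 *
 *	ceph_release_page_vector(pages, num_pages);
 *	return ret < 0 ? ret : got;
 */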

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
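
/*
 * Illustrative sketch, not part of the original file: a typical use of
 * ceph_zero_page_vector_range() is padding out a short read so the
 * caller never sees stale page contents.  "read_len" (bytes requested)
 * and "got" (bytes actually returned) are hypothetical names.
 *
 *	if (got < read_len)
 *		ceph_zero_page_vector_range(got, read_len - got, pages);
 */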