xref: /openbmc/linux/net/ceph/pagevec.c (revision e0bf6c5c)
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	while (got < num_pages) {
		/*
		 * get_user_pages_unlocked() may pin fewer pages than
		 * requested, so keep going until the whole range is
		 * covered or an error is returned.
		 */
		rc = get_user_pages_unlocked(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);

/*
 * drop the page references taken by ceph_get_direct_page_vector() and
 * free the vector itself, optionally dirtying the pages first
 */
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
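
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs ceph_get_direct_page_vector() with ceph_put_page_vector(),
 * passing dirty = true only when data was copied into the pinned pages
 * (i.e. a read into the user buffer).  The function name below and the
 * use of calc_pages_for() from <linux/ceph/libceph.h> are assumptions
 * made for the example.
 */
static int __maybe_unused example_pin_user_range(const void __user *buf,
						 size_t len, bool is_read)
{
	int num_pages = calc_pages_for((unsigned long)buf, len);
	struct page **pages;

	pages = ceph_get_direct_page_vector(buf, num_pages, is_read);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... hand the pinned pages to the OSD client here ... */

	/* dirty the pages only if they were written to */
	ceph_put_page_vector(pages, num_pages, is_read);
	return 0;
}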

/*
 * free the pages themselves and the vector that holds them
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
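
/*
 * Illustrative sketch, not part of the original file: kernel-allocated
 * page vectors from ceph_alloc_page_vector() are paired with
 * ceph_release_page_vector(), which frees both the pages and the array.
 * The function name below and the use of calc_pages_for() are
 * assumptions made for the example.
 */
static int __maybe_unused example_alloc_bounce_pages(size_t len)
{
	int num_pages = calc_pages_for(0, len);
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... fill the pages and queue them for I/O here ... */

	ceph_release_page_vector(pages, num_pages);
	return 0;
}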

/*
 * copy user data into a page vector; returns len on success, or -EFAULT
 * if a pass over the user buffer makes no progress
 */
int ceph_copy_user_to_page_vector(struct page **pages,
					 const void __user *data,
					 loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;	/* nothing copied this pass */
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
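
/*
 * Illustrative sketch, not part of the original file: staging a user
 * buffer into a kernel page vector before submitting a write.  Note
 * that ceph_copy_user_to_page_vector() returns the requested length on
 * success rather than 0.  The function name below and the use of
 * calc_pages_for() are assumptions made for the example.
 */
static int __maybe_unused example_stage_user_write(const void __user *buf,
						   loff_t off, size_t len)
{
	int num_pages = calc_pages_for(off, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = ceph_copy_user_to_page_vector(pages, buf, off, len);
	if (ret < 0) {
		ceph_release_page_vector(pages, num_pages);
		return ret;
	}

	/* ... submit the staged pages, then release them ... */
	ceph_release_page_vector(pages, num_pages);
	return 0;
}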

void ceph_copy_to_page_vector(struct page **pages,
				    const void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

void ceph_copy_from_page_vector(struct page **pages,
				    void *data,
				    loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
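
/*
 * Illustrative sketch, not part of the original file: the kernel-space
 * copy helpers mirror each other, so bytes staged with
 * ceph_copy_to_page_vector() can be read back with
 * ceph_copy_from_page_vector() using the same offset and length.  The
 * function name below is an assumption made for the example.
 */
static void __maybe_unused example_copy_roundtrip(struct page **pages,
						  const void *in, void *out,
						  loff_t off, size_t len)
{
	ceph_copy_to_page_vector(pages, in, off, len);
	ceph_copy_from_page_vector(pages, out, off, len);
	/* out now holds the same len bytes that were staged from in */
}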

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
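
/*
 * Illustrative sketch, not part of the original file: zeroing the tail
 * of a short read so the caller never sees stale page contents.  The
 * function and variable names are assumptions made for the example.
 */
static void __maybe_unused example_zero_short_read(struct page **pages,
						   int bytes_read,
						   int buf_len)
{
	if (bytes_read < buf_len)
		ceph_zero_page_vector_range(bytes_read,
					    buf_len - bytes_read, pages);
}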