#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
                                           int num_pages, bool write_page)
{
        struct page **pages;
        int got = 0;
        int rc = 0;

        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        down_read(&current->mm->mmap_sem);
        while (got < num_pages) {
                rc = get_user_pages(current, current->mm,
                    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
                    num_pages - got, write_page, 0, pages + got, NULL);
                if (rc < 0)
                        break;
                BUG_ON(rc == 0);
                got += rc;
        }
        up_read(&current->mm->mmap_sem);
        if (rc < 0)
                goto fail;
        return pages;

fail:
        ceph_put_page_vector(pages, got, false);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);

/*
 * release pages pinned by ceph_get_direct_page_vector(), optionally
 * dirtying them first, then free the vector itself
 */
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
        int i;

        for (i = 0; i < num_pages; i++) {
                if (dirty)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
        kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

/*
 * free pages allocated by ceph_alloc_page_vector(), and the vector itself
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                __free_pages(pages[i], 0);
        kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
        struct page **pages;
        int i;

        pages = kmalloc(sizeof(*pages) * num_pages, flags);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < num_pages; i++) {
                pages[i] = __page_cache_alloc(flags);
                if (pages[i] == NULL) {
                        ceph_release_page_vector(pages, i);
                        return ERR_PTR(-ENOMEM);
                }
        }
        return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
                                  const void __user *data,
                                  loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, PAGE_CACHE_SIZE-po, left);
                bad = copy_from_user(page_address(pages[i]) + po, data, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                po += l - bad;
                if (po == PAGE_CACHE_SIZE) {
                        po = 0;
                        i++;
                }
        }
        return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);

/*
 * copy kernel data into a page vector
 */
void ceph_copy_to_page_vector(struct page **pages,
                              const void *data,
                              loff_t off, size_t len)
{
        int i = 0;
        size_t po = off & ~PAGE_CACHE_MASK;
        size_t left = len;

        while (left > 0) {
                size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

                memcpy(page_address(pages[i]) + po, data, l);
                data += l;
                left -= l;
                po += l;
                if (po == PAGE_CACHE_SIZE) {
                        po = 0;
                        i++;
                }
        }
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

/*
 * copy data from a page vector into a kernel pointer
 */
void ceph_copy_from_page_vector(struct page **pages,
                                void *data,
                                loff_t off, size_t len)
{
        int i = 0;
        size_t po = off & ~PAGE_CACHE_MASK;
        size_t left = len;

        while (left > 0) {
                size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

                memcpy(data, page_address(pages[i]) + po, l);
                data += l;
                left -= l;
                po += l;
                if (po == PAGE_CACHE_SIZE) {
                        po = 0;
                        i++;
                }
        }
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);

/*
 * copy data from a page vector into a user pointer
 */
int ceph_copy_page_vector_to_user(struct page **pages,
                                  void __user *data,
                                  loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, left, PAGE_CACHE_SIZE-po);
                bad = copy_to_user(data, page_address(pages[i]) + po, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                if (po) {
                        po += l - bad;
                        if (po == PAGE_CACHE_SIZE)
                                po = 0;
                }
                i++;
        }
        return len;
}
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
        int i = off >> PAGE_CACHE_SHIFT;

        off &= ~PAGE_CACHE_MASK;

        dout("zero_page_vector_page %u~%u\n", off, len);

        /* leading partial page? */
        if (off) {
                int end = min((int)PAGE_CACHE_SIZE, off + len);
                dout("zeroing %d %p head from %d\n", i, pages[i],
                     (int)off);
                zero_user_segment(pages[i], off, end);
                len -= (end - off);
                i++;
        }
        while (len >= PAGE_CACHE_SIZE) {
                dout("zeroing %d %p len=%d\n", i, pages[i], len);
                zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                len -= PAGE_CACHE_SIZE;
                i++;
        }
        /* trailing partial page? */
        if (len) {
                dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
                zero_user_segment(pages[i], 0, len);
        }
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
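
/*
 * Illustrative sketch (not part of the upstream file, kept under #if 0):
 * how a caller might pair ceph_alloc_page_vector(),
 * ceph_copy_to_page_vector() and ceph_release_page_vector().  The
 * function name, buffer and length below are hypothetical;
 * calc_pages_for() comes from <linux/ceph/libceph.h>.
 */
#if 0
static int example_stage_buffer(const void *buf, size_t len)
{
        int num_pages = calc_pages_for(0, len);
        struct page **pages;

        /* allocate page-sized backing for the buffer */
        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* stage the kernel buffer into the freshly allocated pages */
        ceph_copy_to_page_vector(pages, buf, 0, len);

        /* ... hand the vector to an OSD request here ... */

        /* free the pages and the vector once the request is done */
        ceph_release_page_vector(pages, num_pages);
        return 0;
}
#endif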