#include "ceph_debug.h"

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "osd_client.h"

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
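 *
 * (For example, an inode with two dirty pages and no snapshots has
 * i_wrbuffer_ref == i_wrbuffer_ref_head == 2.  If a snapshot is then
 * taken, the head count moves to the new capsnap: i_wrbuffer_ref
 * stays 2 while i_wrbuffer_ref_head drops back to 0.)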
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty_pages.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), at which point we are writing the most
 * recently dirtied pages.
 *
 * Invalidation and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */


/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&inode->i_lock);
	if (ci->i_wrbuffer_ref_head == 0)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
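	/* take an inode reference for the first dirty page */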
	if (ci->i_wrbuffer_ref == 0)
		igrab(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&inode->i_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private.  Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}

	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = (void *)page->private;

	BUG_ON(!PageLocked(page));
	BUG_ON(!page->private);
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}

/* just a sanity check */
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(page->private);
	WARN_ON(PagePrivate(page));
	return 0;
}

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64)page->index << PAGE_CACHE_SHIFT, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1);
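	/* a hole (no object backing this range) reads as zeros */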
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}

/*
 * Build a vector of contiguous pages from the provided page list.
 */
static struct page **page_vector_from_list(struct list_head *page_list,
					   unsigned *nr_pages)
{
	struct page **pages;
	struct page *page;
	int next_index, contig_pages = 0;

	/* build page vector */
	pages = kmalloc(sizeof(*pages) * *nr_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	BUG_ON(list_empty(page_list));
	next_index = list_entry(page_list->prev, struct page, lru)->index;
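	/* page_list has the lowest index at the tail; walk it in reverse
	 * and stop at the first gap in the indices */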
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index == next_index) {
			dout("readpages page %d %p\n", contig_pages, page);
			pages[contig_pages] = page;
			contig_pages++;
			next_index++;
		} else {
			break;
		}
	}
	*nr_pages = contig_pages;
	return pages;
}

/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
	int rc = 0;
	struct page **pages;
	struct pagevec pvec;
	loff_t offset;
	u64 len;

	dout("readpages %p file %p nr_pages %d\n",
	     inode, file, nr_pages);

	pages = page_vector_from_list(page_list, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* guess read extent */
	offset = (u64)pages[0]->index << PAGE_CACHE_SHIFT;
	len = (u64)nr_pages << PAGE_CACHE_SHIFT;
	rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				 offset, &len,
				 ci->i_truncate_seq, ci->i_truncate_size,
				 pages, nr_pages);
	if (rc == -ENOENT)
		rc = 0;
	if (rc < 0)
		goto out;

	/* set uptodate and add to lru in pagevec-sized chunks */
	pagevec_init(&pvec, 0);
	for (; !list_empty(page_list) && len > 0;
	     rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
		struct page *page =
			list_entry(page_list->prev, struct page, lru);

		list_del(&page->lru);

		if (rc < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = rc < 0 ? 0 : rc;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}

		if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
			page_cache_release(page);
			dout("readpages %p add_to_page_cache failed %p\n",
			     inode, page);
			continue;
		}
		dout("readpages %p adding %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (pagevec_add(&pvec, page) == 0)
			pagevec_lru_add_file(&pvec);   /* add to lru */
	}
	pagevec_lru_add_file(&pvec);
	rc = 0;

out:
	kfree(pages);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 *
 * Caller holds i_lock.
 */
static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
						      u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_snap_realm) {
		snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	return snapc;
}

static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_snap_context *snapc = NULL;

	spin_lock(&inode->i_lock);
	snapc = __get_oldest_context(inode, snap_size);
	spin_unlock(&inode->i_lock);
	return snapc;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_osd_client *osdc;
	loff_t page_off = (u64)page->index << PAGE_CACHE_SHIFT;
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc, *oldest;
	u64 snap_size = 0;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	osdc = &ceph_inode_to_client(inode)->osdc;

	/* verify this is a writeable snap context */
	snapc = (void *)page->private;
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc != oldest) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, (void *)page->private);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u\n",
	     inode, page, page->index, page_off, len);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime,
				   &page, 1, 0, 0, true);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
out:
	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
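	/* hold an inode reference across the write; dropped below */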
	igrab(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}


/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}


/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	loff_t offset = (u64)req->r_pages[0]->index << PAGE_CACHE_SHIFT;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control *wbc = req->r_wbc;
	__s32 rc = -EIO;
	u64 bytes = 0;

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);

	if (rc >= 0) {
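		/* convert bytes written into whole pages, rounding up
		 * and allowing for a non-page-aligned start offset */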
		wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK)
			>> PAGE_CACHE_SHIFT;
		WARN_ON(wrote != req->r_num_pages);
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

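		/* pages past the written count were not written out;
		 * report them as skipped so the VM will retry them */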
		if (i >= wrote) {
			dout("inode %p skipping page %p\n", inode, page);
			wbc->pages_skipped++;
		}
		page->private = 0;
		ClearPagePrivate(page);
		ceph_put_snap_context(snapc);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);
		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}

/*
 * allocate a page vec, either directly, or if necessary, via the
 * mempool.  we avoid the mempool if we can because req->r_num_pages
 * may be less than the maximum write size.
 */
static void alloc_page_vec(struct ceph_client *client,
			   struct ceph_osd_request *req)
{
	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
			       GFP_NOFS);
	if (!req->r_pages) {
		req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
		req->r_pages_from_pool = 1;
		WARN_ON(!req->r_pages);
	}
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepages_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
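	/* clamp the write size to the wsize mount option, but keep it
	 * at least one page */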
	if (client->mount_args->wsize && client->mount_args->wsize < wsize)
		wsize = client->mount_args->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* if the caller can tolerate it, back off when the device is
	 * congested rather than blocking */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		dout(" writepages congested\n");
		wbc->encountered_congestion = 1;
		goto out_final;
	}

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout("  snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		struct ceph_osd_request_head *reqhead;
		struct ceph_osd_op *op;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
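		/* ask for however many dirty pages still fit in both the
		 * pagevec and this request */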
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			if (snapc != (void *)page->private) {
				dout("page snapc %p != oldest %p\n",
				     (void *)page->private, snapc);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = (u64)page->index << PAGE_CACHE_SHIFT;
				len = wsize;
				req = ceph_osdc_new_request(&client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 1);
				max_pages = req->r_num_pages;

				alloc_page_vec(client, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
				req->r_wbc = wbc;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);
			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = (u64)req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		reqhead = req->r_request->front.iov_base;
		op = (void *)(reqhead + 1);
		op->extent.length = cpu_to_le64(len);
		op->payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		ceph_osdc_start_request(&client->osdc, req, true);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	if (rc > 0)
		rc = 0;  /* vfs expects us to return 0 */
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
out_final:
	return rc;
}



/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	struct ceph_snap_context *snapc;
	int r;

	/* get a page */
retry:
	page = grab_cache_page_write_begin(mapping, index, 0);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	dout("write_begin file %p inode %p page %p %d~%d\n", file,
	     inode, page, (int)pos, (int)len);

retry_locked:
	/* writepages currently holds the page lock; wait for writeback
	 * anyway, in case that changes later */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
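	/* snap_rwsem is held across the write and dropped in
	 * ceph_write_end, or released on the failure paths below */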
	BUG_ON(!ci->i_snap_realm->cached_context);
	if (page->private &&
	    (void *)page->private != ci->i_snap_realm->cached_context) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		snapc = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc != (void *)page->private) {
			dout(" page %p snapc %p not current or oldest\n",
			     page, (void *)page->private);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			ceph_put_snap_context(snapc); /* drop oldest ref */
			snapc = ceph_get_snap_context((void *)page->private);
			unlock_page(page);
			if (ceph_queue_writeback(inode))
				igrab(inode);
			wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			goto retry;
		}

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		ceph_put_snap_context(snapc); /* drop oldest ref */
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, from+len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};


/*
 * vm ops
 */

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	loff_t off = (u64)page->index << PAGE_CACHE_SHIFT;
	loff_t size, len;
	struct page *locked_page = NULL;
	void *fsdata = NULL;
	int ret;

	size = i_size_read(inode);
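	/* write the whole page, or just up to EOF for the final,
	 * partial page */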
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);
	ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0,
			       &locked_page, &fsdata);
	WARN_ON(page != locked_page);
	if (!ret) {
		/*
		 * doing the following, instead of calling
		 * ceph_write_end. Note that we keep the
		 * page locked
		 */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		page_cache_release(page);
		ret = VM_FAULT_LOCKED;
	} else {
		ret = VM_FAULT_SIGBUS;
	}
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}