xref: /openbmc/linux/fs/ceph/addr.c (revision 7c272194)
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and write out the most recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
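
/*
 * Worked example of the thresholds above, assuming 4 KB pages
 * (PAGE_SHIFT == 12) and, say, congestion_kb == 8192: the bdi is
 * marked congested once 8192 >> 2 == 2048 pages are in flight, and
 * uncongested again when we drop below 2048 - (2048 >> 2) == 1536
 * pages, i.e. 75% of the "on" threshold, giving a little hysteresis.
 */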

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	int undo = 0;
	struct ceph_snap_context *snapc;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/*
	 * Note that we're grabbing a snapc ref here without holding
	 * any locks!
	 */
	snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);

	/* dirty the head */
	spin_lock(&inode->i_lock);
	if (ci->i_head_snapc == NULL)
		ci->i_head_snapc = ceph_get_snap_context(snapc);
	++ci->i_wrbuffer_ref_head;
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&inode->i_lock);

	/* now adjust page */
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(!PageUptodate(page));
		account_page_dirtied(page, page->mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);

		/*
		 * Reference snap context in page->private.  Also set
		 * PagePrivate so that we get invalidatepage callback.
		 */
		page->private = (unsigned long)snapc;
		SetPagePrivate(page);
	} else {
		dout("ANON set_page_dirty %p (raced truncate?)\n", page);
		undo = 1;
	}

	spin_unlock_irq(&mapping->tree_lock);

	if (undo)
		/* whoops, we failed to dirty the page */
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	BUG_ON(!PageDirty(page));
	return 1;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = (void *)page->private;

	BUG_ON(!PageLocked(page));
	BUG_ON(!page->private);
	BUG_ON(!PagePrivate(page));
	BUG_ON(!page->mapping);

	inode = page->mapping->host;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	if (offset == 0)
		ClearPageChecked(page);

	ci = ceph_inode(inode);
	if (offset == 0) {
		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
		     inode, page, page->index, offset);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
		page->private = 0;
		ClearPagePrivate(page);
	} else {
		dout("%p invalidatepage %p idx %lu partial dirty page\n",
		     inode, page, page->index);
	}
}

/* just a sanity check */
static int ceph_releasepage(struct page *page, gfp_t g)
{
	struct inode *inode = page->mapping ? page->mapping->host : NULL;
	dout("%p releasepage %p idx %lu\n", inode, page, page->index);
	WARN_ON(PageDirty(page));
	WARN_ON(page->private);
	WARN_ON(PagePrivate(page));
	return 0;
}

/*
 * read a single page, without unlocking it.
 */
static int readpage_nounlock(struct file *filp, struct page *page)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 len = PAGE_CACHE_SIZE;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  (u64)page->index << PAGE_CACHE_SHIFT, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		goto out;
	} else if (err < PAGE_CACHE_SIZE) {
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = readpage_nounlock(filp, page);
	unlock_page(page);
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	int rc, bytes;
	int i;

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	rc = le32_to_cpu(replyhead->result);
	bytes = le32_to_cpu(msg->hdr.data_len);

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) {
		struct page *page = req->r_pages[i];

		if (bytes < (int)PAGE_CACHE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_CACHE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	kfree(req->r_pages);
}
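
/*
 * Worked example of the zero-fill above (assuming 4 KB pages): if the
 * OSD returned only 6144 bytes for a 3-page read, bytes is 6144, then
 * 2048, then -2048 as the loop advances, so page 0 is left intact,
 * page 1 is zeroed from offset 2048, and page 2 is zeroed entirely.
 */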

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int ret;

	off = (u64)page->index << PAGE_CACHE_SHIFT;

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
	}
	len = (u64)nr_pages << PAGE_CACHE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
				    off, &len,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    NULL, false, 1, 0);
	if (!req)
		return -ENOMEM;

	/* build page vector */
	nr_pages = len >> PAGE_CACHE_SHIFT;
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
	ret = -ENOMEM;
	if (!pages)
		goto out;
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_NOFS)) {
			page_cache_release(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			goto out_pages;
		}
		pages[i] = page;
	}
	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);
	return nr_pages;

out_pages:
	/* ceph_release_page_vector() also frees the page array itself */
	ceph_release_page_vector(pages, nr_pages);
out:
	ceph_osdc_put_request(req);
	return ret;
}
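
/*
 * Note that start_read() only ever issues one contiguous run: given
 * pages at indices 5, 6, 7 and 9 on page_list, the first call reads
 * 5-7 and returns 3, and the ceph_readpages() loop below comes back
 * around to issue a second request for index 9.
 */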

/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file->f_dentry->d_inode;
	int rc = 0;

	dout("readpages %p file %p nr_pages %d\n", inode, file, nr_pages);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list);
		if (rc < 0)
			goto out;
		BUG_ON(rc == 0);
	}
out:
	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&inode->i_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&inode->i_lock);
	return snapc;
}
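
/*
 * E.g. if i_cap_snaps holds capsnaps with seqs 1 and 3 and only the
 * seq-3 one still has dirty pages, get_oldest_context() returns the
 * seq-3 context; the head context is returned only once every capsnap
 * has been fully written out.
 */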

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	loff_t page_off = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int len = PAGE_CACHE_SIZE;
	loff_t i_size;
	int err = 0;
	struct ceph_snap_context *snapc, *oldest;
	u64 snap_size = 0;
	long writeback_stat;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = (void *)page->private;
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, (void *)page->private);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (snap_size)
		i_size = snap_size;
	else
		i_size = i_size_read(inode);
	if (i_size < page_off + len)
		len = i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   ci->i_truncate_seq, ci->i_truncate_size,
				   &inode->i_mtime,
				   &page, 1, 0, 0, true);
	if (err < 0) {
		dout("writepage setting page/mapping error %d %p\n", err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		if (wbc)
			wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */
out:
	return err;
}
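
/*
 * Example of the partial-page trim above: with i_size == 10000 and
 * 4 KB pages, writing the page at index 2 gives page_off == 8192, and
 * len is cut from 4096 down to 10000 - 8192 == 1808 bytes so we never
 * write past EOF.
 */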

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	unlock_page(page);
	iput(inode);
	return err;
}


/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}
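
/*
 * pagevec_add() returns the space left in the pagevec after the add,
 * so a return of 0 above means the pagevec just filled up and gets
 * drained on the spot; the trailing pagevec_release() flushes whatever
 * remains.
 */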

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	__s32 rc = -EIO;
	u64 bytes = 0;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = ceph_caps_issued(ci);

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);

	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent.  The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = req->r_num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		writeback_stat =
			atomic_long_dec_return(&fsc->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
			clear_bdi_congested(&fsc->backing_dev_info,
					    BLK_RW_ASYNC);

		ceph_put_snap_context((void *)page->private);
		page->private = 0;
		ClearPagePrivate(page);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}

/*
 * allocate a page vec, either directly, or if necessary, via the
 * mempool.  we avoid the mempool if we can because req->r_num_pages
 * may be less than the maximum write size.
 */
static void alloc_page_vec(struct ceph_fs_client *fsc,
			   struct ceph_osd_request *req)
{
	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
			       GFP_NOFS);
	if (!req->r_pages) {
		req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
		req->r_pages_from_pool = 1;
		WARN_ON(!req->r_pages);
	}
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc;
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	fsc = ceph_inode_to_client(inode);
	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepages_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout("  snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		struct ceph_osd_request_head *reqhead;
		struct ceph_osd_op *op;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
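		/*
		 * e.g. with PAGEVEC_SIZE == 14 and 20 request slots still
		 * free, want == min(end - index, 13) + 1; the -1/+1 keeps
		 * the inclusive 'end' index inside the lookup window.
		 */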
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = (void *)page->private;
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = (unsigned long long)page->index
					<< PAGE_CACHE_SHIFT;
				len = wsize;
				req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 1, 0);

				if (!req) {
					rc = -ENOMEM;
					unlock_page(page);
					break;
				}

				max_pages = req->r_num_pages;

				alloc_page_vec(fsc, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat =
			       atomic_long_inc_return(&fsc->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = (u64)req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		reqhead = req->r_request->front.iov_base;
		op = (void *)(reqhead + 1);
		op->extent.length = cpu_to_le64(len);
		op->payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}


/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
			    loff_t pos, unsigned len,
			    struct page *page)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t page_off = pos & PAGE_CACHE_MASK;
	int pos_in_page = pos & ~PAGE_CACHE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

retry_locked:
	/*
	 * writepages currently holds the page lock while writing; if
	 * that ever changes, this wait keeps us from racing with
	 * writeback in flight.
	 */
	wait_on_page_writeback(page);

	/* check snap context */
	BUG_ON(!ci->i_snap_realm);
	down_read(&mdsc->snap_rwsem);
	BUG_ON(!ci->i_snap_realm->cached_context);
	snapc = (void *)page->private;
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		up_read(&mdsc->snap_rwsem);

		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_interruptible(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_nosnap;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
		return 0;

	/* past end of file? */
	i_size = inode->i_size;   /* caller holds i_mutex */

	if (i_size + len > inode->i_sb->s_maxbytes) {
		/* file is too big */
		r = -EINVAL;
		goto fail;
	}

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_CACHE_SIZE);
		return 0;
	}
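
	/*
	 * e.g. an extending write at pos == 4100, len == 100 when
	 * i_size is 4096 (4 KB pages): page_off == 4096 >= i_size, so
	 * above we zero bytes [0,4) and [104,4096) and skip reading
	 * the old page contents entirely.
	 */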

	/* we need to read it. */
	up_read(&mdsc->snap_rwsem);
	r = readpage_nounlock(file, page);
	if (r < 0)
		goto fail_nosnap;
	goto retry_locked;

fail:
	up_read(&mdsc->snap_rwsem);
fail_nosnap:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page *page;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;
		*pagep = page;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting and drop read lock on
 * mdsc->snap_rwsem.
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, from+len);

	/* did file size increase? */
	/* (no need for i_size_read(); the caller holds i_mutex) */
	if (pos+copied > inode->i_size)
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	up_read(&mdsc->snap_rwsem);
	page_cache_release(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}
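
/*
 * Example of the short-copy zeroing above: pos == 1000, len == 100 but
 * only 40 bytes copied gives from == 1000, so bytes [1040,1100) of the
 * page are zeroed rather than left stale; note the end offset must be
 * from+len, not len, or a write that doesn't start at the head of the
 * page would zero the wrong range.
 */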

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};


/*
 * vm ops
 */

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = vmf->page;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	loff_t off = (loff_t)page->index << PAGE_CACHE_SHIFT;
	loff_t size, len;
	int ret;

	size = i_size_read(inode);
	if (off + PAGE_CACHE_SIZE <= size)
		len = PAGE_CACHE_SIZE;
	else
		len = size & ~PAGE_CACHE_MASK;

	dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
	     off, len, page, page->index);

	lock_page(page);

	ret = VM_FAULT_NOPAGE;
	if ((off > size) ||
	    (page->mapping != inode->i_mapping))
		goto out;

	ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
	if (ret == 0) {
		/* success.  we'll keep the page locked. */
		set_page_dirty(page);
		up_read(&mdsc->snap_rwsem);
		ret = VM_FAULT_LOCKED;
	} else {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else
			ret = VM_FAULT_SIGBUS;
	}
out:
	dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
	if (ret != VM_FAULT_LOCKED)
		unlock_page(page);
	return ret;
}

static struct vm_operations_struct ceph_vmops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
1227