// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped), at which point we are writing the most
 * recently dirtied pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
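
/*
 * Worked example (illustrative only, assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12, and a hypothetical congestion_kb of 16384):
 *
 *   CONGESTION_ON_THRESH(16384)  = 16384 >> 2         = 4096 pages
 *   CONGESTION_OFF_THRESH(16384) = 4096 - (4096 >> 2) = 3072 pages
 *
 * i.e. the bdi is flagged congested once ~16 MiB of dirty pages are
 * in flight, and the flag is only cleared again after we drop below
 * 75% of that, giving the thresholds some hysteresis.
 */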

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct page *page, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	attach_page_private(page, snapc);

	return __set_page_dirty_nobuffers(page);
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	wait_on_page_fscache(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != thp_size(page)) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	snapc = detach_page_private(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
}

static int ceph_releasepage(struct page *page, gfp_t gfp)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	if (PageFsCache(page)) {
		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
			return 0;
		wait_on_page_fscache(page);
	}
	return !PagePrivate(page);
}

static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
{
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_layout *lo = &ci->i_layout;
	u32 blockoff;
	u64 blockno;

	/* Expand the start downward */
	blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
	rreq->start = blockno * lo->stripe_unit;
	rreq->len += blockoff;

	/* Now, round up the length to the next block */
	rreq->len = roundup(rreq->len, lo->stripe_unit);
}
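
/*
 * Illustrative example (not from the source, assuming the default
 * 4 MiB stripe unit): a readahead request for 1 MiB at file offset
 * 6 MiB has blockno = 1 and blockoff = 2 MiB, so the start is pulled
 * back to 4 MiB and the length grows to 3 MiB; rounding up to the
 * stripe unit then yields a single aligned 4 MiB read.
 */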

static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
{
	struct inode *inode = subreq->rreq->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 objno, objoff;
	u32 xlen;

	/* Truncate the extent at the end of the current block */
	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
				      &objno, &objoff, &xlen);
	subreq->len = min(xlen, fsc->mount_options->rsize);
	return true;
}
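
/*
 * Example (illustrative, assuming the default 4 MiB object size): a
 * 2 MiB subrequest starting at file offset 3 MiB maps to objoff =
 * 3 MiB within object 0, so xlen = 1 MiB and the subrequest is
 * clamped to end at the object boundary; netfs issues the remainder
 * as a further subrequest.  rsize caps the length as well when the
 * mount option is smaller.
 */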

static void finish_netfs_read(struct ceph_osd_request *req)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct netfs_read_subrequest *subreq = req->r_priv;
	int num_pages;
	int err = req->r_result;

	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
				 req->r_end_latency, osd_data->length, err);

	dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
	     subreq->len, i_size_read(req->r_inode));

	/* no object means success but no data */
	if (err == -ENOENT)
		err = 0;
	else if (err == -EBLOCKLISTED)
		fsc->blocklisted = true;

	if (err >= 0 && err < subreq->len)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err, true);

	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
	ceph_put_page_vector(osd_data->pages, num_pages, false);
	iput(req->r_inode);
}

static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	struct page **pages;
	size_t page_off;
	int err = 0;
	u64 len = subreq->len;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
			0, 1, CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
			NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
	if (err < 0) {
		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
		goto out;
	}

	/* should always give us a page-aligned read */
	WARN_ON_ONCE(page_off);
	len = err;

	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	err = ceph_osdc_start_request(req->r_osdc, req, false);
	if (err)
		iput(inode);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	dout("%s: result %d\n", __func__, err);
}

static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
}

static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int got = (uintptr_t)priv;

	if (got)
		ceph_put_cap_refs(ci, got);
}

static const struct netfs_read_request_ops ceph_netfs_read_ops = {
	.init_rreq		= ceph_init_rreq,
	.is_cache_enabled	= ceph_is_cache_enabled,
	.begin_cache_operation	= ceph_begin_cache_operation,
	.issue_op		= ceph_netfs_issue_op,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.clamp_length		= ceph_netfs_clamp_length,
	.check_write_begin	= ceph_netfs_check_write_begin,
	.cleanup		= ceph_readahead_cleanup,
};

/* read a single page, without unlocking it. */
static int ceph_readpage(struct file *file, struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vino vino = ceph_vino(inode);
	u64 off = page_offset(page);
	u64 len = thp_size(page);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0) {
			unlock_page(page);
			return -EINVAL;
		}
		zero_user_segment(page, 0, thp_size(page));
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
	     vino.ino, vino.snap, file, off, len, page, page->index);

	return netfs_readpage(file, page, &ceph_netfs_read_ops, NULL);
}

static void ceph_readahead(struct readahead_control *ractl)
{
	struct inode *inode = file_inode(ractl->file);
	struct ceph_file_info *fi = ractl->file->private_data;
	struct ceph_rw_context *rw_ctx;
	int got = 0;
	int ret = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return;

	rw_ctx = ceph_find_rw_context(fi);
	if (!rw_ctx) {
		/*
		 * readahead callers do not necessarily hold Fcb caps
		 * (e.g. fadvise, madvise).
		 */
		int want = CEPH_CAP_FILE_CACHE;

		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
		if (ret < 0)
			dout("start_read %p, error getting cap\n", inode);
		else if (!(got & want))
			dout("start_read %p, no cache cap\n", inode);

		if (ret <= 0)
			return;
	}
	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
}

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
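
/*
 * For instance (illustrative): with capsnaps for snapc seq 10 and
 * seq 20 on i_cap_snaps (oldest first) plus dirty pages in the head
 * context, this returns the seq-10 snapc; only once all of its dirty
 * pages are written back will the seq-20 snapc, and finally the head
 * snapc, be handed out for writeback.
 */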

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + thp_size(page))
		end = page_offset(page) + thp_size(page);
	return end > start ? end - start : 0;
}
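
/*
 * Example (illustrative): for a non-head snapc whose capsnap settled
 * at size 6000, a page at offset 4096 yields 6000 - 4096 = 1904
 * writable bytes, so the tail of the snapshot is not padded out to a
 * full page on the OSD.
 */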

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, thp_size(page));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		end_page_writeback(page);
		return PTR_ERR(req);
	}

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}
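
/*
 * Example of the EOF clamp above (illustrative): with i_size 10000, a
 * dirty page at offset 8192 is sent as an 1808-byte OSD write, while
 * a page at offset 12288 sits entirely past EOF and is invalidated
 * instead of being written.
 */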

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	unsigned int len = 0;
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			dout("unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' get written. */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, thp_size(page));
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
							     CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += thp_size(page);
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused pages to the beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     thp_size(page) - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += thp_size(page);
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}
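
/*
 * E.g. (illustrative): if the oldest dirty snapc has seq 8, a page
 * snapc with seq 5 has already been written out, so 5 <= 8 reports it
 * writeable-or-written; a page dirtied under a seq-12 snapc has to
 * wait its turn.
 */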

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context. Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		return ERR_PTR(-EIO);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct page *page, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(page);
	if (snapc) {
		int r;

		unlock_page(page);
		put_page(page);
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = NULL;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	/*
	 * Uninlining should have already been done and everything updated, EXCEPT
	 * for inline_version sent to the MDS.
	 */
	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		page = grab_cache_page_write_begin(mapping, index, flags);
		if (!page)
			return -ENOMEM;

		/*
		 * The inline_version on a new inode is set to 1. If that's the
		 * case, then the page is brand new and isn't yet Uptodate.
		 */
		r = 0;
		if (index == 0 && ci->i_inline_version != 1) {
			if (!PageUptodate(page)) {
				WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
					  ci->i_inline_version);
				r = -EINVAL;
			}
			goto out;
		}
		zero_user_segment(page, 0, thp_size(page));
		SetPageUptodate(page);
		goto out;
	}

	r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &page, NULL,
			      &ceph_netfs_read_ops, NULL);
out:
	if (r == 0)
		wait_on_page_fscache(page);
	if (r < 0) {
		if (page)
			put_page(page);
	} else {
		WARN_ON_ONCE(!PageLocked(page));
		*pagep = page;
	}
	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readahead = ceph_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
	     inode, ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu got cap refs on %s\n",
	     inode, off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
		     inode, off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu read inline data ret %x\n",
		     inode, off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}

static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		err = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (err < 0)
			goto out_free;
	}

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char	*data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}
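
/*
 * In short: the probe above issues a throwaway STAT (to prove
 * POOL_READ) and an exclusive CREATE (to prove POOL_WRITE, where
 * -EEXIST also counts as success since the object may already exist)
 * against the first object of the file, then caches the outcome in
 * mdsc->pool_perm_tree so each pool/namespace pair is only probed
 * once per mount.
 */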

int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}