xref: /openbmc/linux/fs/ceph/addr.c (revision 7467b044)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
23d14c5d2SYehuda Sadeh #include <linux/ceph/ceph_debug.h>
31d3576fdSSage Weil 
41d3576fdSSage Weil #include <linux/backing-dev.h>
51d3576fdSSage Weil #include <linux/fs.h>
61d3576fdSSage Weil #include <linux/mm.h>
7d7bdba1cSDavid Howells #include <linux/swap.h>
81d3576fdSSage Weil #include <linux/pagemap.h>
95a0e3ad6STejun Heo #include <linux/slab.h>
101d3576fdSSage Weil #include <linux/pagevec.h>
111d3576fdSSage Weil #include <linux/task_io_accounting_ops.h>
12f361bf4aSIngo Molnar #include <linux/signal.h>
135c308356SJeff Layton #include <linux/iversion.h>
1497e27aaaSXiubo Li #include <linux/ktime.h>
15f0702876SJeff Layton #include <linux/netfs.h>
161d3576fdSSage Weil 
171d3576fdSSage Weil #include "super.h"
183d14c5d2SYehuda Sadeh #include "mds_client.h"
1999ccbd22SMilosz Tanski #include "cache.h"
2097e27aaaSXiubo Li #include "metric.h"
213d14c5d2SYehuda Sadeh #include <linux/ceph/osd_client.h>
2208c1ac50SIlya Dryomov #include <linux/ceph/striper.h>
231d3576fdSSage Weil 
241d3576fdSSage Weil /*
251d3576fdSSage Weil  * Ceph address space ops.
261d3576fdSSage Weil  *
271d3576fdSSage Weil  * There are a few funny things going on here.
281d3576fdSSage Weil  *
291d3576fdSSage Weil  * The page->private field is used to reference a struct
301d3576fdSSage Weil  * ceph_snap_context for _every_ dirty page.  This indicates which
311d3576fdSSage Weil  * snapshot the page was logically dirtied in, and thus which snap
321d3576fdSSage Weil  * context needs to be associated with the osd write during writeback.
331d3576fdSSage Weil  *
341d3576fdSSage Weil  * Similarly, struct ceph_inode_info maintains a set of counters to
3525985edcSLucas De Marchi  * count dirty pages on the inode.  In the absence of snapshots,
361d3576fdSSage Weil  * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
371d3576fdSSage Weil  *
381d3576fdSSage Weil  * When a snapshot is taken (that is, when the client receives
391d3576fdSSage Weil  * notification that a snapshot was taken), each inode with caps and
401d3576fdSSage Weil  * with dirty pages (dirty pages implies there is a cap) gets a new
411d3576fdSSage Weil  * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
421d3576fdSSage Weil  * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
431d3576fdSSage Weil  * moved to capsnap->dirty. (Unless a sync write is currently in
441d3576fdSSage Weil  * progress.  In that case, the capsnap is said to be "pending", new
451d3576fdSSage Weil  * writes cannot start, and the capsnap isn't "finalized" until the
461d3576fdSSage Weil  * write completes (or fails) and a final size/mtime for the inode for
471d3576fdSSage Weil  * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
481d3576fdSSage Weil  *
491d3576fdSSage Weil  * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
501d3576fdSSage Weil  * we look for the first capsnap in i_cap_snaps and write out pages in
511d3576fdSSage Weil  * that snap context _only_.  Then we move on to the next capsnap,
521d3576fdSSage Weil  * eventually reaching the "live" or "head" context (i.e., pages that
531d3576fdSSage Weil  * are not yet snapped) and write out the most recently dirtied
541d3576fdSSage Weil  * pages.
551d3576fdSSage Weil  *
561d3576fdSSage Weil  * Invalidate and so forth must take care to ensure the dirty page
571d3576fdSSage Weil  * accounting is preserved.
581d3576fdSSage Weil  */
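
/*
 * A simplified illustration of the accounting described above (the
 * numbers are only an example):
 *
 *   3 dirty pages, no snapshots:
 *     i_wrbuffer_ref = 3, i_wrbuffer_ref_head = 3,
 *     every dirty page's page->private references ci->i_head_snapc
 *
 *   a snapshot is taken:
 *     a new ceph_cap_snap is appended to i_cap_snaps and the head
 *     count moves to it:
 *     i_wrbuffer_ref = 3, i_wrbuffer_ref_head = 0, capsnap->dirty = 3
 *
 *   2 more pages are dirtied afterwards:
 *     i_wrbuffer_ref = 5, i_wrbuffer_ref_head = 2,
 *     the new pages reference the new head snap context
 *
 * On writeback, the 3 pages belonging to the capsnap must be written
 * out (in that snap context) before the 2 newer "head" pages.
 */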
591d3576fdSSage Weil 
602baba250SYehuda Sadeh #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
612baba250SYehuda Sadeh #define CONGESTION_OFF_THRESH(congestion_kb)				\
622baba250SYehuda Sadeh 	(CONGESTION_ON_THRESH(congestion_kb) -				\
632baba250SYehuda Sadeh 	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
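
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and an
 * arbitrary congestion_kb of 8192:
 *
 *   CONGESTION_ON_THRESH(8192)  = 8192 >> 2          = 2048 pages
 *   CONGESTION_OFF_THRESH(8192) = 2048 - (2048 >> 2) = 1536 pages
 *
 * i.e. writeback is treated as congested once roughly 8 MiB of dirty
 * data is in flight, and as uncongested again once that drops below
 * 75% of the on-threshold.
 */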
642baba250SYehuda Sadeh 
65d801327dSJeff Layton static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
66fac47b43SXiubo Li 					struct folio **foliop, void **_fsdata);
67d801327dSJeff Layton 
6861600ef8SYan, Zheng static inline struct ceph_snap_context *page_snap_context(struct page *page)
6961600ef8SYan, Zheng {
7061600ef8SYan, Zheng 	if (PagePrivate(page))
7161600ef8SYan, Zheng 		return (void *)page->private;
7261600ef8SYan, Zheng 	return NULL;
7361600ef8SYan, Zheng }
741d3576fdSSage Weil 
751d3576fdSSage Weil /*
761d3576fdSSage Weil  * Dirty a page.  Optimistically adjust accounting, on the assumption
771d3576fdSSage Weil  * that we won't race with invalidate.  If we do, readjust.
781d3576fdSSage Weil  */
798fb72b4aSMatthew Wilcox (Oracle) static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
801d3576fdSSage Weil {
811d3576fdSSage Weil 	struct inode *inode;
821d3576fdSSage Weil 	struct ceph_inode_info *ci;
831d3576fdSSage Weil 	struct ceph_snap_context *snapc;
841d3576fdSSage Weil 
858fb72b4aSMatthew Wilcox (Oracle) 	if (folio_test_dirty(folio)) {
868fb72b4aSMatthew Wilcox (Oracle) 		dout("%p dirty_folio %p idx %lu -- already dirty\n",
878fb72b4aSMatthew Wilcox (Oracle) 		     mapping->host, folio, folio->index);
88642d51fbSXiubo Li 		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
898fb72b4aSMatthew Wilcox (Oracle) 		return false;
901d3576fdSSage Weil 	}
911d3576fdSSage Weil 
921d3576fdSSage Weil 	inode = mapping->host;
931d3576fdSSage Weil 	ci = ceph_inode(inode);
941d3576fdSSage Weil 
951d3576fdSSage Weil 	/* dirty the head */
96be655596SSage Weil 	spin_lock(&ci->i_ceph_lock);
975dda377cSYan, Zheng 	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
985dda377cSYan, Zheng 	if (__ceph_have_pending_cap_snap(ci)) {
995dda377cSYan, Zheng 		struct ceph_cap_snap *capsnap =
1005dda377cSYan, Zheng 				list_last_entry(&ci->i_cap_snaps,
1015dda377cSYan, Zheng 						struct ceph_cap_snap,
1025dda377cSYan, Zheng 						ci_item);
1035dda377cSYan, Zheng 		snapc = ceph_get_snap_context(capsnap->context);
1045dda377cSYan, Zheng 		capsnap->dirty_pages++;
1055dda377cSYan, Zheng 	} else {
1065dda377cSYan, Zheng 		BUG_ON(!ci->i_head_snapc);
1075dda377cSYan, Zheng 		snapc = ceph_get_snap_context(ci->i_head_snapc);
1081d3576fdSSage Weil 		++ci->i_wrbuffer_ref_head;
1095dda377cSYan, Zheng 	}
1101d3576fdSSage Weil 	if (ci->i_wrbuffer_ref == 0)
1110444d76aSDave Chinner 		ihold(inode);
1121d3576fdSSage Weil 	++ci->i_wrbuffer_ref;
1138fb72b4aSMatthew Wilcox (Oracle) 	dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
1141d3576fdSSage Weil 	     "snapc %p seq %lld (%d snaps)\n",
1158fb72b4aSMatthew Wilcox (Oracle) 	     mapping->host, folio, folio->index,
1161d3576fdSSage Weil 	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
1171d3576fdSSage Weil 	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
1181d3576fdSSage Weil 	     snapc, snapc->seq, snapc->num_snaps);
119be655596SSage Weil 	spin_unlock(&ci->i_ceph_lock);
1201d3576fdSSage Weil 
1211d3576fdSSage Weil 	/*
1228fb72b4aSMatthew Wilcox (Oracle) 	 * Reference snap context in folio->private.  Also set
1239872f4deSMatthew Wilcox (Oracle) 	 * PagePrivate so that we get invalidate_folio callback.
1241d3576fdSSage Weil 	 */
125642d51fbSXiubo Li 	VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
1268fb72b4aSMatthew Wilcox (Oracle) 	folio_attach_private(folio, snapc);
1271d3576fdSSage Weil 
1288fb72b4aSMatthew Wilcox (Oracle) 	return ceph_fscache_dirty_folio(mapping, folio);
1291d3576fdSSage Weil }
1301d3576fdSSage Weil 
1311d3576fdSSage Weil /*
1329872f4deSMatthew Wilcox (Oracle)  * If we are truncating the full folio (i.e. offset == 0), adjust the
1339872f4deSMatthew Wilcox (Oracle)  * dirty folio counters appropriately.  Only called if there is private
1349872f4deSMatthew Wilcox (Oracle)  * data on the folio.
1351d3576fdSSage Weil  */
1369872f4deSMatthew Wilcox (Oracle) static void ceph_invalidate_folio(struct folio *folio, size_t offset,
1379872f4deSMatthew Wilcox (Oracle) 				size_t length)
1381d3576fdSSage Weil {
1394ce1e9adSAlexander Beregalov 	struct inode *inode;
1401d3576fdSSage Weil 	struct ceph_inode_info *ci;
141379fc7faSJeff Layton 	struct ceph_snap_context *snapc;
1421d3576fdSSage Weil 
1439872f4deSMatthew Wilcox (Oracle) 	inode = folio->mapping->host;
144b150f5c1SMilosz Tanski 	ci = ceph_inode(inode);
145b150f5c1SMilosz Tanski 
1469872f4deSMatthew Wilcox (Oracle) 	if (offset != 0 || length != folio_size(folio)) {
1479872f4deSMatthew Wilcox (Oracle) 		dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
1489872f4deSMatthew Wilcox (Oracle) 		     inode, folio->index, offset, length);
149b150f5c1SMilosz Tanski 		return;
150b150f5c1SMilosz Tanski 	}
1514ce1e9adSAlexander Beregalov 
1529872f4deSMatthew Wilcox (Oracle) 	WARN_ON(!folio_test_locked(folio));
153642d51fbSXiubo Li 	if (folio_test_private(folio)) {
1549872f4deSMatthew Wilcox (Oracle) 		dout("%p invalidate_folio idx %lu full dirty page\n",
1559872f4deSMatthew Wilcox (Oracle) 		     inode, folio->index);
156b150f5c1SMilosz Tanski 
1579872f4deSMatthew Wilcox (Oracle) 		snapc = folio_detach_private(folio);
1581d3576fdSSage Weil 		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
1591d3576fdSSage Weil 		ceph_put_snap_context(snapc);
1601d3576fdSSage Weil 	}
1611d3576fdSSage Weil 
1629872f4deSMatthew Wilcox (Oracle) 	folio_wait_fscache(folio);
163400e1286SJeff Layton }
164400e1286SJeff Layton 
1655e414655SMatthew Wilcox (Oracle) static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
1661d3576fdSSage Weil {
1675e414655SMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
168400e1286SJeff Layton 
1695e414655SMatthew Wilcox (Oracle) 	dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
1705e414655SMatthew Wilcox (Oracle) 	     ceph_vinop(inode),
1715e414655SMatthew Wilcox (Oracle) 	     folio->index, folio_test_dirty(folio) ? "" : "not ");
172400e1286SJeff Layton 
1735e414655SMatthew Wilcox (Oracle) 	if (folio_test_private(folio))
1745e414655SMatthew Wilcox (Oracle) 		return false;
17599ccbd22SMilosz Tanski 
1765e414655SMatthew Wilcox (Oracle) 	if (folio_test_fscache(folio)) {
177d7bdba1cSDavid Howells 		if (current_is_kswapd() || !(gfp & __GFP_FS))
1785e414655SMatthew Wilcox (Oracle) 			return false;
1795e414655SMatthew Wilcox (Oracle) 		folio_wait_fscache(folio);
1807c46b318SJeff Layton 	}
181400e1286SJeff Layton 	ceph_fscache_note_page_release(inode);
1825e414655SMatthew Wilcox (Oracle) 	return true;
1831d3576fdSSage Weil }
1841d3576fdSSage Weil 
1856a19114bSDavid Howells static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
186f0702876SJeff Layton {
187a25cedb4SJeff Layton 	struct inode *inode = rreq->inode;
188f0702876SJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
189f0702876SJeff Layton 	struct ceph_file_layout *lo = &ci->i_layout;
190f0702876SJeff Layton 	u32 blockoff;
191f0702876SJeff Layton 	u64 blockno;
192f0702876SJeff Layton 
193f0702876SJeff Layton 	/* Expand the start downward */
194f0702876SJeff Layton 	blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
195f0702876SJeff Layton 	rreq->start = blockno * lo->stripe_unit;
196f0702876SJeff Layton 	rreq->len += blockoff;
197f0702876SJeff Layton 
198f0702876SJeff Layton 	/* Now, round up the length to the next block */
199f0702876SJeff Layton 	rreq->len = roundup(rreq->len, lo->stripe_unit);
200f0702876SJeff Layton }
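
/*
 * Worked example (illustrative only), assuming a stripe unit of 4 MiB:
 * a readahead request covering [5 MiB, 7 MiB) has its start rounded
 * down to 4 MiB (the 1 MiB blockoff is added to the length, giving
 * 3 MiB), and the length is then rounded up to the next stripe unit,
 * so the expanded request covers [4 MiB, 8 MiB).
 */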
201f0702876SJeff Layton 
2026a19114bSDavid Howells static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
203f0702876SJeff Layton {
204a25cedb4SJeff Layton 	struct inode *inode = subreq->rreq->inode;
205f0702876SJeff Layton 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
206f0702876SJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
207f0702876SJeff Layton 	u64 objno, objoff;
208f0702876SJeff Layton 	u32 xlen;
209f0702876SJeff Layton 
210f0702876SJeff Layton 	/* Truncate the extent at the end of the current block */
211f0702876SJeff Layton 	ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
212f0702876SJeff Layton 				      &objno, &objoff, &xlen);
213f0702876SJeff Layton 	subreq->len = min(xlen, fsc->mount_options->rsize);
214f0702876SJeff Layton 	return true;
215f0702876SJeff Layton }
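
/*
 * Worked example (illustrative only), assuming the default file layout
 * (4 MiB objects, stripe_count == 1) and an rsize of at least 4 MiB:
 * a 2 MiB subrequest starting at file offset 3 MiB maps to the last
 * 1 MiB of object 0, so xlen == 1 MiB and subreq->len is clamped to
 * 1 MiB; the remaining bytes are covered by subsequent subrequests.
 */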
216f0702876SJeff Layton 
217f0702876SJeff Layton static void finish_netfs_read(struct ceph_osd_request *req)
218f0702876SJeff Layton {
219f0702876SJeff Layton 	struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
220f0702876SJeff Layton 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
2216a19114bSDavid Howells 	struct netfs_io_subrequest *subreq = req->r_priv;
222f0702876SJeff Layton 	int num_pages;
223f0702876SJeff Layton 	int err = req->r_result;
224f0702876SJeff Layton 
2258ae99ae2SXiubo Li 	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
226903f4fecSXiubo Li 				 req->r_end_latency, osd_data->length, err);
227f0702876SJeff Layton 
228f0702876SJeff Layton 	dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
229f0702876SJeff Layton 	     subreq->len, i_size_read(req->r_inode));
230f0702876SJeff Layton 
231f0702876SJeff Layton 	/* no object means success but no data */
232f0702876SJeff Layton 	if (err == -ENOENT)
233f0702876SJeff Layton 		err = 0;
234f0702876SJeff Layton 	else if (err == -EBLOCKLISTED)
235f0702876SJeff Layton 		fsc->blocklisted = true;
236f0702876SJeff Layton 
237f0702876SJeff Layton 	if (err >= 0 && err < subreq->len)
238f0702876SJeff Layton 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
239f0702876SJeff Layton 
240*7467b044SJeff Layton 	netfs_subreq_terminated(subreq, err, false);
241f0702876SJeff Layton 
242f0702876SJeff Layton 	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
243f0702876SJeff Layton 	ceph_put_page_vector(osd_data->pages, num_pages, false);
244f0702876SJeff Layton 	iput(req->r_inode);
245f0702876SJeff Layton }
246f0702876SJeff Layton 
2476a19114bSDavid Howells static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
2485b19f1ebSDavid Howells {
2496a19114bSDavid Howells 	struct netfs_io_request *rreq = subreq->rreq;
2505b19f1ebSDavid Howells 	struct inode *inode = rreq->inode;
2515b19f1ebSDavid Howells 	struct ceph_mds_reply_info_parsed *rinfo;
2525b19f1ebSDavid Howells 	struct ceph_mds_reply_info_in *iinfo;
2535b19f1ebSDavid Howells 	struct ceph_mds_request *req;
2545b19f1ebSDavid Howells 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
2555b19f1ebSDavid Howells 	struct ceph_inode_info *ci = ceph_inode(inode);
2565b19f1ebSDavid Howells 	struct iov_iter iter;
2575b19f1ebSDavid Howells 	ssize_t err = 0;
2585b19f1ebSDavid Howells 	size_t len;
2595eed80fbSXiubo Li 	int mode;
2605b19f1ebSDavid Howells 
2615b19f1ebSDavid Howells 	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
262f18a3785SDavid Howells 	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
2635b19f1ebSDavid Howells 
2645b19f1ebSDavid Howells 	if (subreq->start >= inode->i_size)
2655b19f1ebSDavid Howells 		goto out;
2665b19f1ebSDavid Howells 
2675b19f1ebSDavid Howells 	/* We need to fetch the inline data. */
2685eed80fbSXiubo Li 	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
2695eed80fbSXiubo Li 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2705b19f1ebSDavid Howells 	if (IS_ERR(req)) {
2715b19f1ebSDavid Howells 		err = PTR_ERR(req);
2725b19f1ebSDavid Howells 		goto out;
2735b19f1ebSDavid Howells 	}
2745b19f1ebSDavid Howells 	req->r_ino1 = ci->i_vino;
2755b19f1ebSDavid Howells 	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
2765b19f1ebSDavid Howells 	req->r_num_caps = 2;
2775b19f1ebSDavid Howells 
2785b19f1ebSDavid Howells 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2795b19f1ebSDavid Howells 	if (err < 0)
2805b19f1ebSDavid Howells 		goto out;
2815b19f1ebSDavid Howells 
2825b19f1ebSDavid Howells 	rinfo = &req->r_reply_info;
2835b19f1ebSDavid Howells 	iinfo = &rinfo->targeti;
2845b19f1ebSDavid Howells 	if (iinfo->inline_version == CEPH_INLINE_NONE) {
2855b19f1ebSDavid Howells 		/* The data got uninlined */
2865b19f1ebSDavid Howells 		ceph_mdsc_put_request(req);
2875b19f1ebSDavid Howells 		return false;
2885b19f1ebSDavid Howells 	}
2895b19f1ebSDavid Howells 
2905b19f1ebSDavid Howells 	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
2915b19f1ebSDavid Howells 	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
2925b19f1ebSDavid Howells 	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
2935b19f1ebSDavid Howells 	if (err == 0)
2945b19f1ebSDavid Howells 		err = -EFAULT;
2955b19f1ebSDavid Howells 
2965b19f1ebSDavid Howells 	ceph_mdsc_put_request(req);
2975b19f1ebSDavid Howells out:
2985b19f1ebSDavid Howells 	netfs_subreq_terminated(subreq, err, false);
2995b19f1ebSDavid Howells 	return true;
3005b19f1ebSDavid Howells }
3015b19f1ebSDavid Howells 
302f18a3785SDavid Howells static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
303f0702876SJeff Layton {
3046a19114bSDavid Howells 	struct netfs_io_request *rreq = subreq->rreq;
305a25cedb4SJeff Layton 	struct inode *inode = rreq->inode;
306f0702876SJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
307f0702876SJeff Layton 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
308f0702876SJeff Layton 	struct ceph_osd_request *req;
309f0702876SJeff Layton 	struct ceph_vino vino = ceph_vino(inode);
310f0702876SJeff Layton 	struct iov_iter iter;
311f0702876SJeff Layton 	struct page **pages;
312f0702876SJeff Layton 	size_t page_off;
313f0702876SJeff Layton 	int err = 0;
314f0702876SJeff Layton 	u64 len = subreq->len;
315f0702876SJeff Layton 
3165b19f1ebSDavid Howells 	if (ci->i_inline_version != CEPH_INLINE_NONE &&
3175b19f1ebSDavid Howells 	    ceph_netfs_issue_op_inline(subreq))
3185b19f1ebSDavid Howells 		return;
3195b19f1ebSDavid Howells 
320f0702876SJeff Layton 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
321f0702876SJeff Layton 			0, 1, CEPH_OSD_OP_READ,
322f0702876SJeff Layton 			CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
323f0702876SJeff Layton 			NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
324f0702876SJeff Layton 	if (IS_ERR(req)) {
325f0702876SJeff Layton 		err = PTR_ERR(req);
326f0702876SJeff Layton 		req = NULL;
327f0702876SJeff Layton 		goto out;
328f0702876SJeff Layton 	}
329f0702876SJeff Layton 
330f0702876SJeff Layton 	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
331f0702876SJeff Layton 	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
332f0702876SJeff Layton 	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
333f0702876SJeff Layton 	if (err < 0) {
334f0702876SJeff Layton 		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
335f0702876SJeff Layton 		goto out;
336f0702876SJeff Layton 	}
337f0702876SJeff Layton 
338f0702876SJeff Layton 	/* should always give us a page-aligned read */
339f0702876SJeff Layton 	WARN_ON_ONCE(page_off);
340f0702876SJeff Layton 	len = err;
341f0702876SJeff Layton 
342f0702876SJeff Layton 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
343f0702876SJeff Layton 	req->r_callback = finish_netfs_read;
344f0702876SJeff Layton 	req->r_priv = subreq;
345f0702876SJeff Layton 	req->r_inode = inode;
346f0702876SJeff Layton 	ihold(inode);
347f0702876SJeff Layton 
348f0702876SJeff Layton 	err = ceph_osdc_start_request(req->r_osdc, req, false);
349f0702876SJeff Layton 	if (err)
350f0702876SJeff Layton 		iput(inode);
351f0702876SJeff Layton out:
352f0702876SJeff Layton 	ceph_osdc_put_request(req);
353f0702876SJeff Layton 	if (err)
354f0702876SJeff Layton 		netfs_subreq_terminated(subreq, err, false);
355f0702876SJeff Layton 	dout("%s: result %d\n", __func__, err);
356f0702876SJeff Layton }
357f0702876SJeff Layton 
358a5c9dc44SDavid Howells static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
359a5c9dc44SDavid Howells {
360a5c9dc44SDavid Howells 	struct inode *inode = rreq->inode;
361a5c9dc44SDavid Howells 	int got = 0, want = CEPH_CAP_FILE_CACHE;
362a5c9dc44SDavid Howells 	int ret = 0;
363a5c9dc44SDavid Howells 
364a5c9dc44SDavid Howells 	if (rreq->origin != NETFS_READAHEAD)
365a5c9dc44SDavid Howells 		return 0;
366a5c9dc44SDavid Howells 
367a5c9dc44SDavid Howells 	if (file) {
368a5c9dc44SDavid Howells 		struct ceph_rw_context *rw_ctx;
369a5c9dc44SDavid Howells 		struct ceph_file_info *fi = file->private_data;
370a5c9dc44SDavid Howells 
371a5c9dc44SDavid Howells 		rw_ctx = ceph_find_rw_context(fi);
372a5c9dc44SDavid Howells 		if (rw_ctx)
373a5c9dc44SDavid Howells 			return 0;
374a5c9dc44SDavid Howells 	}
375a5c9dc44SDavid Howells 
376a5c9dc44SDavid Howells 	/*
377a5c9dc44SDavid Howells 	 * readahead callers do not necessarily hold Fcb caps
378a5c9dc44SDavid Howells 	 * (e.g. fadvise, madvise).
379a5c9dc44SDavid Howells 	 */
380a5c9dc44SDavid Howells 	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
381a5c9dc44SDavid Howells 	if (ret < 0) {
382a5c9dc44SDavid Howells 		dout("start_read %p, error getting cap\n", inode);
383a5c9dc44SDavid Howells 		return ret;
384a5c9dc44SDavid Howells 	}
385a5c9dc44SDavid Howells 
386a5c9dc44SDavid Howells 	if (!(got & want)) {
387a5c9dc44SDavid Howells 		dout("start_read %p, no cache cap\n", inode);
388a5c9dc44SDavid Howells 		return -EACCES;
389a5c9dc44SDavid Howells 	}
390a5c9dc44SDavid Howells 	if (ret == 0)
391a5c9dc44SDavid Howells 		return -EACCES;
392a5c9dc44SDavid Howells 
393a5c9dc44SDavid Howells 	rreq->netfs_priv = (void *)(uintptr_t)got;
394a5c9dc44SDavid Howells 	return 0;
395a5c9dc44SDavid Howells }
396a5c9dc44SDavid Howells 
39740a81101SDavid Howells static void ceph_netfs_free_request(struct netfs_io_request *rreq)
39849870056SJeff Layton {
39940a81101SDavid Howells 	struct ceph_inode_info *ci = ceph_inode(rreq->inode);
40040a81101SDavid Howells 	int got = (uintptr_t)rreq->netfs_priv;
40149870056SJeff Layton 
40249870056SJeff Layton 	if (got)
40349870056SJeff Layton 		ceph_put_cap_refs(ci, got);
40449870056SJeff Layton }
40549870056SJeff Layton 
406bc899ee1SDavid Howells const struct netfs_request_ops ceph_netfs_ops = {
407a5c9dc44SDavid Howells 	.init_request		= ceph_init_request,
40840a81101SDavid Howells 	.free_request		= ceph_netfs_free_request,
409f0702876SJeff Layton 	.begin_cache_operation	= ceph_begin_cache_operation,
410f18a3785SDavid Howells 	.issue_read		= ceph_netfs_issue_read,
411f0702876SJeff Layton 	.expand_readahead	= ceph_netfs_expand_readahead,
412f0702876SJeff Layton 	.clamp_length		= ceph_netfs_clamp_length,
413d801327dSJeff Layton 	.check_write_begin	= ceph_netfs_check_write_begin,
414f0702876SJeff Layton };
415f0702876SJeff Layton 
4161702e797SJeff Layton #ifdef CONFIG_CEPH_FSCACHE
4171702e797SJeff Layton static void ceph_set_page_fscache(struct page *page)
4181702e797SJeff Layton {
4191702e797SJeff Layton 	set_page_fscache(page);
4201702e797SJeff Layton }
4211702e797SJeff Layton 
4221702e797SJeff Layton static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
4231702e797SJeff Layton {
4241702e797SJeff Layton 	struct inode *inode = priv;
4251702e797SJeff Layton 
4261702e797SJeff Layton 	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
4271702e797SJeff Layton 		ceph_fscache_invalidate(inode, false);
4281702e797SJeff Layton }
4291702e797SJeff Layton 
4301702e797SJeff Layton static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
4311702e797SJeff Layton {
4321702e797SJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
4331702e797SJeff Layton 	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
4341702e797SJeff Layton 
4351702e797SJeff Layton 	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
4361702e797SJeff Layton 			       ceph_fscache_write_terminated, inode, caching);
4371702e797SJeff Layton }
4381702e797SJeff Layton #else
4391702e797SJeff Layton static inline void ceph_set_page_fscache(struct page *page)
4401702e797SJeff Layton {
4411702e797SJeff Layton }
4421702e797SJeff Layton 
4431702e797SJeff Layton static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
4441702e797SJeff Layton {
4451702e797SJeff Layton }
4461702e797SJeff Layton #endif /* CONFIG_CEPH_FSCACHE */
4471702e797SJeff Layton 
4481f934b00SYan, Zheng struct ceph_writeback_ctl
4491f934b00SYan, Zheng {
4501f934b00SYan, Zheng 	loff_t i_size;
4511f934b00SYan, Zheng 	u64 truncate_size;
4521f934b00SYan, Zheng 	u32 truncate_seq;
4531f934b00SYan, Zheng 	bool size_stable;
4542a2d927eSYan, Zheng 	bool head_snapc;
4551f934b00SYan, Zheng };
4561f934b00SYan, Zheng 
4571d3576fdSSage Weil /*
4581d3576fdSSage Weil  * Get ref for the oldest snapc for an inode with dirty data... that is, the
4591d3576fdSSage Weil  * only snap context we are allowed to write back.
4601d3576fdSSage Weil  */
4611f934b00SYan, Zheng static struct ceph_snap_context *
46205455e11SYan, Zheng get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
46305455e11SYan, Zheng 		   struct ceph_snap_context *page_snapc)
4641d3576fdSSage Weil {
4651d3576fdSSage Weil 	struct ceph_inode_info *ci = ceph_inode(inode);
4661d3576fdSSage Weil 	struct ceph_snap_context *snapc = NULL;
4671d3576fdSSage Weil 	struct ceph_cap_snap *capsnap = NULL;
4681d3576fdSSage Weil 
469be655596SSage Weil 	spin_lock(&ci->i_ceph_lock);
4701d3576fdSSage Weil 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
4711d3576fdSSage Weil 		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
4721d3576fdSSage Weil 		     capsnap->context, capsnap->dirty_pages);
47305455e11SYan, Zheng 		if (!capsnap->dirty_pages)
47405455e11SYan, Zheng 			continue;
47505455e11SYan, Zheng 
47605455e11SYan, Zheng 		/* get i_size, truncate_{seq,size} for page_snapc? */
47705455e11SYan, Zheng 		if (snapc && capsnap->context != page_snapc)
47805455e11SYan, Zheng 			continue;
47905455e11SYan, Zheng 
4801f934b00SYan, Zheng 		if (ctl) {
4811f934b00SYan, Zheng 			if (capsnap->writing) {
4821f934b00SYan, Zheng 				ctl->i_size = i_size_read(inode);
4831f934b00SYan, Zheng 				ctl->size_stable = false;
4841f934b00SYan, Zheng 			} else {
4851f934b00SYan, Zheng 				ctl->i_size = capsnap->size;
4861f934b00SYan, Zheng 				ctl->size_stable = true;
4871f934b00SYan, Zheng 			}
4881f934b00SYan, Zheng 			ctl->truncate_size = capsnap->truncate_size;
4891f934b00SYan, Zheng 			ctl->truncate_seq = capsnap->truncate_seq;
4902a2d927eSYan, Zheng 			ctl->head_snapc = false;
4911f934b00SYan, Zheng 		}
49205455e11SYan, Zheng 
49305455e11SYan, Zheng 		if (snapc)
4941d3576fdSSage Weil 			break;
49505455e11SYan, Zheng 
49605455e11SYan, Zheng 		snapc = ceph_get_snap_context(capsnap->context);
49705455e11SYan, Zheng 		if (!page_snapc ||
49805455e11SYan, Zheng 		    page_snapc == snapc ||
49905455e11SYan, Zheng 		    page_snapc->seq > snapc->seq)
50005455e11SYan, Zheng 			break;
5011d3576fdSSage Weil 	}
5027d8cb26dSSage Weil 	if (!snapc && ci->i_wrbuffer_ref_head) {
50380e755feSSage Weil 		snapc = ceph_get_snap_context(ci->i_head_snapc);
5041d3576fdSSage Weil 		dout(" head snapc %p has %d dirty pages\n",
5051d3576fdSSage Weil 		     snapc, ci->i_wrbuffer_ref_head);
5061f934b00SYan, Zheng 		if (ctl) {
5071f934b00SYan, Zheng 			ctl->i_size = i_size_read(inode);
5081f934b00SYan, Zheng 			ctl->truncate_size = ci->i_truncate_size;
5091f934b00SYan, Zheng 			ctl->truncate_seq = ci->i_truncate_seq;
5101f934b00SYan, Zheng 			ctl->size_stable = false;
5112a2d927eSYan, Zheng 			ctl->head_snapc = true;
5121f934b00SYan, Zheng 		}
5131d3576fdSSage Weil 	}
514be655596SSage Weil 	spin_unlock(&ci->i_ceph_lock);
5151d3576fdSSage Weil 	return snapc;
5161d3576fdSSage Weil }
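
/*
 * Worked example (illustrative only): if i_cap_snaps holds two cap
 * snaps whose snap contexts have seqs 5 and 9 and both still have
 * dirty pages, then a call with page_snapc == NULL returns the seq-5
 * context (oldest first).  Only when no cap snap has dirty pages does
 * this fall back to the "head" context, i_head_snapc.
 */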
5171d3576fdSSage Weil 
5181f934b00SYan, Zheng static u64 get_writepages_data_length(struct inode *inode,
5191f934b00SYan, Zheng 				      struct page *page, u64 start)
5201f934b00SYan, Zheng {
5211f934b00SYan, Zheng 	struct ceph_inode_info *ci = ceph_inode(inode);
5221f934b00SYan, Zheng 	struct ceph_snap_context *snapc = page_snap_context(page);
5231f934b00SYan, Zheng 	struct ceph_cap_snap *capsnap = NULL;
5241f934b00SYan, Zheng 	u64 end = i_size_read(inode);
5251f934b00SYan, Zheng 
5261f934b00SYan, Zheng 	if (snapc != ci->i_head_snapc) {
5271f934b00SYan, Zheng 		bool found = false;
5281f934b00SYan, Zheng 		spin_lock(&ci->i_ceph_lock);
5291f934b00SYan, Zheng 		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
5301f934b00SYan, Zheng 			if (capsnap->context == snapc) {
5311f934b00SYan, Zheng 				if (!capsnap->writing)
5321f934b00SYan, Zheng 					end = capsnap->size;
5331f934b00SYan, Zheng 				found = true;
5341f934b00SYan, Zheng 				break;
5351f934b00SYan, Zheng 			}
5361f934b00SYan, Zheng 		}
5371f934b00SYan, Zheng 		spin_unlock(&ci->i_ceph_lock);
5381f934b00SYan, Zheng 		WARN_ON(!found);
5391f934b00SYan, Zheng 	}
5408ff2d290SJeff Layton 	if (end > page_offset(page) + thp_size(page))
5418ff2d290SJeff Layton 		end = page_offset(page) + thp_size(page);
5421f934b00SYan, Zheng 	return end > start ? end - start : 0;
5431f934b00SYan, Zheng }
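
/*
 * Worked example (illustrative only): for a 4 KiB page at file offset
 * 8192 in the head snap context, on an inode with i_size 10000 and
 * start == 8192, end stays at 10000 (it is below page_offset(page) +
 * thp_size(page) == 12288), so 10000 - 8192 = 1808 bytes of that page
 * lie below EOF and are returned.
 */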
5441f934b00SYan, Zheng 
5451d3576fdSSage Weil /*
5461d3576fdSSage Weil  * Write a single page, but leave the page locked.
5471d3576fdSSage Weil  *
548b72b13ebSJeff Layton  * If we get a write error, mark the mapping for error, but still adjust the
5491d3576fdSSage Weil  * dirty page accounting (i.e., page is no longer dirty).
5501d3576fdSSage Weil  */
5511d3576fdSSage Weil static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
5521d3576fdSSage Weil {
553a628304eSMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
5546390987fSJeff Layton 	struct inode *inode = page->mapping->host;
5556390987fSJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
5566390987fSJeff Layton 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
5576298a337SSage Weil 	struct ceph_snap_context *snapc, *oldest;
558fc2744aaSYan, Zheng 	loff_t page_off = page_offset(page);
5596390987fSJeff Layton 	int err;
5608ff2d290SJeff Layton 	loff_t len = thp_size(page);
5611f934b00SYan, Zheng 	struct ceph_writeback_ctl ceph_wbc;
5626390987fSJeff Layton 	struct ceph_osd_client *osdc = &fsc->client->osdc;
5636390987fSJeff Layton 	struct ceph_osd_request *req;
5641702e797SJeff Layton 	bool caching = ceph_is_cache_enabled(inode);
5651d3576fdSSage Weil 
5661d3576fdSSage Weil 	dout("writepage %p idx %lu\n", page, page->index);
5671d3576fdSSage Weil 
5681d3576fdSSage Weil 	/* verify this is a writeable snap context */
56961600ef8SYan, Zheng 	snapc = page_snap_context(page);
570d37b1d99SMarkus Elfring 	if (!snapc) {
5711d3576fdSSage Weil 		dout("writepage %p page %p not dirty?\n", inode, page);
57243986881SYan, Zheng 		return 0;
5731d3576fdSSage Weil 	}
57405455e11SYan, Zheng 	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
5756298a337SSage Weil 	if (snapc->seq > oldest->seq) {
5761d3576fdSSage Weil 		dout("writepage %p page %p snapc %p not writeable - noop\n",
57761600ef8SYan, Zheng 		     inode, page, snapc);
5781d3576fdSSage Weil 		/* we should only noop if called by kswapd */
579fa71fefbSYan, Zheng 		WARN_ON(!(current->flags & PF_MEMALLOC));
5806298a337SSage Weil 		ceph_put_snap_context(oldest);
581fa71fefbSYan, Zheng 		redirty_page_for_writepage(wbc, page);
58243986881SYan, Zheng 		return 0;
5831d3576fdSSage Weil 	}
5846298a337SSage Weil 	ceph_put_snap_context(oldest);
5851d3576fdSSage Weil 
5861d3576fdSSage Weil 	/* is this a partial page at end of file? */
5871f934b00SYan, Zheng 	if (page_off >= ceph_wbc.i_size) {
588a628304eSMatthew Wilcox (Oracle) 		dout("folio at %lu beyond eof %llu\n", folio->index,
589a628304eSMatthew Wilcox (Oracle) 				ceph_wbc.i_size);
590a628304eSMatthew Wilcox (Oracle) 		folio_invalidate(folio, 0, folio_size(folio));
59143986881SYan, Zheng 		return 0;
592fc2744aaSYan, Zheng 	}
59343986881SYan, Zheng 
5941f934b00SYan, Zheng 	if (ceph_wbc.i_size < page_off + len)
5951f934b00SYan, Zheng 		len = ceph_wbc.i_size - page_off;
5961d3576fdSSage Weil 
5976390987fSJeff Layton 	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
5981c0a9c2dSYan, Zheng 	     inode, page, page->index, page_off, len, snapc, snapc->seq);
5991d3576fdSSage Weil 
600314c4737SYan, Zheng 	if (atomic_long_inc_return(&fsc->writeback_count) >
6013d14c5d2SYehuda Sadeh 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
602503d4fa6SNeilBrown 		fsc->write_congested = true;
6032baba250SYehuda Sadeh 
6046390987fSJeff Layton 	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
6056390987fSJeff Layton 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
6066390987fSJeff Layton 				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
6076390987fSJeff Layton 				    true);
6083459bd0cSXiubo Li 	if (IS_ERR(req)) {
6093459bd0cSXiubo Li 		redirty_page_for_writepage(wbc, page);
6106390987fSJeff Layton 		return PTR_ERR(req);
6113459bd0cSXiubo Li 	}
6121702e797SJeff Layton 
6131702e797SJeff Layton 	set_page_writeback(page);
6141702e797SJeff Layton 	if (caching)
6151702e797SJeff Layton 		ceph_set_page_fscache(page);
6161702e797SJeff Layton 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
6176390987fSJeff Layton 
6186390987fSJeff Layton 	/* it may be a short write due to an object boundary */
6198ff2d290SJeff Layton 	WARN_ON_ONCE(len > thp_size(page));
6206390987fSJeff Layton 	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
6216390987fSJeff Layton 	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
6226390987fSJeff Layton 
6236390987fSJeff Layton 	req->r_mtime = inode->i_mtime;
6246390987fSJeff Layton 	err = ceph_osdc_start_request(osdc, req, true);
6256390987fSJeff Layton 	if (!err)
6266390987fSJeff Layton 		err = ceph_osdc_wait_request(osdc, req);
6276390987fSJeff Layton 
6288ae99ae2SXiubo Li 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
629903f4fecSXiubo Li 				  req->r_end_latency, len, err);
6306390987fSJeff Layton 
6316390987fSJeff Layton 	ceph_osdc_put_request(req);
6326390987fSJeff Layton 	if (err == 0)
6336390987fSJeff Layton 		err = len;
6346390987fSJeff Layton 
6351d3576fdSSage Weil 	if (err < 0) {
636ad15ec06SYan, Zheng 		struct writeback_control tmp_wbc;
637ad15ec06SYan, Zheng 		if (!wbc)
638ad15ec06SYan, Zheng 			wbc = &tmp_wbc;
639ad15ec06SYan, Zheng 		if (err == -ERESTARTSYS) {
640ad15ec06SYan, Zheng 			/* killed by SIGKILL */
641ad15ec06SYan, Zheng 			dout("writepage interrupted page %p\n", page);
642ad15ec06SYan, Zheng 			redirty_page_for_writepage(wbc, page);
643ad15ec06SYan, Zheng 			end_page_writeback(page);
64443986881SYan, Zheng 			return err;
645ad15ec06SYan, Zheng 		}
6460b98acd6SIlya Dryomov 		if (err == -EBLOCKLISTED)
6470b98acd6SIlya Dryomov 			fsc->blocklisted = true;
648ad15ec06SYan, Zheng 		dout("writepage setting page/mapping error %d %p\n",
649ad15ec06SYan, Zheng 		     err, page);
6501d3576fdSSage Weil 		mapping_set_error(&inode->i_data, err);
6511d3576fdSSage Weil 		wbc->pages_skipped++;
6521d3576fdSSage Weil 	} else {
6531d3576fdSSage Weil 		dout("writepage cleaned page %p\n", page);
6541d3576fdSSage Weil 		err = 0;  /* vfs expects us to return 0 */
6551d3576fdSSage Weil 	}
656379fc7faSJeff Layton 	oldest = detach_page_private(page);
657379fc7faSJeff Layton 	WARN_ON_ONCE(oldest != snapc);
6581d3576fdSSage Weil 	end_page_writeback(page);
6591d3576fdSSage Weil 	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
6606298a337SSage Weil 	ceph_put_snap_context(snapc);  /* page's reference */
661314c4737SYan, Zheng 
662314c4737SYan, Zheng 	if (atomic_long_dec_return(&fsc->writeback_count) <
663314c4737SYan, Zheng 	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
664503d4fa6SNeilBrown 		fsc->write_congested = false;
665314c4737SYan, Zheng 
6661d3576fdSSage Weil 	return err;
6671d3576fdSSage Weil }
6681d3576fdSSage Weil 
6691d3576fdSSage Weil static int ceph_writepage(struct page *page, struct writeback_control *wbc)
6701d3576fdSSage Weil {
671dbd646a8SYehuda Sadeh 	int err;
672dbd646a8SYehuda Sadeh 	struct inode *inode = page->mapping->host;
673dbd646a8SYehuda Sadeh 	BUG_ON(!inode);
67470b666c3SSage Weil 	ihold(inode);
6751702e797SJeff Layton 
676503d4fa6SNeilBrown 	if (wbc->sync_mode == WB_SYNC_NONE &&
677503d4fa6SNeilBrown 	    ceph_inode_to_client(inode)->write_congested)
678503d4fa6SNeilBrown 		return AOP_WRITEPAGE_ACTIVATE;
679503d4fa6SNeilBrown 
6801702e797SJeff Layton 	wait_on_page_fscache(page);
6811702e797SJeff Layton 
682dbd646a8SYehuda Sadeh 	err = writepage_nounlock(page, wbc);
683ad15ec06SYan, Zheng 	if (err == -ERESTARTSYS) {
684ad15ec06SYan, Zheng 		/* direct memory reclaimer was killed by SIGKILL. return 0
685ad15ec06SYan, Zheng 		 * to prevent caller from setting mapping/page error */
686ad15ec06SYan, Zheng 		err = 0;
687ad15ec06SYan, Zheng 	}
6881d3576fdSSage Weil 	unlock_page(page);
689dbd646a8SYehuda Sadeh 	iput(inode);
6901d3576fdSSage Weil 	return err;
6911d3576fdSSage Weil }
6921d3576fdSSage Weil 
6931d3576fdSSage Weil /*
6941d3576fdSSage Weil  * async writeback completion handler.
6951d3576fdSSage Weil  *
6961d3576fdSSage Weil  * If we get an error, set the mapping error bit, but not the individual
6971d3576fdSSage Weil  * page error bits.
6981d3576fdSSage Weil  */
69985e084feSIlya Dryomov static void writepages_finish(struct ceph_osd_request *req)
7001d3576fdSSage Weil {
7011d3576fdSSage Weil 	struct inode *inode = req->r_inode;
7021d3576fdSSage Weil 	struct ceph_inode_info *ci = ceph_inode(inode);
70387060c10SAlex Elder 	struct ceph_osd_data *osd_data;
7041d3576fdSSage Weil 	struct page *page;
7055b64640cSYan, Zheng 	int num_pages, total_pages = 0;
7065b64640cSYan, Zheng 	int i, j;
7075b64640cSYan, Zheng 	int rc = req->r_result;
7081d3576fdSSage Weil 	struct ceph_snap_context *snapc = req->r_snapc;
7091d3576fdSSage Weil 	struct address_space *mapping = inode->i_mapping;
7103d14c5d2SYehuda Sadeh 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
711903f4fecSXiubo Li 	unsigned int len = 0;
7125b64640cSYan, Zheng 	bool remove_page;
7131d3576fdSSage Weil 
7145b64640cSYan, Zheng 	dout("writepages_finish %p rc %d\n", inode, rc);
71526544c62SJeff Layton 	if (rc < 0) {
7161d3576fdSSage Weil 		mapping_set_error(mapping, rc);
71726544c62SJeff Layton 		ceph_set_error_write(ci);
7180b98acd6SIlya Dryomov 		if (rc == -EBLOCKLISTED)
7190b98acd6SIlya Dryomov 			fsc->blocklisted = true;
72026544c62SJeff Layton 	} else {
72126544c62SJeff Layton 		ceph_clear_error_write(ci);
72226544c62SJeff Layton 	}
723e63dc5c7SYehuda Sadeh 
724e63dc5c7SYehuda Sadeh 	/*
725e63dc5c7SYehuda Sadeh 	 * We lost the cache cap, need to truncate the page before
726e63dc5c7SYehuda Sadeh 	 * it is unlocked, otherwise we'd truncate it later in the
727e63dc5c7SYehuda Sadeh 	 * page truncation thread, possibly losing some data that
728e63dc5c7SYehuda Sadeh 	 * raced its way in
729e63dc5c7SYehuda Sadeh 	 */
7305b64640cSYan, Zheng 	remove_page = !(ceph_caps_issued(ci) &
7315b64640cSYan, Zheng 			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
7325b64640cSYan, Zheng 
7335b64640cSYan, Zheng 	/* clean all pages */
7345b64640cSYan, Zheng 	for (i = 0; i < req->r_num_ops; i++) {
735642d51fbSXiubo Li 		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
736642d51fbSXiubo Li 			pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
737642d51fbSXiubo Li 				__func__, req->r_ops[i].op, req, i, req->r_tid);
7385b64640cSYan, Zheng 			break;
739642d51fbSXiubo Li 		}
7405b64640cSYan, Zheng 
7415b64640cSYan, Zheng 		osd_data = osd_req_op_extent_osd_data(req, i);
7425b64640cSYan, Zheng 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
743903f4fecSXiubo Li 		len += osd_data->length;
7445b64640cSYan, Zheng 		num_pages = calc_pages_for((u64)osd_data->alignment,
7455b64640cSYan, Zheng 					   (u64)osd_data->length);
7465b64640cSYan, Zheng 		total_pages += num_pages;
7475b64640cSYan, Zheng 		for (j = 0; j < num_pages; j++) {
7485b64640cSYan, Zheng 			page = osd_data->pages[j];
7495b64640cSYan, Zheng 			BUG_ON(!page);
7505b64640cSYan, Zheng 			WARN_ON(!PageUptodate(page));
7515b64640cSYan, Zheng 
7525b64640cSYan, Zheng 			if (atomic_long_dec_return(&fsc->writeback_count) <
7535b64640cSYan, Zheng 			     CONGESTION_OFF_THRESH(
7545b64640cSYan, Zheng 					fsc->mount_options->congestion_kb))
755503d4fa6SNeilBrown 				fsc->write_congested = false;
7565b64640cSYan, Zheng 
757379fc7faSJeff Layton 			ceph_put_snap_context(detach_page_private(page));
7585b64640cSYan, Zheng 			end_page_writeback(page);
759379fc7faSJeff Layton 			dout("unlocking %p\n", page);
7605b64640cSYan, Zheng 
7615b64640cSYan, Zheng 			if (remove_page)
7625b64640cSYan, Zheng 				generic_error_remove_page(inode->i_mapping,
7635b64640cSYan, Zheng 							  page);
764e63dc5c7SYehuda Sadeh 
7651d3576fdSSage Weil 			unlock_page(page);
7661d3576fdSSage Weil 		}
7675b64640cSYan, Zheng 		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
7685b64640cSYan, Zheng 		     inode, osd_data->length, rc >= 0 ? num_pages : 0);
7691d3576fdSSage Weil 
77096ac9158SJohn Hubbard 		release_pages(osd_data->pages, num_pages);
7715b64640cSYan, Zheng 	}
7725b64640cSYan, Zheng 
773903f4fecSXiubo Li 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
774903f4fecSXiubo Li 				  req->r_end_latency, len, rc);
775903f4fecSXiubo Li 
7765b64640cSYan, Zheng 	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
7775b64640cSYan, Zheng 
7785b64640cSYan, Zheng 	osd_data = osd_req_op_extent_osd_data(req, 0);
77987060c10SAlex Elder 	if (osd_data->pages_from_pool)
780a0102bdaSJeff Layton 		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
7811d3576fdSSage Weil 	else
78287060c10SAlex Elder 		kfree(osd_data->pages);
7831d3576fdSSage Weil 	ceph_osdc_put_request(req);
7841d3576fdSSage Weil }
7851d3576fdSSage Weil 
7861d3576fdSSage Weil /*
7871d3576fdSSage Weil  * initiate async writeback
7881d3576fdSSage Weil  */
7891d3576fdSSage Weil static int ceph_writepages_start(struct address_space *mapping,
7901d3576fdSSage Weil 				 struct writeback_control *wbc)
7911d3576fdSSage Weil {
7921d3576fdSSage Weil 	struct inode *inode = mapping->host;
7931d3576fdSSage Weil 	struct ceph_inode_info *ci = ceph_inode(inode);
794fc2744aaSYan, Zheng 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
795fc2744aaSYan, Zheng 	struct ceph_vino vino = ceph_vino(inode);
7962a2d927eSYan, Zheng 	pgoff_t index, start_index, end = -1;
79780e755feSSage Weil 	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
7981d3576fdSSage Weil 	struct pagevec pvec;
7991d3576fdSSage Weil 	int rc = 0;
80093407472SFabian Frederick 	unsigned int wsize = i_blocksize(inode);
8011d3576fdSSage Weil 	struct ceph_osd_request *req = NULL;
8021f934b00SYan, Zheng 	struct ceph_writeback_ctl ceph_wbc;
803590e9d98SYan, Zheng 	bool should_loop, range_whole = false;
804af9cc401SYan, Zheng 	bool done = false;
8051702e797SJeff Layton 	bool caching = ceph_is_cache_enabled(inode);
8061d3576fdSSage Weil 
807503d4fa6SNeilBrown 	if (wbc->sync_mode == WB_SYNC_NONE &&
808503d4fa6SNeilBrown 	    fsc->write_congested)
809503d4fa6SNeilBrown 		return 0;
810503d4fa6SNeilBrown 
8113fb99d48SYanhu Cao 	dout("writepages_start %p (mode=%s)\n", inode,
8121d3576fdSSage Weil 	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
8131d3576fdSSage Weil 	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
8141d3576fdSSage Weil 
8155d6451b1SJeff Layton 	if (ceph_inode_is_shutdown(inode)) {
8166c93df5dSYan, Zheng 		if (ci->i_wrbuffer_ref > 0) {
8176c93df5dSYan, Zheng 			pr_warn_ratelimited(
8186c93df5dSYan, Zheng 				"writepage_start %p %lld forced umount\n",
8196c93df5dSYan, Zheng 				inode, ceph_ino(inode));
8206c93df5dSYan, Zheng 		}
821a341d4dfSYan, Zheng 		mapping_set_error(mapping, -EIO);
8221d3576fdSSage Weil 		return -EIO; /* we're in a forced umount, don't write! */
8231d3576fdSSage Weil 	}
82495cca2b4SYan, Zheng 	if (fsc->mount_options->wsize < wsize)
8253d14c5d2SYehuda Sadeh 		wsize = fsc->mount_options->wsize;
8261d3576fdSSage Weil 
82786679820SMel Gorman 	pagevec_init(&pvec);
8281d3576fdSSage Weil 
829590e9d98SYan, Zheng 	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
830590e9d98SYan, Zheng 	index = start_index;
8311d3576fdSSage Weil 
8321d3576fdSSage Weil retry:
8331d3576fdSSage Weil 	/* find oldest snap context with dirty data */
83405455e11SYan, Zheng 	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
8351d3576fdSSage Weil 	if (!snapc) {
8361d3576fdSSage Weil 		/* hmm, why does writepages get called when there
8371d3576fdSSage Weil 		   is no dirty data? */
8381d3576fdSSage Weil 		dout(" no snap context with dirty data?\n");
8391d3576fdSSage Weil 		goto out;
8401d3576fdSSage Weil 	}
8411d3576fdSSage Weil 	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
8421d3576fdSSage Weil 	     snapc, snapc->seq, snapc->num_snaps);
843fc2744aaSYan, Zheng 
8442a2d927eSYan, Zheng 	should_loop = false;
8452a2d927eSYan, Zheng 	if (ceph_wbc.head_snapc && snapc != last_snapc) {
8462a2d927eSYan, Zheng 		/* where to start/end? */
8472a2d927eSYan, Zheng 		if (wbc->range_cyclic) {
8482a2d927eSYan, Zheng 			index = start_index;
8492a2d927eSYan, Zheng 			end = -1;
8502a2d927eSYan, Zheng 			if (index > 0)
8512a2d927eSYan, Zheng 				should_loop = true;
8522a2d927eSYan, Zheng 			dout(" cyclic, start at %lu\n", index);
8532a2d927eSYan, Zheng 		} else {
8542a2d927eSYan, Zheng 			index = wbc->range_start >> PAGE_SHIFT;
8552a2d927eSYan, Zheng 			end = wbc->range_end >> PAGE_SHIFT;
8562a2d927eSYan, Zheng 			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
8572a2d927eSYan, Zheng 				range_whole = true;
8582a2d927eSYan, Zheng 			dout(" not cyclic, %lu to %lu\n", index, end);
8591d3576fdSSage Weil 		}
8602a2d927eSYan, Zheng 	} else if (!ceph_wbc.head_snapc) {
8612a2d927eSYan, Zheng 		/* Do not respect wbc->range_{start,end}. Dirty pages
8622a2d927eSYan, Zheng 		 * in that range can be associated with newer snapc.
8632a2d927eSYan, Zheng 		 * They are not writeable until all dirty pages
8642a2d927eSYan, Zheng 		 * associated with 'snapc' have been written. */
8651582af2eSYan, Zheng 		if (index > 0)
8662a2d927eSYan, Zheng 			should_loop = true;
8672a2d927eSYan, Zheng 		dout(" non-head snapc, range whole\n");
8682a2d927eSYan, Zheng 	}
8692a2d927eSYan, Zheng 
8702a2d927eSYan, Zheng 	ceph_put_snap_context(last_snapc);
8711d3576fdSSage Weil 	last_snapc = snapc;
8721d3576fdSSage Weil 
873af9cc401SYan, Zheng 	while (!done && index <= end) {
8745b64640cSYan, Zheng 		int num_ops = 0, op_idx;
8750e5ecac7SYan, Zheng 		unsigned i, pvec_pages, max_pages, locked_pages = 0;
8765b64640cSYan, Zheng 		struct page **pages = NULL, **data_pages;
8771d3576fdSSage Weil 		struct page *page;
8780e5ecac7SYan, Zheng 		pgoff_t strip_unit_end = 0;
8795b64640cSYan, Zheng 		u64 offset = 0, len = 0;
880a0102bdaSJeff Layton 		bool from_pool = false;
8811d3576fdSSage Weil 
8820e5ecac7SYan, Zheng 		max_pages = wsize >> PAGE_SHIFT;
8831d3576fdSSage Weil 
8841d3576fdSSage Weil get_more_pages:
8852e169296SJeff Layton 		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
8862e169296SJeff Layton 						end, PAGECACHE_TAG_DIRTY);
8870ed75fc8SJan Kara 		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
8881d3576fdSSage Weil 		if (!pvec_pages && !locked_pages)
8891d3576fdSSage Weil 			break;
8901d3576fdSSage Weil 		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
8911d3576fdSSage Weil 			page = pvec.pages[i];
8921d3576fdSSage Weil 			dout("? %p idx %lu\n", page, page->index);
8931d3576fdSSage Weil 			if (locked_pages == 0)
8941d3576fdSSage Weil 				lock_page(page);  /* first page */
8951d3576fdSSage Weil 			else if (!trylock_page(page))
8961d3576fdSSage Weil 				break;
8971d3576fdSSage Weil 
8981d3576fdSSage Weil 			/* only dirty pages, or our accounting breaks */
8991d3576fdSSage Weil 			if (unlikely(!PageDirty(page)) ||
9001d3576fdSSage Weil 			    unlikely(page->mapping != mapping)) {
9011d3576fdSSage Weil 				dout("!dirty or !mapping %p\n", page);
9021d3576fdSSage Weil 				unlock_page(page);
9030713e5f2SYan, Zheng 				continue;
9041d3576fdSSage Weil 			}
905af9cc401SYan, Zheng 			/* only if matching snap context */
906af9cc401SYan, Zheng 			pgsnapc = page_snap_context(page);
907af9cc401SYan, Zheng 			if (pgsnapc != snapc) {
908af9cc401SYan, Zheng 				dout("page snapc %p %lld != oldest %p %lld\n",
909af9cc401SYan, Zheng 				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
9101582af2eSYan, Zheng 				if (!should_loop &&
9111582af2eSYan, Zheng 				    !ceph_wbc.head_snapc &&
9121582af2eSYan, Zheng 				    wbc->sync_mode != WB_SYNC_NONE)
9131582af2eSYan, Zheng 					should_loop = true;
9141d3576fdSSage Weil 				unlock_page(page);
915af9cc401SYan, Zheng 				continue;
9161d3576fdSSage Weil 			}
9171f934b00SYan, Zheng 			if (page_offset(page) >= ceph_wbc.i_size) {
918a628304eSMatthew Wilcox (Oracle) 				struct folio *folio = page_folio(page);
919a628304eSMatthew Wilcox (Oracle) 
920a628304eSMatthew Wilcox (Oracle) 				dout("folio at %lu beyond eof %llu\n",
921a628304eSMatthew Wilcox (Oracle) 				     folio->index, ceph_wbc.i_size);
922c95f1c5fSErqi Chen 				if ((ceph_wbc.size_stable ||
923a628304eSMatthew Wilcox (Oracle) 				    folio_pos(folio) >= i_size_read(inode)) &&
924a628304eSMatthew Wilcox (Oracle) 				    folio_clear_dirty_for_io(folio))
925a628304eSMatthew Wilcox (Oracle) 					folio_invalidate(folio, 0,
926a628304eSMatthew Wilcox (Oracle) 							folio_size(folio));
927a628304eSMatthew Wilcox (Oracle) 				folio_unlock(folio);
928af9cc401SYan, Zheng 				continue;
929af9cc401SYan, Zheng 			}
930af9cc401SYan, Zheng 			if (strip_unit_end && (page->index > strip_unit_end)) {
931af9cc401SYan, Zheng 				dout("end of strip unit %p\n", page);
9321d3576fdSSage Weil 				unlock_page(page);
9331d3576fdSSage Weil 				break;
9341d3576fdSSage Weil 			}
9351702e797SJeff Layton 			if (PageWriteback(page) || PageFsCache(page)) {
9360713e5f2SYan, Zheng 				if (wbc->sync_mode == WB_SYNC_NONE) {
9371d3576fdSSage Weil 					dout("%p under writeback\n", page);
9381d3576fdSSage Weil 					unlock_page(page);
9390713e5f2SYan, Zheng 					continue;
9400713e5f2SYan, Zheng 				}
9410713e5f2SYan, Zheng 				dout("waiting on writeback %p\n", page);
9420713e5f2SYan, Zheng 				wait_on_page_writeback(page);
9431702e797SJeff Layton 				wait_on_page_fscache(page);
9441d3576fdSSage Weil 			}
9451d3576fdSSage Weil 
9461d3576fdSSage Weil 			if (!clear_page_dirty_for_io(page)) {
9471d3576fdSSage Weil 				dout("%p !clear_page_dirty_for_io\n", page);
9481d3576fdSSage Weil 				unlock_page(page);
9490713e5f2SYan, Zheng 				continue;
9501d3576fdSSage Weil 			}
9511d3576fdSSage Weil 
952e5975c7cSAlex Elder 			/*
953e5975c7cSAlex Elder 			 * We have something to write.  If this is
954e5975c7cSAlex Elder 			 * the first locked page this time through,
9555b64640cSYan, Zheng 			 * calculate max possible write size and
9565b64640cSYan, Zheng 			 * allocate a page array
957e5975c7cSAlex Elder 			 */
9581d3576fdSSage Weil 			if (locked_pages == 0) {
9595b64640cSYan, Zheng 				u64 objnum;
9605b64640cSYan, Zheng 				u64 objoff;
961dccbf080SIlya Dryomov 				u32 xlen;
9625b64640cSYan, Zheng 
9631d3576fdSSage Weil 				/* prepare async write request */
9646285bc23SAlex Elder 				offset = (u64)page_offset(page);
965dccbf080SIlya Dryomov 				ceph_calc_file_object_mapping(&ci->i_layout,
966dccbf080SIlya Dryomov 							      offset, wsize,
9675b64640cSYan, Zheng 							      &objnum, &objoff,
968dccbf080SIlya Dryomov 							      &xlen);
969dccbf080SIlya Dryomov 				len = xlen;
9708c71897bSHenry C Chang 
9713fb99d48SYanhu Cao 				num_ops = 1;
9725b64640cSYan, Zheng 				strip_unit_end = page->index +
97309cbfeafSKirill A. Shutemov 					((len - 1) >> PAGE_SHIFT);
974715e4cd4SYan, Zheng 
9755b64640cSYan, Zheng 				BUG_ON(pages);
97688486957SAlex Elder 				max_pages = calc_pages_for(0, (u64)len);
9776da2ec56SKees Cook 				pages = kmalloc_array(max_pages,
9786da2ec56SKees Cook 						      sizeof(*pages),
979fc2744aaSYan, Zheng 						      GFP_NOFS);
98088486957SAlex Elder 				if (!pages) {
981a0102bdaSJeff Layton 					from_pool = true;
982a0102bdaSJeff Layton 					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
983e5975c7cSAlex Elder 					BUG_ON(!pages);
98488486957SAlex Elder 				}
9855b64640cSYan, Zheng 
9865b64640cSYan, Zheng 				len = 0;
9875b64640cSYan, Zheng 			} else if (page->index !=
98809cbfeafSKirill A. Shutemov 				   (offset + len) >> PAGE_SHIFT) {
989a0102bdaSJeff Layton 				if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
9905b64640cSYan, Zheng 							     CEPH_OSD_MAX_OPS)) {
9915b64640cSYan, Zheng 					redirty_page_for_writepage(wbc, page);
9925b64640cSYan, Zheng 					unlock_page(page);
9935b64640cSYan, Zheng 					break;
9945b64640cSYan, Zheng 				}
9955b64640cSYan, Zheng 
9965b64640cSYan, Zheng 				num_ops++;
9975b64640cSYan, Zheng 				offset = (u64)page_offset(page);
9985b64640cSYan, Zheng 				len = 0;
9991d3576fdSSage Weil 			}
10001d3576fdSSage Weil 
10011d3576fdSSage Weil 			/* note position of first page in pvec */
10021d3576fdSSage Weil 			dout("%p will write page %p idx %lu\n",
10031d3576fdSSage Weil 			     inode, page, page->index);
10042baba250SYehuda Sadeh 
10055b64640cSYan, Zheng 			if (atomic_long_inc_return(&fsc->writeback_count) >
10065b64640cSYan, Zheng 			    CONGESTION_ON_THRESH(
1007503d4fa6SNeilBrown 				    fsc->mount_options->congestion_kb))
1008503d4fa6SNeilBrown 				fsc->write_congested = true;
10090713e5f2SYan, Zheng 
10100713e5f2SYan, Zheng 			pages[locked_pages++] = page;
10110713e5f2SYan, Zheng 			pvec.pages[i] = NULL;
10120713e5f2SYan, Zheng 
10138ff2d290SJeff Layton 			len += thp_size(page);
10141d3576fdSSage Weil 		}
10151d3576fdSSage Weil 
10161d3576fdSSage Weil 		/* did we get anything? */
10171d3576fdSSage Weil 		if (!locked_pages)
10181d3576fdSSage Weil 			goto release_pvec_pages;
10191d3576fdSSage Weil 		if (i) {
10200713e5f2SYan, Zheng 			unsigned j, n = 0;
10210713e5f2SYan, Zheng 			/* shift unused page to beginning of pvec */
10220713e5f2SYan, Zheng 			for (j = 0; j < pvec_pages; j++) {
10230713e5f2SYan, Zheng 				if (!pvec.pages[j])
10240713e5f2SYan, Zheng 					continue;
10250713e5f2SYan, Zheng 				if (n < j)
10260713e5f2SYan, Zheng 					pvec.pages[n] = pvec.pages[j];
10270713e5f2SYan, Zheng 				n++;
10280713e5f2SYan, Zheng 			}
10290713e5f2SYan, Zheng 			pvec.nr = n;
10301d3576fdSSage Weil 
10311d3576fdSSage Weil 			if (pvec_pages && i == pvec_pages &&
10321d3576fdSSage Weil 			    locked_pages < max_pages) {
10331d3576fdSSage Weil 				dout("reached end pvec, trying for more\n");
10340713e5f2SYan, Zheng 				pagevec_release(&pvec);
10351d3576fdSSage Weil 				goto get_more_pages;
10361d3576fdSSage Weil 			}
10371d3576fdSSage Weil 		}
10381d3576fdSSage Weil 
10395b64640cSYan, Zheng new_request:
1040e5975c7cSAlex Elder 		offset = page_offset(pages[0]);
10415b64640cSYan, Zheng 		len = wsize;
10425b64640cSYan, Zheng 
10435b64640cSYan, Zheng 		req = ceph_osdc_new_request(&fsc->client->osdc,
10445b64640cSYan, Zheng 					&ci->i_layout, vino,
10455b64640cSYan, Zheng 					offset, &len, 0, num_ops,
10461f934b00SYan, Zheng 					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
10471f934b00SYan, Zheng 					snapc, ceph_wbc.truncate_seq,
10481f934b00SYan, Zheng 					ceph_wbc.truncate_size, false);
10495b64640cSYan, Zheng 		if (IS_ERR(req)) {
10505b64640cSYan, Zheng 			req = ceph_osdc_new_request(&fsc->client->osdc,
10515b64640cSYan, Zheng 						&ci->i_layout, vino,
10525b64640cSYan, Zheng 						offset, &len, 0,
10535b64640cSYan, Zheng 						min(num_ops,
10545b64640cSYan, Zheng 						    CEPH_OSD_SLAB_OPS),
10555b64640cSYan, Zheng 						CEPH_OSD_OP_WRITE,
105654ea0046SIlya Dryomov 						CEPH_OSD_FLAG_WRITE,
10571f934b00SYan, Zheng 						snapc, ceph_wbc.truncate_seq,
10581f934b00SYan, Zheng 						ceph_wbc.truncate_size, true);
10595b64640cSYan, Zheng 			BUG_ON(IS_ERR(req));
10605b64640cSYan, Zheng 		}
10615b64640cSYan, Zheng 		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
10628ff2d290SJeff Layton 			     thp_size(page) - offset);
10635b64640cSYan, Zheng 
10645b64640cSYan, Zheng 		req->r_callback = writepages_finish;
10655b64640cSYan, Zheng 		req->r_inode = inode;
10665b64640cSYan, Zheng 
10675b64640cSYan, Zheng 		/* Format the osd request message and submit the write */
10685b64640cSYan, Zheng 		len = 0;
10695b64640cSYan, Zheng 		data_pages = pages;
10705b64640cSYan, Zheng 		op_idx = 0;
10715b64640cSYan, Zheng 		for (i = 0; i < locked_pages; i++) {
10725b64640cSYan, Zheng 			u64 cur_offset = page_offset(pages[i]);
10731702e797SJeff Layton 			/*
10741702e797SJeff Layton 			 * Discontinuity in page range? Ceph can handle that by just passing
10751702e797SJeff Layton 			 * multiple extents in the write op.
10761702e797SJeff Layton 			 */
10775b64640cSYan, Zheng 			if (offset + len != cur_offset) {
10781702e797SJeff Layton 				/* If it's full, stop here */
10793fb99d48SYanhu Cao 				if (op_idx + 1 == req->r_num_ops)
10805b64640cSYan, Zheng 					break;
10811702e797SJeff Layton 
10821702e797SJeff Layton 				/* Kick off an fscache write with what we have so far. */
10831702e797SJeff Layton 				ceph_fscache_write_to_cache(inode, offset, len, caching);
10841702e797SJeff Layton 
10851702e797SJeff Layton 				/* Start a new extent */
10865b64640cSYan, Zheng 				osd_req_op_extent_dup_last(req, op_idx,
10875b64640cSYan, Zheng 							   cur_offset - offset);
10885b64640cSYan, Zheng 				dout("writepages got pages at %llu~%llu\n",
10895b64640cSYan, Zheng 				     offset, len);
10905b64640cSYan, Zheng 				osd_req_op_extent_osd_data_pages(req, op_idx,
10915b64640cSYan, Zheng 							data_pages, len, 0,
1092a0102bdaSJeff Layton 							from_pool, false);
10935b64640cSYan, Zheng 				osd_req_op_extent_update(req, op_idx, len);
10945b64640cSYan, Zheng 
10955b64640cSYan, Zheng 				len = 0;
10965b64640cSYan, Zheng 				offset = cur_offset;
10975b64640cSYan, Zheng 				data_pages = pages + i;
10985b64640cSYan, Zheng 				op_idx++;
10995b64640cSYan, Zheng 			}
11005b64640cSYan, Zheng 
11015b64640cSYan, Zheng 			set_page_writeback(pages[i]);
11021702e797SJeff Layton 			if (caching)
11031702e797SJeff Layton 				ceph_set_page_fscache(pages[i]);
11048ff2d290SJeff Layton 			len += thp_size(page);
11055b64640cSYan, Zheng 		}
11061702e797SJeff Layton 		ceph_fscache_write_to_cache(inode, offset, len, caching);
11075b64640cSYan, Zheng 
11081f934b00SYan, Zheng 		if (ceph_wbc.size_stable) {
11091f934b00SYan, Zheng 			len = min(len, ceph_wbc.i_size - offset);
11105b64640cSYan, Zheng 		} else if (i == locked_pages) {
1111e1966b49SYan, Zheng 			/* writepages_finish() clears writeback pages
1112e1966b49SYan, Zheng 			 * according to the data length, so make sure
1113e1966b49SYan, Zheng 			 * data length covers all locked pages */
11148ff2d290SJeff Layton 			u64 min_len = len + 1 - thp_size(page);
11151f934b00SYan, Zheng 			len = get_writepages_data_length(inode, pages[i - 1],
11161f934b00SYan, Zheng 							 offset);
11175b64640cSYan, Zheng 			len = max(len, min_len);
1118e1966b49SYan, Zheng 		}
11195b64640cSYan, Zheng 		dout("writepages got pages at %llu~%llu\n", offset, len);
11201d3576fdSSage Weil 
11215b64640cSYan, Zheng 		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
1122a0102bdaSJeff Layton 						 0, from_pool, false);
11235b64640cSYan, Zheng 		osd_req_op_extent_update(req, op_idx, len);
1124e5975c7cSAlex Elder 
11255b64640cSYan, Zheng 		BUG_ON(op_idx + 1 != req->r_num_ops);
11265b64640cSYan, Zheng 
1127a0102bdaSJeff Layton 		from_pool = false;
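		/*
		 * If the request filled up before covering every locked page,
		 * hand the pages it does cover over to it and move the
		 * remainder into a freshly allocated array (or a mempool one)
		 * for the next new_request iteration.
		 */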
11285b64640cSYan, Zheng 		if (i < locked_pages) {
11295b64640cSYan, Zheng 			BUG_ON(num_ops <= req->r_num_ops);
11305b64640cSYan, Zheng 			num_ops -= req->r_num_ops;
11315b64640cSYan, Zheng 			locked_pages -= i;
1132e5975c7cSAlex Elder 
11335b64640cSYan, Zheng 			/* allocate new pages array for next request */
11345b64640cSYan, Zheng 			data_pages = pages;
11356da2ec56SKees Cook 			pages = kmalloc_array(locked_pages, sizeof(*pages),
11365b64640cSYan, Zheng 					      GFP_NOFS);
11375b64640cSYan, Zheng 			if (!pages) {
1138a0102bdaSJeff Layton 				from_pool = true;
1139a0102bdaSJeff Layton 				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
11405b64640cSYan, Zheng 				BUG_ON(!pages);
11415b64640cSYan, Zheng 			}
11425b64640cSYan, Zheng 			memcpy(pages, data_pages + i,
11435b64640cSYan, Zheng 			       locked_pages * sizeof(*pages));
11445b64640cSYan, Zheng 			memset(data_pages + i, 0,
11455b64640cSYan, Zheng 			       locked_pages * sizeof(*pages));
11465b64640cSYan, Zheng 		} else {
11475b64640cSYan, Zheng 			BUG_ON(num_ops != req->r_num_ops);
11485b64640cSYan, Zheng 			index = pages[i - 1]->index + 1;
11495b64640cSYan, Zheng 			/* request message now owns the pages array */
11505b64640cSYan, Zheng 			pages = NULL;
11515b64640cSYan, Zheng 		}
1152e5975c7cSAlex Elder 
1153fac02ddfSArnd Bergmann 		req->r_mtime = inode->i_mtime;
11549d6fcb08SSage Weil 		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
11559d6fcb08SSage Weil 		BUG_ON(rc);
11561d3576fdSSage Weil 		req = NULL;
11571d3576fdSSage Weil 
11585b64640cSYan, Zheng 		wbc->nr_to_write -= i;
11595b64640cSYan, Zheng 		if (pages)
11605b64640cSYan, Zheng 			goto new_request;
11615b64640cSYan, Zheng 
11622a2d927eSYan, Zheng 		/*
11632a2d927eSYan, Zheng 		 * We stop writing back only if we are not doing
11642a2d927eSYan, Zheng 		 * integrity sync. In case of integrity sync we have to
11652a2d927eSYan, Zheng 		 * keep going until we have written all the pages
11662a2d927eSYan, Zheng 		 * we tagged for writeback prior to entering this loop.
11672a2d927eSYan, Zheng 		 */
11682a2d927eSYan, Zheng 		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
1169af9cc401SYan, Zheng 			done = true;
11701d3576fdSSage Weil 
11711d3576fdSSage Weil release_pvec_pages:
11721d3576fdSSage Weil 		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
11731d3576fdSSage Weil 		     pvec.nr ? pvec.pages[0] : NULL);
11741d3576fdSSage Weil 		pagevec_release(&pvec);
11751d3576fdSSage Weil 	}
11761d3576fdSSage Weil 
11771d3576fdSSage Weil 	if (should_loop && !done) {
11781d3576fdSSage Weil 		/* more to do; loop back to beginning of file */
11791d3576fdSSage Weil 		dout("writepages looping back to beginning of file\n");
11802a2d927eSYan, Zheng 		end = start_index - 1; /* OK even when start_index == 0 */
1181f275635eSYan, Zheng 
1182f275635eSYan, Zheng 		/* to write dirty pages associated with next snapc,
1183f275635eSYan, Zheng 		 * we need to wait until current writes complete */
1184f275635eSYan, Zheng 		if (wbc->sync_mode != WB_SYNC_NONE &&
1185f275635eSYan, Zheng 		    start_index == 0 && /* all dirty pages were checked */
1186f275635eSYan, Zheng 		    !ceph_wbc.head_snapc) {
1187f275635eSYan, Zheng 			struct page *page;
1188f275635eSYan, Zheng 			unsigned i, nr;
1189f275635eSYan, Zheng 			index = 0;
1190f275635eSYan, Zheng 			while ((index <= end) &&
1191f275635eSYan, Zheng 			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
119267fd707fSJan Kara 						PAGECACHE_TAG_WRITEBACK))) {
1193f275635eSYan, Zheng 				for (i = 0; i < nr; i++) {
1194f275635eSYan, Zheng 					page = pvec.pages[i];
1195f275635eSYan, Zheng 					if (page_snap_context(page) != snapc)
1196f275635eSYan, Zheng 						continue;
1197f275635eSYan, Zheng 					wait_on_page_writeback(page);
1198f275635eSYan, Zheng 				}
1199f275635eSYan, Zheng 				pagevec_release(&pvec);
1200f275635eSYan, Zheng 				cond_resched();
1201f275635eSYan, Zheng 			}
1202f275635eSYan, Zheng 		}
1203f275635eSYan, Zheng 
12042a2d927eSYan, Zheng 		start_index = 0;
12051d3576fdSSage Weil 		index = 0;
12061d3576fdSSage Weil 		goto retry;
12071d3576fdSSage Weil 	}
12081d3576fdSSage Weil 
12091d3576fdSSage Weil 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
12101d3576fdSSage Weil 		mapping->writeback_index = index;
12111d3576fdSSage Weil 
12121d3576fdSSage Weil out:
12131d3576fdSSage Weil 	ceph_osdc_put_request(req);
12142a2d927eSYan, Zheng 	ceph_put_snap_context(last_snapc);
12152a2d927eSYan, Zheng 	dout("writepages done, rc = %d\n", rc);
12161d3576fdSSage Weil 	return rc;
12171d3576fdSSage Weil }
12181d3576fdSSage Weil 
12191d3576fdSSage Weil 
12201d3576fdSSage Weil 
12211d3576fdSSage Weil /*
12221d3576fdSSage Weil  * See if a given @snapc is either writeable, or already written.
12231d3576fdSSage Weil  */
12241d3576fdSSage Weil static int context_is_writeable_or_written(struct inode *inode,
12251d3576fdSSage Weil 					   struct ceph_snap_context *snapc)
12261d3576fdSSage Weil {
122705455e11SYan, Zheng 	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
12286298a337SSage Weil 	int ret = !oldest || snapc->seq <= oldest->seq;
12296298a337SSage Weil 
12306298a337SSage Weil 	ceph_put_snap_context(oldest);
12316298a337SSage Weil 	return ret;
12321d3576fdSSage Weil }
12331d3576fdSSage Weil 
123418d620f0SJeff Layton /**
123518d620f0SJeff Layton  * ceph_find_incompatible - find an incompatible context and return it
123618d620f0SJeff Layton  * @page: page being dirtied
123718d620f0SJeff Layton  *
123818d620f0SJeff Layton  * We are only allowed to write into/dirty a page if the page is
123918d620f0SJeff Layton  * clean, or already dirty within the same snap context. Returns a
124018d620f0SJeff Layton  * conflicting context if there is one, NULL if there isn't, or an
124118d620f0SJeff Layton  * ERR_PTR-encoded error on other errors.
124218d620f0SJeff Layton  *
124318d620f0SJeff Layton  * Must be called with page lock held.
124418d620f0SJeff Layton  */
124518d620f0SJeff Layton static struct ceph_snap_context *
1246d45156bfSJeff Layton ceph_find_incompatible(struct page *page)
124718d620f0SJeff Layton {
1248d45156bfSJeff Layton 	struct inode *inode = page->mapping->host;
124918d620f0SJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
125018d620f0SJeff Layton 
12515d6451b1SJeff Layton 	if (ceph_inode_is_shutdown(inode)) {
12525d6451b1SJeff Layton 		dout(" page %p %llx:%llx is shutdown\n", page,
12535d6451b1SJeff Layton 		     ceph_vinop(inode));
12545d6451b1SJeff Layton 		return ERR_PTR(-ESTALE);
125518d620f0SJeff Layton 	}
125618d620f0SJeff Layton 
125718d620f0SJeff Layton 	for (;;) {
125818d620f0SJeff Layton 		struct ceph_snap_context *snapc, *oldest;
125918d620f0SJeff Layton 
126018d620f0SJeff Layton 		wait_on_page_writeback(page);
126118d620f0SJeff Layton 
126218d620f0SJeff Layton 		snapc = page_snap_context(page);
126318d620f0SJeff Layton 		if (!snapc || snapc == ci->i_head_snapc)
126418d620f0SJeff Layton 			break;
126518d620f0SJeff Layton 
126618d620f0SJeff Layton 		/*
126718d620f0SJeff Layton 		 * this page is already dirty in another (older) snap
126818d620f0SJeff Layton 		 * context!  is it writeable now?
126918d620f0SJeff Layton 		 */
127018d620f0SJeff Layton 		oldest = get_oldest_context(inode, NULL, NULL);
127118d620f0SJeff Layton 		if (snapc->seq > oldest->seq) {
127218d620f0SJeff Layton 			/* not writeable -- return it for the caller to deal with */
127318d620f0SJeff Layton 			ceph_put_snap_context(oldest);
127418d620f0SJeff Layton 			dout(" page %p snapc %p not current or oldest\n", page, snapc);
127518d620f0SJeff Layton 			return ceph_get_snap_context(snapc);
127618d620f0SJeff Layton 		}
127718d620f0SJeff Layton 		ceph_put_snap_context(oldest);
127818d620f0SJeff Layton 
127918d620f0SJeff Layton 		/* yay, writeable, do it now (without dropping page lock) */
128018d620f0SJeff Layton 		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
128118d620f0SJeff Layton 		if (clear_page_dirty_for_io(page)) {
128218d620f0SJeff Layton 			int r = writepage_nounlock(page, NULL);
128318d620f0SJeff Layton 			if (r < 0)
128418d620f0SJeff Layton 				return ERR_PTR(r);
128518d620f0SJeff Layton 		}
128618d620f0SJeff Layton 	}
128718d620f0SJeff Layton 	return NULL;
128818d620f0SJeff Layton }
128918d620f0SJeff Layton 
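/*
 * Before a buffered write proceeds, check whether the target folio is
 * already dirty in an older, incompatible snap context.  If so, drop the
 * folio, kick writeback and wait until that context is writeable or
 * written, then return -EAGAIN so the caller retries with a fresh folio.
 */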
1290d801327dSJeff Layton static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
1291fac47b43SXiubo Li 					struct folio **foliop, void **_fsdata)
1292d801327dSJeff Layton {
1293d801327dSJeff Layton 	struct inode *inode = file_inode(file);
1294d801327dSJeff Layton 	struct ceph_inode_info *ci = ceph_inode(inode);
1295d801327dSJeff Layton 	struct ceph_snap_context *snapc;
1296d801327dSJeff Layton 
1297fac47b43SXiubo Li 	snapc = ceph_find_incompatible(folio_page(*foliop, 0));
1298d801327dSJeff Layton 	if (snapc) {
1299d801327dSJeff Layton 		int r;
1300d801327dSJeff Layton 
1301fac47b43SXiubo Li 		folio_unlock(*foliop);
1302fac47b43SXiubo Li 		folio_put(*foliop);
1303fac47b43SXiubo Li 		*foliop = NULL;
1304d801327dSJeff Layton 		if (IS_ERR(snapc))
1305d801327dSJeff Layton 			return PTR_ERR(snapc);
1306d801327dSJeff Layton 
1307d801327dSJeff Layton 		ceph_queue_writeback(inode);
1308d801327dSJeff Layton 		r = wait_event_killable(ci->i_cap_wq,
1309d801327dSJeff Layton 					context_is_writeable_or_written(inode, snapc));
1310d801327dSJeff Layton 		ceph_put_snap_context(snapc);
1311d801327dSJeff Layton 		return r == 0 ? -EAGAIN : r;
1312d801327dSJeff Layton 	}
1313d801327dSJeff Layton 	return 0;
1314d801327dSJeff Layton }
1315d801327dSJeff Layton 
13161d3576fdSSage Weil /*
13171d3576fdSSage Weil  * We are only allowed to write into/dirty the page if the page is
13181d3576fdSSage Weil  * clean, or already dirty within the same snap context.
13194af6b225SYehuda Sadeh  */
13204af6b225SYehuda Sadeh static int ceph_write_begin(struct file *file, struct address_space *mapping,
13219d6b0cd7SMatthew Wilcox (Oracle) 			    loff_t pos, unsigned len,
13224af6b225SYehuda Sadeh 			    struct page **pagep, void **fsdata)
13234af6b225SYehuda Sadeh {
1324496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
1325e81fb419SLinus Torvalds 	struct ceph_inode_info *ci = ceph_inode(inode);
132678525c74SDavid Howells 	struct folio *folio = NULL;
1327d801327dSJeff Layton 	int r;
13284af6b225SYehuda Sadeh 
1329e81fb419SLinus Torvalds 	r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
1330d801327dSJeff Layton 	if (r == 0)
133178525c74SDavid Howells 		folio_wait_fscache(folio);
13321cc16990SJeff Layton 	if (r < 0) {
133378525c74SDavid Howells 		if (folio)
133478525c74SDavid Howells 			folio_put(folio);
13351cc16990SJeff Layton 	} else {
133678525c74SDavid Howells 		WARN_ON_ONCE(!folio_test_locked(folio));
133778525c74SDavid Howells 		*pagep = &folio->page;
13381cc16990SJeff Layton 	}
13394af6b225SYehuda Sadeh 	return r;
13404af6b225SYehuda Sadeh }
13414af6b225SYehuda Sadeh 
13424af6b225SYehuda Sadeh /*
13431d3576fdSSage Weil  * we don't do anything in here that simple_write_end doesn't do
13445dda377cSYan, Zheng  * except adjust dirty page accounting
13451d3576fdSSage Weil  */
13461d3576fdSSage Weil static int ceph_write_end(struct file *file, struct address_space *mapping,
13471d3576fdSSage Weil 			  loff_t pos, unsigned len, unsigned copied,
134878525c74SDavid Howells 			  struct page *subpage, void *fsdata)
13491d3576fdSSage Weil {
135078525c74SDavid Howells 	struct folio *folio = page_folio(subpage);
1351496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
1352efb0ca76SYan, Zheng 	bool check_cap = false;
13531d3576fdSSage Weil 
135478525c74SDavid Howells 	dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
135578525c74SDavid Howells 	     inode, folio, (int)pos, (int)copied, (int)len);
13561d3576fdSSage Weil 
135778525c74SDavid Howells 	if (!folio_test_uptodate(folio)) {
1358ce3a8732SJeff Layton 		/* just return that nothing was copied on a short copy */
1359b9de313cSAl Viro 		if (copied < len) {
1360b9de313cSAl Viro 			copied = 0;
1361b9de313cSAl Viro 			goto out;
1362b9de313cSAl Viro 		}
136378525c74SDavid Howells 		folio_mark_uptodate(folio);
1364b9de313cSAl Viro 	}
13651d3576fdSSage Weil 
13661d3576fdSSage Weil 	/* did file size increase? */
136799c88e69SYan, Zheng 	if (pos+copied > i_size_read(inode))
13681d3576fdSSage Weil 		check_cap = ceph_inode_set_size(inode, pos+copied);
13691d3576fdSSage Weil 
137078525c74SDavid Howells 	folio_mark_dirty(folio);
13711d3576fdSSage Weil 
1372b9de313cSAl Viro out:
137378525c74SDavid Howells 	folio_unlock(folio);
137478525c74SDavid Howells 	folio_put(folio);
13751d3576fdSSage Weil 
13761d3576fdSSage Weil 	if (check_cap)
13771d3576fdSSage Weil 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
13781d3576fdSSage Weil 
13791d3576fdSSage Weil 	return copied;
13801d3576fdSSage Weil }
13811d3576fdSSage Weil 
13821d3576fdSSage Weil const struct address_space_operations ceph_aops = {
13836c62371bSMatthew Wilcox (Oracle) 	.read_folio = netfs_read_folio,
1384bc899ee1SDavid Howells 	.readahead = netfs_readahead,
13851d3576fdSSage Weil 	.writepage = ceph_writepage,
13861d3576fdSSage Weil 	.writepages = ceph_writepages_start,
13871d3576fdSSage Weil 	.write_begin = ceph_write_begin,
13881d3576fdSSage Weil 	.write_end = ceph_write_end,
13898fb72b4aSMatthew Wilcox (Oracle) 	.dirty_folio = ceph_dirty_folio,
13909872f4deSMatthew Wilcox (Oracle) 	.invalidate_folio = ceph_invalidate_folio,
13915e414655SMatthew Wilcox (Oracle) 	.release_folio = ceph_release_folio,
13929c43ff44SJeff Layton 	.direct_IO = noop_direct_IO,
13931d3576fdSSage Weil };
13941d3576fdSSage Weil 
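/*
 * Block all signals except SIGKILL around the fault handlers below;
 * ceph_restore_sigs() puts the original mask back.
 */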
13954f7e89f6SYan, Zheng static void ceph_block_sigs(sigset_t *oldset)
13964f7e89f6SYan, Zheng {
13974f7e89f6SYan, Zheng 	sigset_t mask;
13984f7e89f6SYan, Zheng 	siginitsetinv(&mask, sigmask(SIGKILL));
13994f7e89f6SYan, Zheng 	sigprocmask(SIG_BLOCK, &mask, oldset);
14004f7e89f6SYan, Zheng }
14014f7e89f6SYan, Zheng 
14024f7e89f6SYan, Zheng static void ceph_restore_sigs(sigset_t *oldset)
14034f7e89f6SYan, Zheng {
14044f7e89f6SYan, Zheng 	sigprocmask(SIG_SETMASK, oldset, NULL);
14054f7e89f6SYan, Zheng }
14061d3576fdSSage Weil 
14071d3576fdSSage Weil /*
14081d3576fdSSage Weil  * vm ops
14091d3576fdSSage Weil  */
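/*
 * Read fault: take CEPH_CAP_FILE_CACHE (or LAZYIO) references and let
 * filemap_fault() do the work.  If the file still has inline data and we
 * did not get those caps, the inline data is fetched with getattr and
 * served from page 0 instead.
 */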
141024499847SSouptick Joarder static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
141161f68816SYan, Zheng {
141211bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
141361f68816SYan, Zheng 	struct inode *inode = file_inode(vma->vm_file);
141461f68816SYan, Zheng 	struct ceph_inode_info *ci = ceph_inode(inode);
141561f68816SYan, Zheng 	struct ceph_file_info *fi = vma->vm_file->private_data;
1416c403c3a2SMatthew Wilcox (Oracle) 	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
141724499847SSouptick Joarder 	int want, got, err;
14184f7e89f6SYan, Zheng 	sigset_t oldset;
141924499847SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
14204f7e89f6SYan, Zheng 
14215d6451b1SJeff Layton 	if (ceph_inode_is_shutdown(inode))
14225d6451b1SJeff Layton 		return ret;
14235d6451b1SJeff Layton 
14244f7e89f6SYan, Zheng 	ceph_block_sigs(&oldset);
142561f68816SYan, Zheng 
14268ff2d290SJeff Layton 	dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
14278ff2d290SJeff Layton 	     inode, ceph_vinop(inode), off);
142861f68816SYan, Zheng 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
142961f68816SYan, Zheng 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
143061f68816SYan, Zheng 	else
143161f68816SYan, Zheng 		want = CEPH_CAP_FILE_CACHE;
14324f7e89f6SYan, Zheng 
143361f68816SYan, Zheng 	got = 0;
1434e72968e1SJeff Layton 	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
143524499847SSouptick Joarder 	if (err < 0)
14364f7e89f6SYan, Zheng 		goto out_restore;
14376ce026e4SYan, Zheng 
14388ff2d290SJeff Layton 	dout("filemap_fault %p %llu got cap refs on %s\n",
14398ff2d290SJeff Layton 	     inode, off, ceph_cap_string(got));
144061f68816SYan, Zheng 
144183701246SYan, Zheng 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
14422b1ac852SYan, Zheng 	    ci->i_inline_version == CEPH_INLINE_NONE) {
14435d988308SYan, Zheng 		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
14445d988308SYan, Zheng 		ceph_add_rw_context(fi, &rw_ctx);
144511bac800SDave Jiang 		ret = filemap_fault(vmf);
14465d988308SYan, Zheng 		ceph_del_rw_context(fi, &rw_ctx);
14478ff2d290SJeff Layton 		dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
14488ff2d290SJeff Layton 		     inode, off, ceph_cap_string(got), ret);
14492b1ac852SYan, Zheng 	} else
145024499847SSouptick Joarder 		err = -EAGAIN;
145161f68816SYan, Zheng 
145261f68816SYan, Zheng 	ceph_put_cap_refs(ci, got);
145361f68816SYan, Zheng 
145424499847SSouptick Joarder 	if (err != -EAGAIN)
14554f7e89f6SYan, Zheng 		goto out_restore;
145683701246SYan, Zheng 
145783701246SYan, Zheng 	/* read inline data */
145809cbfeafSKirill A. Shutemov 	if (off >= PAGE_SIZE) {
145983701246SYan, Zheng 		/* does not support inline data > PAGE_SIZE */
146083701246SYan, Zheng 		ret = VM_FAULT_SIGBUS;
146183701246SYan, Zheng 	} else {
146283701246SYan, Zheng 		struct address_space *mapping = inode->i_mapping;
1463057ba5b2SJan Kara 		struct page *page;
1464057ba5b2SJan Kara 
1465057ba5b2SJan Kara 		filemap_invalidate_lock_shared(mapping);
1466057ba5b2SJan Kara 		page = find_or_create_page(mapping, 0,
1467057ba5b2SJan Kara 				mapping_gfp_constraint(mapping, ~__GFP_FS));
146883701246SYan, Zheng 		if (!page) {
146983701246SYan, Zheng 			ret = VM_FAULT_OOM;
14704f7e89f6SYan, Zheng 			goto out_inline;
147183701246SYan, Zheng 		}
147224499847SSouptick Joarder 		err = __ceph_do_getattr(inode, page,
147383701246SYan, Zheng 					 CEPH_STAT_CAP_INLINE_DATA, true);
147424499847SSouptick Joarder 		if (err < 0 || off >= i_size_read(inode)) {
147583701246SYan, Zheng 			unlock_page(page);
147609cbfeafSKirill A. Shutemov 			put_page(page);
1477c64a2b05SSouptick Joarder 			ret = vmf_error(err);
14784f7e89f6SYan, Zheng 			goto out_inline;
147983701246SYan, Zheng 		}
148024499847SSouptick Joarder 		if (err < PAGE_SIZE)
148124499847SSouptick Joarder 			zero_user_segment(page, err, PAGE_SIZE);
148283701246SYan, Zheng 		else
148383701246SYan, Zheng 			flush_dcache_page(page);
148483701246SYan, Zheng 		SetPageUptodate(page);
148583701246SYan, Zheng 		vmf->page = page;
148683701246SYan, Zheng 		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
14874f7e89f6SYan, Zheng out_inline:
1488057ba5b2SJan Kara 		filemap_invalidate_unlock_shared(mapping);
14898ff2d290SJeff Layton 		dout("filemap_fault %p %llu read inline data ret %x\n",
14908ff2d290SJeff Layton 		     inode, off, ret);
14914f7e89f6SYan, Zheng 	}
14924f7e89f6SYan, Zheng out_restore:
14934f7e89f6SYan, Zheng 	ceph_restore_sigs(&oldset);
149424499847SSouptick Joarder 	if (err < 0)
149524499847SSouptick Joarder 		ret = vmf_error(err);
14966ce026e4SYan, Zheng 
149761f68816SYan, Zheng 	return ret;
149861f68816SYan, Zheng }
14991d3576fdSSage Weil 
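/*
 * Write fault: take CEPH_CAP_FILE_BUFFER (or LAZYIO) references, then
 * dirty the page under the page lock.  If the page is already dirty in
 * an older snap context, wait for that context to be written out and
 * retry before dirtying it again.
 */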
150024499847SSouptick Joarder static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
15011d3576fdSSage Weil {
150211bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1503496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
150461f68816SYan, Zheng 	struct ceph_inode_info *ci = ceph_inode(inode);
150561f68816SYan, Zheng 	struct ceph_file_info *fi = vma->vm_file->private_data;
1506f66fd9f0SYan, Zheng 	struct ceph_cap_flush *prealloc_cf;
150761f68816SYan, Zheng 	struct page *page = vmf->page;
15086285bc23SAlex Elder 	loff_t off = page_offset(page);
150961f68816SYan, Zheng 	loff_t size = i_size_read(inode);
151061f68816SYan, Zheng 	size_t len;
151124499847SSouptick Joarder 	int want, got, err;
15124f7e89f6SYan, Zheng 	sigset_t oldset;
151324499847SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
15141d3576fdSSage Weil 
15155d6451b1SJeff Layton 	if (ceph_inode_is_shutdown(inode))
15165d6451b1SJeff Layton 		return ret;
15175d6451b1SJeff Layton 
1518f66fd9f0SYan, Zheng 	prealloc_cf = ceph_alloc_cap_flush();
1519f66fd9f0SYan, Zheng 	if (!prealloc_cf)
15206ce026e4SYan, Zheng 		return VM_FAULT_OOM;
1521f66fd9f0SYan, Zheng 
1522249c1df5SJeff Layton 	sb_start_pagefault(inode->i_sb);
15234f7e89f6SYan, Zheng 	ceph_block_sigs(&oldset);
15241d3576fdSSage Weil 
15258ff2d290SJeff Layton 	if (off + thp_size(page) <= size)
15268ff2d290SJeff Layton 		len = thp_size(page);
15271d3576fdSSage Weil 	else
15288ff2d290SJeff Layton 		len = offset_in_thp(page, size);
15291d3576fdSSage Weil 
153061f68816SYan, Zheng 	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
153161f68816SYan, Zheng 	     inode, ceph_vinop(inode), off, len, size);
153261f68816SYan, Zheng 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
153361f68816SYan, Zheng 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
153461f68816SYan, Zheng 	else
153561f68816SYan, Zheng 		want = CEPH_CAP_FILE_BUFFER;
15364f7e89f6SYan, Zheng 
153761f68816SYan, Zheng 	got = 0;
1538e72968e1SJeff Layton 	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
153924499847SSouptick Joarder 	if (err < 0)
1540f66fd9f0SYan, Zheng 		goto out_free;
15416ce026e4SYan, Zheng 
154261f68816SYan, Zheng 	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
154361f68816SYan, Zheng 	     inode, off, len, ceph_cap_string(got));
154461f68816SYan, Zheng 
154561f68816SYan, Zheng 	/* Update time before taking page lock */
154661f68816SYan, Zheng 	file_update_time(vma->vm_file);
15475c308356SJeff Layton 	inode_inc_iversion_raw(inode);
15484af6b225SYehuda Sadeh 
1549f0b33df5SYan, Zheng 	do {
1550d45156bfSJeff Layton 		struct ceph_snap_context *snapc;
1551d45156bfSJeff Layton 
15524af6b225SYehuda Sadeh 		lock_page(page);
15534af6b225SYehuda Sadeh 
1554cb03c143SAndreas Gruenbacher 		if (page_mkwrite_check_truncate(page, inode) < 0) {
1555f9cac5acSYan, Zheng 			unlock_page(page);
15566ce026e4SYan, Zheng 			ret = VM_FAULT_NOPAGE;
1557f0b33df5SYan, Zheng 			break;
1558f9cac5acSYan, Zheng 		}
15594af6b225SYehuda Sadeh 
1560d45156bfSJeff Layton 		snapc = ceph_find_incompatible(page);
1561d45156bfSJeff Layton 		if (!snapc) {
15624af6b225SYehuda Sadeh 			/* success.  we'll keep the page locked. */
15631d3576fdSSage Weil 			set_page_dirty(page);
15641d3576fdSSage Weil 			ret = VM_FAULT_LOCKED;
1565d45156bfSJeff Layton 			break;
15661d3576fdSSage Weil 		}
1567d45156bfSJeff Layton 
1568d45156bfSJeff Layton 		unlock_page(page);
1569d45156bfSJeff Layton 
1570d45156bfSJeff Layton 		if (IS_ERR(snapc)) {
1571d45156bfSJeff Layton 			ret = VM_FAULT_SIGBUS;
1572d45156bfSJeff Layton 			break;
1573d45156bfSJeff Layton 		}
1574d45156bfSJeff Layton 
1575d45156bfSJeff Layton 		ceph_queue_writeback(inode);
1576d45156bfSJeff Layton 		err = wait_event_killable(ci->i_cap_wq,
1577d45156bfSJeff Layton 				context_is_writeable_or_written(inode, snapc));
1578d45156bfSJeff Layton 		ceph_put_snap_context(snapc);
1579d45156bfSJeff Layton 	} while (err == 0);
1580f0b33df5SYan, Zheng 
1581083db6fdSDavid Howells 	if (ret == VM_FAULT_LOCKED) {
158261f68816SYan, Zheng 		int dirty;
158361f68816SYan, Zheng 		spin_lock(&ci->i_ceph_lock);
1584f66fd9f0SYan, Zheng 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1585f66fd9f0SYan, Zheng 					       &prealloc_cf);
158661f68816SYan, Zheng 		spin_unlock(&ci->i_ceph_lock);
158761f68816SYan, Zheng 		if (dirty)
158861f68816SYan, Zheng 			__mark_inode_dirty(inode, dirty);
158961f68816SYan, Zheng 	}
159061f68816SYan, Zheng 
159124499847SSouptick Joarder 	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
159261f68816SYan, Zheng 	     inode, off, len, ceph_cap_string(got), ret);
1593a8810cdcSJeff Layton 	ceph_put_cap_refs_async(ci, got);
1594f66fd9f0SYan, Zheng out_free:
15954f7e89f6SYan, Zheng 	ceph_restore_sigs(&oldset);
1596249c1df5SJeff Layton 	sb_end_pagefault(inode->i_sb);
1597f66fd9f0SYan, Zheng 	ceph_free_cap_flush(prealloc_cf);
159824499847SSouptick Joarder 	if (err < 0)
159924499847SSouptick Joarder 		ret = vmf_error(err);
16001d3576fdSSage Weil 	return ret;
16011d3576fdSSage Weil }
16021d3576fdSSage Weil 
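/*
 * Copy up to @len bytes of inline data into page 0 of the mapping.  When
 * no @locked_page is supplied the page is looked up (or created) here,
 * the remainder is zeroed and the page is marked uptodate before being
 * released.
 */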
160331c542a1SYan, Zheng void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
160431c542a1SYan, Zheng 			   char	*data, size_t len)
160531c542a1SYan, Zheng {
160631c542a1SYan, Zheng 	struct address_space *mapping = inode->i_mapping;
160731c542a1SYan, Zheng 	struct page *page;
160831c542a1SYan, Zheng 
160931c542a1SYan, Zheng 	if (locked_page) {
161031c542a1SYan, Zheng 		page = locked_page;
161131c542a1SYan, Zheng 	} else {
161231c542a1SYan, Zheng 		if (i_size_read(inode) == 0)
161331c542a1SYan, Zheng 			return;
161431c542a1SYan, Zheng 		page = find_or_create_page(mapping, 0,
1615c62d2555SMichal Hocko 					   mapping_gfp_constraint(mapping,
1616c62d2555SMichal Hocko 					   ~__GFP_FS));
161731c542a1SYan, Zheng 		if (!page)
161831c542a1SYan, Zheng 			return;
161931c542a1SYan, Zheng 		if (PageUptodate(page)) {
162031c542a1SYan, Zheng 			unlock_page(page);
162109cbfeafSKirill A. Shutemov 			put_page(page);
162231c542a1SYan, Zheng 			return;
162331c542a1SYan, Zheng 		}
162431c542a1SYan, Zheng 	}
162531c542a1SYan, Zheng 
16260668ff52SIlya Dryomov 	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
162731c542a1SYan, Zheng 	     inode, ceph_vinop(inode), len, locked_page);
162831c542a1SYan, Zheng 
162931c542a1SYan, Zheng 	if (len > 0) {
163031c542a1SYan, Zheng 		void *kaddr = kmap_atomic(page);
163131c542a1SYan, Zheng 		memcpy(kaddr, data, len);
163231c542a1SYan, Zheng 		kunmap_atomic(kaddr);
163331c542a1SYan, Zheng 	}
163431c542a1SYan, Zheng 
163531c542a1SYan, Zheng 	if (page != locked_page) {
163609cbfeafSKirill A. Shutemov 		if (len < PAGE_SIZE)
163709cbfeafSKirill A. Shutemov 			zero_user_segment(page, len, PAGE_SIZE);
163831c542a1SYan, Zheng 		else
163931c542a1SYan, Zheng 			flush_dcache_page(page);
164031c542a1SYan, Zheng 
164131c542a1SYan, Zheng 		SetPageUptodate(page);
164231c542a1SYan, Zheng 		unlock_page(page);
164309cbfeafSKirill A. Shutemov 		put_page(page);
164431c542a1SYan, Zheng 	}
164531c542a1SYan, Zheng }
164631c542a1SYan, Zheng 
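/*
 * Convert an inlined file into a normal one: read the inline data into
 * page 0, CREATE the first data object, then WRITE the data with a
 * CMPXATTR guard and SETXATTR update on the "inline_version" xattr.
 * On success i_inline_version becomes CEPH_INLINE_NONE and
 * CEPH_CAP_FILE_WR is marked dirty.
 */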
1647083db6fdSDavid Howells int ceph_uninline_data(struct file *file)
164828127bddSYan, Zheng {
1649083db6fdSDavid Howells 	struct inode *inode = file_inode(file);
165028127bddSYan, Zheng 	struct ceph_inode_info *ci = ceph_inode(inode);
165128127bddSYan, Zheng 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1652825978fdSXiubo Li 	struct ceph_osd_request *req = NULL;
1653083db6fdSDavid Howells 	struct ceph_cap_flush *prealloc_cf;
1654083db6fdSDavid Howells 	struct folio *folio = NULL;
1655c38af982SDan Carpenter 	u64 inline_version = CEPH_INLINE_NONE;
1656083db6fdSDavid Howells 	struct page *pages[1];
165728127bddSYan, Zheng 	int err = 0;
1658c38af982SDan Carpenter 	u64 len;
1659083db6fdSDavid Howells 
166028127bddSYan, Zheng 	spin_lock(&ci->i_ceph_lock);
166128127bddSYan, Zheng 	inline_version = ci->i_inline_version;
166228127bddSYan, Zheng 	spin_unlock(&ci->i_ceph_lock);
166328127bddSYan, Zheng 
166428127bddSYan, Zheng 	dout("uninline_data %p %llx.%llx inline_version %llu\n",
166528127bddSYan, Zheng 	     inode, ceph_vinop(inode), inline_version);
166628127bddSYan, Zheng 
1667825978fdSXiubo Li 	if (inline_version == CEPH_INLINE_NONE)
1668825978fdSXiubo Li 		return 0;
1669825978fdSXiubo Li 
1670825978fdSXiubo Li 	prealloc_cf = ceph_alloc_cap_flush();
1671825978fdSXiubo Li 	if (!prealloc_cf)
1672825978fdSXiubo Li 		return -ENOMEM;
1673825978fdSXiubo Li 
1674825978fdSXiubo Li 	if (inline_version == 1) /* initial version, no data */
1675825978fdSXiubo Li 		goto out_uninline;
1676825978fdSXiubo Li 
1677825978fdSXiubo Li 	folio = read_mapping_folio(inode->i_mapping, 0, file);
1678825978fdSXiubo Li 	if (IS_ERR(folio)) {
1679825978fdSXiubo Li 		err = PTR_ERR(folio);
1680825978fdSXiubo Li 		goto out;
1681825978fdSXiubo Li 	}
1682825978fdSXiubo Li 
1683825978fdSXiubo Li 	folio_lock(folio);
168428127bddSYan, Zheng 
168528127bddSYan, Zheng 	len = i_size_read(inode);
1686083db6fdSDavid Howells 	if (len > folio_size(folio))
1687083db6fdSDavid Howells 		len = folio_size(folio);
168828127bddSYan, Zheng 
168928127bddSYan, Zheng 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
169028127bddSYan, Zheng 				    ceph_vino(inode), 0, &len, 0, 1,
169154ea0046SIlya Dryomov 				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
169234b759b4SIlya Dryomov 				    NULL, 0, 0, false);
169328127bddSYan, Zheng 	if (IS_ERR(req)) {
169428127bddSYan, Zheng 		err = PTR_ERR(req);
1695083db6fdSDavid Howells 		goto out_unlock;
169628127bddSYan, Zheng 	}
169728127bddSYan, Zheng 
1698fac02ddfSArnd Bergmann 	req->r_mtime = inode->i_mtime;
169928127bddSYan, Zheng 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
170028127bddSYan, Zheng 	if (!err)
170128127bddSYan, Zheng 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
170228127bddSYan, Zheng 	ceph_osdc_put_request(req);
170328127bddSYan, Zheng 	if (err < 0)
1704083db6fdSDavid Howells 		goto out_unlock;
170528127bddSYan, Zheng 
170628127bddSYan, Zheng 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
170728127bddSYan, Zheng 				    ceph_vino(inode), 0, &len, 1, 3,
170854ea0046SIlya Dryomov 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
170934b759b4SIlya Dryomov 				    NULL, ci->i_truncate_seq,
171034b759b4SIlya Dryomov 				    ci->i_truncate_size, false);
171128127bddSYan, Zheng 	if (IS_ERR(req)) {
171228127bddSYan, Zheng 		err = PTR_ERR(req);
1713083db6fdSDavid Howells 		goto out_unlock;
171428127bddSYan, Zheng 	}
171528127bddSYan, Zheng 
1716083db6fdSDavid Howells 	pages[0] = folio_page(folio, 0);
1717083db6fdSDavid Howells 	osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);
171828127bddSYan, Zheng 
1719ec137c10SYan, Zheng 	{
1720ec137c10SYan, Zheng 		__le64 xattr_buf = cpu_to_le64(inline_version);
172128127bddSYan, Zheng 		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
1722ec137c10SYan, Zheng 					    "inline_version", &xattr_buf,
1723ec137c10SYan, Zheng 					    sizeof(xattr_buf),
172428127bddSYan, Zheng 					    CEPH_OSD_CMPXATTR_OP_GT,
172528127bddSYan, Zheng 					    CEPH_OSD_CMPXATTR_MODE_U64);
172628127bddSYan, Zheng 		if (err)
1727083db6fdSDavid Howells 			goto out_put_req;
1728ec137c10SYan, Zheng 	}
172928127bddSYan, Zheng 
1730ec137c10SYan, Zheng 	{
1731ec137c10SYan, Zheng 		char xattr_buf[32];
1732ec137c10SYan, Zheng 		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
1733ec137c10SYan, Zheng 					 "%llu", inline_version);
173428127bddSYan, Zheng 		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
1735ec137c10SYan, Zheng 					    "inline_version",
1736ec137c10SYan, Zheng 					    xattr_buf, xattr_len, 0, 0);
173728127bddSYan, Zheng 		if (err)
1738083db6fdSDavid Howells 			goto out_put_req;
1739ec137c10SYan, Zheng 	}
174028127bddSYan, Zheng 
1741fac02ddfSArnd Bergmann 	req->r_mtime = inode->i_mtime;
174228127bddSYan, Zheng 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
174328127bddSYan, Zheng 	if (!err)
174428127bddSYan, Zheng 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
174597e27aaaSXiubo Li 
17468ae99ae2SXiubo Li 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1747903f4fecSXiubo Li 				  req->r_end_latency, len, err);
174897e27aaaSXiubo Li 
1749825978fdSXiubo Li out_uninline:
1750083db6fdSDavid Howells 	if (!err) {
1751083db6fdSDavid Howells 		int dirty;
1752083db6fdSDavid Howells 
1753083db6fdSDavid Howells 		/* Set to CEPH_INLINE_NONE and dirty the caps */
1754083db6fdSDavid Howells 		down_read(&fsc->mdsc->snap_rwsem);
1755083db6fdSDavid Howells 		spin_lock(&ci->i_ceph_lock);
1756083db6fdSDavid Howells 		ci->i_inline_version = CEPH_INLINE_NONE;
1757083db6fdSDavid Howells 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
1758083db6fdSDavid Howells 		spin_unlock(&ci->i_ceph_lock);
1759083db6fdSDavid Howells 		up_read(&fsc->mdsc->snap_rwsem);
1760083db6fdSDavid Howells 		if (dirty)
1761083db6fdSDavid Howells 			__mark_inode_dirty(inode, dirty);
1762083db6fdSDavid Howells 	}
1763083db6fdSDavid Howells out_put_req:
176428127bddSYan, Zheng 	ceph_osdc_put_request(req);
176528127bddSYan, Zheng 	if (err == -ECANCELED)
176628127bddSYan, Zheng 		err = 0;
1767083db6fdSDavid Howells out_unlock:
1768825978fdSXiubo Li 	if (folio) {
1769083db6fdSDavid Howells 		folio_unlock(folio);
1770083db6fdSDavid Howells 		folio_put(folio);
1771825978fdSXiubo Li 	}
177228127bddSYan, Zheng out:
1773083db6fdSDavid Howells 	ceph_free_cap_flush(prealloc_cf);
177428127bddSYan, Zheng 	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
177528127bddSYan, Zheng 	     inode, ceph_vinop(inode), inline_version, err);
177628127bddSYan, Zheng 	return err;
177728127bddSYan, Zheng }
177828127bddSYan, Zheng 
17797cbea8dcSKirill A. Shutemov static const struct vm_operations_struct ceph_vmops = {
178061f68816SYan, Zheng 	.fault		= ceph_filemap_fault,
17811d3576fdSSage Weil 	.page_mkwrite	= ceph_page_mkwrite,
17821d3576fdSSage Weil };
17831d3576fdSSage Weil 
17841d3576fdSSage Weil int ceph_mmap(struct file *file, struct vm_area_struct *vma)
17851d3576fdSSage Weil {
17861d3576fdSSage Weil 	struct address_space *mapping = file->f_mapping;
17871d3576fdSSage Weil 
17887e0a1265SMatthew Wilcox (Oracle) 	if (!mapping->a_ops->read_folio)
17891d3576fdSSage Weil 		return -ENOEXEC;
17901d3576fdSSage Weil 	vma->vm_ops = &ceph_vmops;
17911d3576fdSSage Weil 	return 0;
17921d3576fdSSage Weil }
179310183a69SYan, Zheng 
179410183a69SYan, Zheng enum {
179510183a69SYan, Zheng 	POOL_READ	= 1,
179610183a69SYan, Zheng 	POOL_WRITE	= 2,
179710183a69SYan, Zheng };
179810183a69SYan, Zheng 
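/*
 * Look up, or probe and cache, our access rights for a data pool and
 * namespace.  Results live in mdsc->pool_perm_tree; on a miss we issue a
 * STAT read and an exclusive CREATE write against the inode's first
 * object and translate the outcomes into POOL_READ/POOL_WRITE bits.
 */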
1799779fe0fbSYan, Zheng static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
1800779fe0fbSYan, Zheng 				s64 pool, struct ceph_string *pool_ns)
180110183a69SYan, Zheng {
1802874c8ca1SDavid Howells 	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
180310183a69SYan, Zheng 	struct ceph_mds_client *mdsc = fsc->mdsc;
180410183a69SYan, Zheng 	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
180510183a69SYan, Zheng 	struct rb_node **p, *parent;
180610183a69SYan, Zheng 	struct ceph_pool_perm *perm;
180710183a69SYan, Zheng 	struct page **pages;
1808779fe0fbSYan, Zheng 	size_t pool_ns_len;
180910183a69SYan, Zheng 	int err = 0, err2 = 0, have = 0;
181010183a69SYan, Zheng 
181110183a69SYan, Zheng 	down_read(&mdsc->pool_perm_rwsem);
181210183a69SYan, Zheng 	p = &mdsc->pool_perm_tree.rb_node;
181310183a69SYan, Zheng 	while (*p) {
181410183a69SYan, Zheng 		perm = rb_entry(*p, struct ceph_pool_perm, node);
181510183a69SYan, Zheng 		if (pool < perm->pool)
181610183a69SYan, Zheng 			p = &(*p)->rb_left;
181710183a69SYan, Zheng 		else if (pool > perm->pool)
181810183a69SYan, Zheng 			p = &(*p)->rb_right;
181910183a69SYan, Zheng 		else {
1820779fe0fbSYan, Zheng 			int ret = ceph_compare_string(pool_ns,
1821779fe0fbSYan, Zheng 						perm->pool_ns,
1822779fe0fbSYan, Zheng 						perm->pool_ns_len);
1823779fe0fbSYan, Zheng 			if (ret < 0)
1824779fe0fbSYan, Zheng 				p = &(*p)->rb_left;
1825779fe0fbSYan, Zheng 			else if (ret > 0)
1826779fe0fbSYan, Zheng 				p = &(*p)->rb_right;
1827779fe0fbSYan, Zheng 			else {
182810183a69SYan, Zheng 				have = perm->perm;
182910183a69SYan, Zheng 				break;
183010183a69SYan, Zheng 			}
183110183a69SYan, Zheng 		}
1832779fe0fbSYan, Zheng 	}
183310183a69SYan, Zheng 	up_read(&mdsc->pool_perm_rwsem);
183410183a69SYan, Zheng 	if (*p)
183510183a69SYan, Zheng 		goto out;
183610183a69SYan, Zheng 
1837779fe0fbSYan, Zheng 	if (pool_ns)
1838779fe0fbSYan, Zheng 		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
1839779fe0fbSYan, Zheng 		     pool, (int)pool_ns->len, pool_ns->str);
1840779fe0fbSYan, Zheng 	else
18417627151eSYan, Zheng 		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
184210183a69SYan, Zheng 
184310183a69SYan, Zheng 	down_write(&mdsc->pool_perm_rwsem);
1844779fe0fbSYan, Zheng 	p = &mdsc->pool_perm_tree.rb_node;
184510183a69SYan, Zheng 	parent = NULL;
184610183a69SYan, Zheng 	while (*p) {
184710183a69SYan, Zheng 		parent = *p;
184810183a69SYan, Zheng 		perm = rb_entry(parent, struct ceph_pool_perm, node);
184910183a69SYan, Zheng 		if (pool < perm->pool)
185010183a69SYan, Zheng 			p = &(*p)->rb_left;
185110183a69SYan, Zheng 		else if (pool > perm->pool)
185210183a69SYan, Zheng 			p = &(*p)->rb_right;
185310183a69SYan, Zheng 		else {
1854779fe0fbSYan, Zheng 			int ret = ceph_compare_string(pool_ns,
1855779fe0fbSYan, Zheng 						perm->pool_ns,
1856779fe0fbSYan, Zheng 						perm->pool_ns_len);
1857779fe0fbSYan, Zheng 			if (ret < 0)
1858779fe0fbSYan, Zheng 				p = &(*p)->rb_left;
1859779fe0fbSYan, Zheng 			else if (ret > 0)
1860779fe0fbSYan, Zheng 				p = &(*p)->rb_right;
1861779fe0fbSYan, Zheng 			else {
186210183a69SYan, Zheng 				have = perm->perm;
186310183a69SYan, Zheng 				break;
186410183a69SYan, Zheng 			}
186510183a69SYan, Zheng 		}
1866779fe0fbSYan, Zheng 	}
186710183a69SYan, Zheng 	if (*p) {
186810183a69SYan, Zheng 		up_write(&mdsc->pool_perm_rwsem);
186910183a69SYan, Zheng 		goto out;
187010183a69SYan, Zheng 	}
187110183a69SYan, Zheng 
187234b759b4SIlya Dryomov 	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
187310183a69SYan, Zheng 					 1, false, GFP_NOFS);
187410183a69SYan, Zheng 	if (!rd_req) {
187510183a69SYan, Zheng 		err = -ENOMEM;
187610183a69SYan, Zheng 		goto out_unlock;
187710183a69SYan, Zheng 	}
187810183a69SYan, Zheng 
187910183a69SYan, Zheng 	rd_req->r_flags = CEPH_OSD_FLAG_READ;
188010183a69SYan, Zheng 	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
188110183a69SYan, Zheng 	rd_req->r_base_oloc.pool = pool;
1882779fe0fbSYan, Zheng 	if (pool_ns)
1883779fe0fbSYan, Zheng 		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
1884d30291b9SIlya Dryomov 	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
188510183a69SYan, Zheng 
188613d1ad16SIlya Dryomov 	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
188713d1ad16SIlya Dryomov 	if (err)
188813d1ad16SIlya Dryomov 		goto out_unlock;
188910183a69SYan, Zheng 
189034b759b4SIlya Dryomov 	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
189110183a69SYan, Zheng 					 1, false, GFP_NOFS);
189210183a69SYan, Zheng 	if (!wr_req) {
189310183a69SYan, Zheng 		err = -ENOMEM;
189410183a69SYan, Zheng 		goto out_unlock;
189510183a69SYan, Zheng 	}
189610183a69SYan, Zheng 
189754ea0046SIlya Dryomov 	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
189810183a69SYan, Zheng 	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
189963244fa1SIlya Dryomov 	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
1900d30291b9SIlya Dryomov 	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
190110183a69SYan, Zheng 
190213d1ad16SIlya Dryomov 	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
190313d1ad16SIlya Dryomov 	if (err)
190413d1ad16SIlya Dryomov 		goto out_unlock;
190510183a69SYan, Zheng 
190610183a69SYan, Zheng 	/* one page should be large enough for STAT data */
190710183a69SYan, Zheng 	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
190810183a69SYan, Zheng 	if (IS_ERR(pages)) {
190910183a69SYan, Zheng 		err = PTR_ERR(pages);
191010183a69SYan, Zheng 		goto out_unlock;
191110183a69SYan, Zheng 	}
191210183a69SYan, Zheng 
191310183a69SYan, Zheng 	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
191410183a69SYan, Zheng 				     0, false, true);
191510183a69SYan, Zheng 	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
191610183a69SYan, Zheng 
1917874c8ca1SDavid Howells 	wr_req->r_mtime = ci->netfs.inode.i_mtime;
191810183a69SYan, Zheng 	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
191910183a69SYan, Zheng 
192010183a69SYan, Zheng 	if (!err)
192110183a69SYan, Zheng 		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
192210183a69SYan, Zheng 	if (!err2)
192310183a69SYan, Zheng 		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
192410183a69SYan, Zheng 
192510183a69SYan, Zheng 	if (err >= 0 || err == -ENOENT)
192610183a69SYan, Zheng 		have |= POOL_READ;
1927131d7eb4SYan, Zheng 	else if (err != -EPERM) {
19280b98acd6SIlya Dryomov 		if (err == -EBLOCKLISTED)
19290b98acd6SIlya Dryomov 			fsc->blocklisted = true;
193010183a69SYan, Zheng 		goto out_unlock;
1931131d7eb4SYan, Zheng 	}
193210183a69SYan, Zheng 
193310183a69SYan, Zheng 	if (err2 == 0 || err2 == -EEXIST)
193410183a69SYan, Zheng 		have |= POOL_WRITE;
193510183a69SYan, Zheng 	else if (err2 != -EPERM) {
19360b98acd6SIlya Dryomov 		if (err2 == -EBLOCKLISTED)
19370b98acd6SIlya Dryomov 			fsc->blocklisted = true;
193810183a69SYan, Zheng 		err = err2;
193910183a69SYan, Zheng 		goto out_unlock;
194010183a69SYan, Zheng 	}
194110183a69SYan, Zheng 
1942779fe0fbSYan, Zheng 	pool_ns_len = pool_ns ? pool_ns->len : 0;
1943779fe0fbSYan, Zheng 	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
194410183a69SYan, Zheng 	if (!perm) {
194510183a69SYan, Zheng 		err = -ENOMEM;
194610183a69SYan, Zheng 		goto out_unlock;
194710183a69SYan, Zheng 	}
194810183a69SYan, Zheng 
194910183a69SYan, Zheng 	perm->pool = pool;
195010183a69SYan, Zheng 	perm->perm = have;
1951779fe0fbSYan, Zheng 	perm->pool_ns_len = pool_ns_len;
1952779fe0fbSYan, Zheng 	if (pool_ns_len > 0)
1953779fe0fbSYan, Zheng 		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
1954779fe0fbSYan, Zheng 	perm->pool_ns[pool_ns_len] = 0;
1955779fe0fbSYan, Zheng 
195610183a69SYan, Zheng 	rb_link_node(&perm->node, parent, p);
195710183a69SYan, Zheng 	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
195810183a69SYan, Zheng 	err = 0;
195910183a69SYan, Zheng out_unlock:
196010183a69SYan, Zheng 	up_write(&mdsc->pool_perm_rwsem);
196110183a69SYan, Zheng 
196210183a69SYan, Zheng 	ceph_osdc_put_request(rd_req);
196310183a69SYan, Zheng 	ceph_osdc_put_request(wr_req);
196410183a69SYan, Zheng out:
196510183a69SYan, Zheng 	if (!err)
196610183a69SYan, Zheng 		err = have;
1967779fe0fbSYan, Zheng 	if (pool_ns)
1968779fe0fbSYan, Zheng 		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
1969779fe0fbSYan, Zheng 		     pool, (int)pool_ns->len, pool_ns->str, err);
1970779fe0fbSYan, Zheng 	else
19717627151eSYan, Zheng 		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
197210183a69SYan, Zheng 	return err;
197310183a69SYan, Zheng }
197410183a69SYan, Zheng 
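/*
 * Check that the client may read and/or write the pool backing this
 * inode.  The result is cached in i_ceph_flags (CEPH_I_POOL_RD/WR); the
 * check is skipped for non-regular files, snapshots and nopoolperm
 * mounts.
 */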
19755e3ded1bSYan, Zheng int ceph_pool_perm_check(struct inode *inode, int need)
197610183a69SYan, Zheng {
19775e3ded1bSYan, Zheng 	struct ceph_inode_info *ci = ceph_inode(inode);
1978779fe0fbSYan, Zheng 	struct ceph_string *pool_ns;
19795e3ded1bSYan, Zheng 	s64 pool;
198010183a69SYan, Zheng 	int ret, flags;
198110183a69SYan, Zheng 
1982e9b22501SJeff Layton 	/* Only need to do this for regular files */
1983e9b22501SJeff Layton 	if (!S_ISREG(inode->i_mode))
1984e9b22501SJeff Layton 		return 0;
1985e9b22501SJeff Layton 
198680e80fbbSYan, Zheng 	if (ci->i_vino.snap != CEPH_NOSNAP) {
198780e80fbbSYan, Zheng 		/*
198880e80fbbSYan, Zheng 		 * Pool permission check needs to write to the first object.
198980e80fbbSYan, Zheng 		 * But for a snapshot, head of the first object may have already
199080e80fbbSYan, Zheng 		 * been deleted.  Skip the check to avoid creating an orphan object.
199180e80fbbSYan, Zheng 		 */
199280e80fbbSYan, Zheng 		return 0;
199380e80fbbSYan, Zheng 	}
199480e80fbbSYan, Zheng 
19955e3ded1bSYan, Zheng 	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
199610183a69SYan, Zheng 				NOPOOLPERM))
199710183a69SYan, Zheng 		return 0;
199810183a69SYan, Zheng 
199910183a69SYan, Zheng 	spin_lock(&ci->i_ceph_lock);
200010183a69SYan, Zheng 	flags = ci->i_ceph_flags;
20017627151eSYan, Zheng 	pool = ci->i_layout.pool_id;
200210183a69SYan, Zheng 	spin_unlock(&ci->i_ceph_lock);
200310183a69SYan, Zheng check:
200410183a69SYan, Zheng 	if (flags & CEPH_I_POOL_PERM) {
200510183a69SYan, Zheng 		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
20067627151eSYan, Zheng 			dout("ceph_pool_perm_check pool %lld no read perm\n",
200710183a69SYan, Zheng 			     pool);
200810183a69SYan, Zheng 			return -EPERM;
200910183a69SYan, Zheng 		}
201010183a69SYan, Zheng 		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
20117627151eSYan, Zheng 			dout("ceph_pool_perm_check pool %lld no write perm\n",
201210183a69SYan, Zheng 			     pool);
201310183a69SYan, Zheng 			return -EPERM;
201410183a69SYan, Zheng 		}
201510183a69SYan, Zheng 		return 0;
201610183a69SYan, Zheng 	}
201710183a69SYan, Zheng 
2018779fe0fbSYan, Zheng 	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
2019779fe0fbSYan, Zheng 	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
2020779fe0fbSYan, Zheng 	ceph_put_string(pool_ns);
202110183a69SYan, Zheng 	if (ret < 0)
202210183a69SYan, Zheng 		return ret;
202310183a69SYan, Zheng 
202410183a69SYan, Zheng 	flags = CEPH_I_POOL_PERM;
202510183a69SYan, Zheng 	if (ret & POOL_READ)
202610183a69SYan, Zheng 		flags |= CEPH_I_POOL_RD;
202710183a69SYan, Zheng 	if (ret & POOL_WRITE)
202810183a69SYan, Zheng 		flags |= CEPH_I_POOL_WR;
202910183a69SYan, Zheng 
203010183a69SYan, Zheng 	spin_lock(&ci->i_ceph_lock);
2031779fe0fbSYan, Zheng 	if (pool == ci->i_layout.pool_id &&
2032779fe0fbSYan, Zheng 	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
2033779fe0fbSYan, Zheng 		ci->i_ceph_flags |= flags;
203410183a69SYan, Zheng 	} else {
20357627151eSYan, Zheng 		pool = ci->i_layout.pool_id;
203610183a69SYan, Zheng 		flags = ci->i_ceph_flags;
203710183a69SYan, Zheng 	}
203810183a69SYan, Zheng 	spin_unlock(&ci->i_ceph_lock);
203910183a69SYan, Zheng 	goto check;
204010183a69SYan, Zheng }
204110183a69SYan, Zheng 
204210183a69SYan, Zheng void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
204310183a69SYan, Zheng {
204410183a69SYan, Zheng 	struct ceph_pool_perm *perm;
204510183a69SYan, Zheng 	struct rb_node *n;
204610183a69SYan, Zheng 
204710183a69SYan, Zheng 	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
204810183a69SYan, Zheng 		n = rb_first(&mdsc->pool_perm_tree);
204910183a69SYan, Zheng 		perm = rb_entry(n, struct ceph_pool_perm, node);
205010183a69SYan, Zheng 		rb_erase(n, &mdsc->pool_perm_tree);
205110183a69SYan, Zheng 		kfree(perm);
205210183a69SYan, Zheng 	}
205310183a69SYan, Zheng }
2054