// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty.  (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio *folio, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	attach_page_private(page, snapc);

	return ceph_fscache_set_page_dirty(page);
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != thp_size(page)) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	WARN_ON(!PageLocked(page));
	if (PagePrivate(page)) {
		dout("%p invalidatepage %p idx %lu full dirty page\n",
		     inode, page, page->index);

		snapc = detach_page_private(page);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	wait_on_page_fscache(page);
}

static int ceph_releasepage(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;

	dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
	     ceph_vinop(inode), page,
	     page->index, PageDirty(page) ? "" : "not ");
"" : "not "); 172400e1286SJeff Layton 173400e1286SJeff Layton if (PagePrivate(page)) 174400e1286SJeff Layton return 0; 17599ccbd22SMilosz Tanski 1767c46b318SJeff Layton if (PageFsCache(page)) { 177400e1286SJeff Layton if (!gfpflags_allow_blocking(gfp) || !(gfp & __GFP_FS)) 1787c46b318SJeff Layton return 0; 1797c46b318SJeff Layton wait_on_page_fscache(page); 1807c46b318SJeff Layton } 181400e1286SJeff Layton ceph_fscache_note_page_release(inode); 182400e1286SJeff Layton return 1; 1831d3576fdSSage Weil } 1841d3576fdSSage Weil 185f0702876SJeff Layton static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) 186f0702876SJeff Layton { 187f0702876SJeff Layton struct inode *inode = rreq->mapping->host; 188f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 189f0702876SJeff Layton struct ceph_file_layout *lo = &ci->i_layout; 190f0702876SJeff Layton u32 blockoff; 191f0702876SJeff Layton u64 blockno; 192f0702876SJeff Layton 193f0702876SJeff Layton /* Expand the start downward */ 194f0702876SJeff Layton blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); 195f0702876SJeff Layton rreq->start = blockno * lo->stripe_unit; 196f0702876SJeff Layton rreq->len += blockoff; 197f0702876SJeff Layton 198f0702876SJeff Layton /* Now, round up the length to the next block */ 199f0702876SJeff Layton rreq->len = roundup(rreq->len, lo->stripe_unit); 200f0702876SJeff Layton } 201f0702876SJeff Layton 202f0702876SJeff Layton static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) 203f0702876SJeff Layton { 204f0702876SJeff Layton struct inode *inode = subreq->rreq->mapping->host; 205f0702876SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 206f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 207f0702876SJeff Layton u64 objno, objoff; 208f0702876SJeff Layton u32 xlen; 209f0702876SJeff Layton 210f0702876SJeff Layton /* Truncate the extent at the end of the current block */ 211f0702876SJeff Layton ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, 212f0702876SJeff Layton &objno, &objoff, &xlen); 213f0702876SJeff Layton subreq->len = min(xlen, fsc->mount_options->rsize); 214f0702876SJeff Layton return true; 215f0702876SJeff Layton } 216f0702876SJeff Layton 217f0702876SJeff Layton static void finish_netfs_read(struct ceph_osd_request *req) 218f0702876SJeff Layton { 219f0702876SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); 220f0702876SJeff Layton struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 221f0702876SJeff Layton struct netfs_read_subrequest *subreq = req->r_priv; 222f0702876SJeff Layton int num_pages; 223f0702876SJeff Layton int err = req->r_result; 224f0702876SJeff Layton 2258ae99ae2SXiubo Li ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 226903f4fecSXiubo Li req->r_end_latency, osd_data->length, err); 227f0702876SJeff Layton 228f0702876SJeff Layton dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 229f0702876SJeff Layton subreq->len, i_size_read(req->r_inode)); 230f0702876SJeff Layton 231f0702876SJeff Layton /* no object means success but no data */ 232f0702876SJeff Layton if (err == -ENOENT) 233f0702876SJeff Layton err = 0; 234f0702876SJeff Layton else if (err == -EBLOCKLISTED) 235f0702876SJeff Layton fsc->blocklisted = true; 236f0702876SJeff Layton 237f0702876SJeff Layton if (err >= 0 && err < subreq->len) 238f0702876SJeff Layton __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 239f0702876SJeff Layton 
	netfs_subreq_terminated(subreq, err, true);

	num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
	ceph_put_page_vector(osd_data->pages, num_pages, false);
	iput(req->r_inode);
}

static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct inode *inode = rreq->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	struct page **pages;
	size_t page_off;
	int err = 0;
	u64 len = subreq->len;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
			subreq->start, &len, 0, 1, CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
			NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
	err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
	if (err < 0) {
		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
		goto out;
	}

	/* should always give us a page-aligned read */
	WARN_ON_ONCE(page_off);
	len = err;

	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	err = ceph_osdc_start_request(req->r_osdc, req, false);
	if (err)
		iput(inode);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	dout("%s: result %d\n", __func__, err);
}

static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
}

static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int got = (uintptr_t)priv;

	if (got)
		ceph_put_cap_refs(ci, got);
}

static const struct netfs_read_request_ops ceph_netfs_read_ops = {
	.init_rreq		= ceph_init_rreq,
	.is_cache_enabled	= ceph_is_cache_enabled,
	.begin_cache_operation	= ceph_begin_cache_operation,
	.issue_op		= ceph_netfs_issue_op,
	.expand_readahead	= ceph_netfs_expand_readahead,
	.clamp_length		= ceph_netfs_clamp_length,
	.check_write_begin	= ceph_netfs_check_write_begin,
	.cleanup		= ceph_readahead_cleanup,
};

/* read a single page, without unlocking it. */
static int ceph_readpage(struct file *file, struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vino vino = ceph_vino(inode);
	size_t len = folio_size(folio);
	u64 off = folio_file_pos(folio);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0) {
			folio_unlock(folio);
			return -EINVAL;
		}
		zero_user_segment(&folio->page, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		folio_unlock(folio);
		return 0;
	}

	dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n",
	     vino.ino, vino.snap, file, off, len, folio, folio_index(folio));

	return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
}

static void ceph_readahead(struct readahead_control *ractl)
{
	struct inode *inode = file_inode(ractl->file);
	struct ceph_file_info *fi = ractl->file->private_data;
	struct ceph_rw_context *rw_ctx;
	int got = 0;
	int ret = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return;

	rw_ctx = ceph_find_rw_context(fi);
	if (!rw_ctx) {
		/*
		 * readahead callers do not necessarily hold Fcb caps
		 * (e.g. fadvise, madvise).
		 */
		int want = CEPH_CAP_FILE_CACHE;

		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
		if (ret < 0)
			dout("start_read %p, error getting cap\n", inode);
		else if (!(got & want))
			dout("start_read %p, no cache cap\n", inode);

		if (ret <= 0)
			return;
	}
	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
}

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	set_page_fscache(page);
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + thp_size(page))
		end = page_offset(page) + thp_size(page);
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);

	dout("writepage %p idx %lu\n", page, page->index);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file?
	 */
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, thp_size(page));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	set_page_writeback(page);
	if (caching)
		ceph_set_page_fscache(page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(osdc, req, true);
	if (!err)
		err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
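	/* the snap context attached at dirty time should match the one we just wrote under */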
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);  /* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);

	wait_on_page_fscache(page);

	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	unsigned int len = 0;
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			    CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			dout("unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;
	bool caching = ceph_is_cache_enabled(inode);

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	pagevec_init(&pvec);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}. Dirty pages
		 * in that range can be associated with newer snapc.
		 * They are not writeable until we write all dirty pages
		 * associated with 'snapc' get written */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, pvec_pages, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    page_offset(page) >= i_size_read(inode)) &&
				    clear_page_dirty_for_io(page))
					mapping->a_ops->invalidatepage(page,
								0, thp_size(page));
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page) || PageFsCache(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
				wait_on_page_fscache(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
						CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}

			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += thp_size(page);
		}

		/* did we get anything?
		 */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     thp_size(page) - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			/*
			 * Discontinuity in page range? Ceph can handle that by just passing
			 * multiple extents in the write op.
			 */
			if (offset + len != cur_offset) {
				/* If it's full, stop here */
				if (op_idx + 1 == req->r_num_ops)
					break;

				/* Kick off an fscache write with what we have so far.
				 */
				ceph_fscache_write_to_cache(inode, offset, len, caching);

				/* Start a new extent */
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			if (caching)
				ceph_set_page_fscache(pages[i]);
			len += thp_size(page);
		}
		ceph_fscache_write_to_cache(inode, offset, len, caching);

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
							PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}



/*
 * See if a given @snapc is either writeable, or already written.
11801d3576fdSSage Weil */ 11811d3576fdSSage Weil static int context_is_writeable_or_written(struct inode *inode, 11821d3576fdSSage Weil struct ceph_snap_context *snapc) 11831d3576fdSSage Weil { 118405455e11SYan, Zheng struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL); 11856298a337SSage Weil int ret = !oldest || snapc->seq <= oldest->seq; 11866298a337SSage Weil 11876298a337SSage Weil ceph_put_snap_context(oldest); 11886298a337SSage Weil return ret; 11891d3576fdSSage Weil } 11901d3576fdSSage Weil 119118d620f0SJeff Layton /** 119218d620f0SJeff Layton * ceph_find_incompatible - find an incompatible context and return it 119318d620f0SJeff Layton * @page: page being dirtied 119418d620f0SJeff Layton * 119518d620f0SJeff Layton * We are only allowed to write into/dirty a page if the page is 119618d620f0SJeff Layton * clean, or already dirty within the same snap context. Returns a 119718d620f0SJeff Layton * conflicting context if there is one, NULL if there isn't, or a 119818d620f0SJeff Layton * negative error code on other errors. 119918d620f0SJeff Layton * 120018d620f0SJeff Layton * Must be called with page lock held. 120118d620f0SJeff Layton */ 120218d620f0SJeff Layton static struct ceph_snap_context * 1203d45156bfSJeff Layton ceph_find_incompatible(struct page *page) 120418d620f0SJeff Layton { 1205d45156bfSJeff Layton struct inode *inode = page->mapping->host; 120618d620f0SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 120718d620f0SJeff Layton 12085d6451b1SJeff Layton if (ceph_inode_is_shutdown(inode)) { 12095d6451b1SJeff Layton dout(" page %p %llx:%llx is shutdown\n", page, 12105d6451b1SJeff Layton ceph_vinop(inode)); 12115d6451b1SJeff Layton return ERR_PTR(-ESTALE); 121218d620f0SJeff Layton } 121318d620f0SJeff Layton 121418d620f0SJeff Layton for (;;) { 121518d620f0SJeff Layton struct ceph_snap_context *snapc, *oldest; 121618d620f0SJeff Layton 121718d620f0SJeff Layton wait_on_page_writeback(page); 121818d620f0SJeff Layton 121918d620f0SJeff Layton snapc = page_snap_context(page); 122018d620f0SJeff Layton if (!snapc || snapc == ci->i_head_snapc) 122118d620f0SJeff Layton break; 122218d620f0SJeff Layton 122318d620f0SJeff Layton /* 122418d620f0SJeff Layton * this page is already dirty in another (older) snap 122518d620f0SJeff Layton * context! is it writeable now? 
122618d620f0SJeff Layton */ 122718d620f0SJeff Layton oldest = get_oldest_context(inode, NULL, NULL); 122818d620f0SJeff Layton if (snapc->seq > oldest->seq) { 122918d620f0SJeff Layton /* not writeable -- return it for the caller to deal with */ 123018d620f0SJeff Layton ceph_put_snap_context(oldest); 123118d620f0SJeff Layton dout(" page %p snapc %p not current or oldest\n", page, snapc); 123218d620f0SJeff Layton return ceph_get_snap_context(snapc); 123318d620f0SJeff Layton } 123418d620f0SJeff Layton ceph_put_snap_context(oldest); 123518d620f0SJeff Layton 123618d620f0SJeff Layton /* yay, writeable, do it now (without dropping page lock) */ 123718d620f0SJeff Layton dout(" page %p snapc %p not current, but oldest\n", page, snapc); 123818d620f0SJeff Layton if (clear_page_dirty_for_io(page)) { 123918d620f0SJeff Layton int r = writepage_nounlock(page, NULL); 124018d620f0SJeff Layton if (r < 0) 124118d620f0SJeff Layton return ERR_PTR(r); 124218d620f0SJeff Layton } 124318d620f0SJeff Layton } 124418d620f0SJeff Layton return NULL; 124518d620f0SJeff Layton } 124618d620f0SJeff Layton 1247d801327dSJeff Layton static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, 124878525c74SDavid Howells struct folio *folio, void **_fsdata) 1249d801327dSJeff Layton { 1250d801327dSJeff Layton struct inode *inode = file_inode(file); 1251d801327dSJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 1252d801327dSJeff Layton struct ceph_snap_context *snapc; 1253d801327dSJeff Layton 125478525c74SDavid Howells snapc = ceph_find_incompatible(folio_page(folio, 0)); 1255d801327dSJeff Layton if (snapc) { 1256d801327dSJeff Layton int r; 1257d801327dSJeff Layton 125878525c74SDavid Howells folio_unlock(folio); 125978525c74SDavid Howells folio_put(folio); 1260d801327dSJeff Layton if (IS_ERR(snapc)) 1261d801327dSJeff Layton return PTR_ERR(snapc); 1262d801327dSJeff Layton 1263d801327dSJeff Layton ceph_queue_writeback(inode); 1264d801327dSJeff Layton r = wait_event_killable(ci->i_cap_wq, 1265d801327dSJeff Layton context_is_writeable_or_written(inode, snapc)); 1266d801327dSJeff Layton ceph_put_snap_context(snapc); 1267d801327dSJeff Layton return r == 0 ? -EAGAIN : r; 1268d801327dSJeff Layton } 1269d801327dSJeff Layton return 0; 1270d801327dSJeff Layton } 1271d801327dSJeff Layton 12721d3576fdSSage Weil /* 12731d3576fdSSage Weil * We are only allowed to write into/dirty the page if the page is 12741d3576fdSSage Weil * clean, or already dirty within the same snap context. 12754af6b225SYehuda Sadeh */ 12764af6b225SYehuda Sadeh static int ceph_write_begin(struct file *file, struct address_space *mapping, 127778525c74SDavid Howells loff_t pos, unsigned len, unsigned aop_flags, 12784af6b225SYehuda Sadeh struct page **pagep, void **fsdata) 12794af6b225SYehuda Sadeh { 1280496ad9aaSAl Viro struct inode *inode = file_inode(file); 12811cc16990SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 128278525c74SDavid Howells struct folio *folio = NULL; 128309cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT; 1284d801327dSJeff Layton int r; 12854af6b225SYehuda Sadeh 1286d801327dSJeff Layton /* 1287d801327dSJeff Layton * Uninlining should have already been done and everything updated, EXCEPT 1288d801327dSJeff Layton * for inline_version sent to the MDS. 
1289d801327dSJeff Layton */ 1290d801327dSJeff Layton if (ci->i_inline_version != CEPH_INLINE_NONE) { 129178525c74SDavid Howells unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; 129278525c74SDavid Howells if (aop_flags & AOP_FLAG_NOFS) 129378525c74SDavid Howells fgp_flags |= FGP_NOFS; 129478525c74SDavid Howells folio = __filemap_get_folio(mapping, index, fgp_flags, 129578525c74SDavid Howells mapping_gfp_mask(mapping)); 129678525c74SDavid Howells if (!folio) 1297d801327dSJeff Layton return -ENOMEM; 12981cc16990SJeff Layton 12991cc16990SJeff Layton /* 1300d801327dSJeff Layton * The inline_version on a new inode is set to 1. If that's the 130178525c74SDavid Howells * case, then the folio is brand new and isn't yet Uptodate. 13021cc16990SJeff Layton */ 1303d801327dSJeff Layton r = 0; 1304d801327dSJeff Layton if (index == 0 && ci->i_inline_version != 1) { 130578525c74SDavid Howells if (!folio_test_uptodate(folio)) { 1306d801327dSJeff Layton WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n", 1307d801327dSJeff Layton ci->i_inline_version); 1308d801327dSJeff Layton r = -EINVAL; 1309d801327dSJeff Layton } 1310d801327dSJeff Layton goto out; 1311d801327dSJeff Layton } 131278525c74SDavid Howells zero_user_segment(&folio->page, 0, folio_size(folio)); 131378525c74SDavid Howells folio_mark_uptodate(folio); 1314d801327dSJeff Layton goto out; 13151cc16990SJeff Layton } 13161cc16990SJeff Layton 131778525c74SDavid Howells r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL, 1318d801327dSJeff Layton &ceph_netfs_read_ops, NULL); 1319d801327dSJeff Layton out: 1320d801327dSJeff Layton if (r == 0) 132178525c74SDavid Howells folio_wait_fscache(folio); 13221cc16990SJeff Layton if (r < 0) { 132378525c74SDavid Howells if (folio) 132478525c74SDavid Howells folio_put(folio); 13251cc16990SJeff Layton } else { 132678525c74SDavid Howells WARN_ON_ONCE(!folio_test_locked(folio)); 132778525c74SDavid Howells *pagep = &folio->page; 13281cc16990SJeff Layton } 13294af6b225SYehuda Sadeh return r; 13304af6b225SYehuda Sadeh } 13314af6b225SYehuda Sadeh 13324af6b225SYehuda Sadeh /* 13331d3576fdSSage Weil * we don't do anything in here that simple_write_end doesn't do 13345dda377cSYan, Zheng * except adjust dirty page accounting 13351d3576fdSSage Weil */ 13361d3576fdSSage Weil static int ceph_write_end(struct file *file, struct address_space *mapping, 13371d3576fdSSage Weil loff_t pos, unsigned len, unsigned copied, 133878525c74SDavid Howells struct page *subpage, void *fsdata) 13391d3576fdSSage Weil { 134078525c74SDavid Howells struct folio *folio = page_folio(subpage); 1341496ad9aaSAl Viro struct inode *inode = file_inode(file); 1342efb0ca76SYan, Zheng bool check_cap = false; 13431d3576fdSSage Weil 134478525c74SDavid Howells dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file, 134578525c74SDavid Howells inode, folio, (int)pos, (int)copied, (int)len); 13461d3576fdSSage Weil 134778525c74SDavid Howells if (!folio_test_uptodate(folio)) { 1348ce3a8732SJeff Layton /* just return that nothing was copied on a short copy */ 1349b9de313cSAl Viro if (copied < len) { 1350b9de313cSAl Viro copied = 0; 1351b9de313cSAl Viro goto out; 1352b9de313cSAl Viro } 135378525c74SDavid Howells folio_mark_uptodate(folio); 1354b9de313cSAl Viro } 13551d3576fdSSage Weil 13561d3576fdSSage Weil /* did file size increase? 
*/ 135799c88e69SYan, Zheng if (pos+copied > i_size_read(inode)) 13581d3576fdSSage Weil check_cap = ceph_inode_set_size(inode, pos+copied); 13591d3576fdSSage Weil 136078525c74SDavid Howells folio_mark_dirty(folio); 13611d3576fdSSage Weil 1362b9de313cSAl Viro out: 136378525c74SDavid Howells folio_unlock(folio); 136478525c74SDavid Howells folio_put(folio); 13651d3576fdSSage Weil 13661d3576fdSSage Weil if (check_cap) 13671d3576fdSSage Weil ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 13681d3576fdSSage Weil 13691d3576fdSSage Weil return copied; 13701d3576fdSSage Weil } 13711d3576fdSSage Weil 13721d3576fdSSage Weil const struct address_space_operations ceph_aops = { 13731d3576fdSSage Weil .readpage = ceph_readpage, 137449870056SJeff Layton .readahead = ceph_readahead, 13751d3576fdSSage Weil .writepage = ceph_writepage, 13761d3576fdSSage Weil .writepages = ceph_writepages_start, 13771d3576fdSSage Weil .write_begin = ceph_write_begin, 13781d3576fdSSage Weil .write_end = ceph_write_end, 13791d3576fdSSage Weil .set_page_dirty = ceph_set_page_dirty, 13801d3576fdSSage Weil .invalidatepage = ceph_invalidatepage, 13811d3576fdSSage Weil .releasepage = ceph_releasepage, 13829c43ff44SJeff Layton .direct_IO = noop_direct_IO, 13831d3576fdSSage Weil }; 13841d3576fdSSage Weil 13854f7e89f6SYan, Zheng static void ceph_block_sigs(sigset_t *oldset) 13864f7e89f6SYan, Zheng { 13874f7e89f6SYan, Zheng sigset_t mask; 13884f7e89f6SYan, Zheng siginitsetinv(&mask, sigmask(SIGKILL)); 13894f7e89f6SYan, Zheng sigprocmask(SIG_BLOCK, &mask, oldset); 13904f7e89f6SYan, Zheng } 13914f7e89f6SYan, Zheng 13924f7e89f6SYan, Zheng static void ceph_restore_sigs(sigset_t *oldset) 13934f7e89f6SYan, Zheng { 13944f7e89f6SYan, Zheng sigprocmask(SIG_SETMASK, oldset, NULL); 13954f7e89f6SYan, Zheng } 13961d3576fdSSage Weil 13971d3576fdSSage Weil /* 13981d3576fdSSage Weil * vm ops 13991d3576fdSSage Weil */ 140024499847SSouptick Joarder static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) 140161f68816SYan, Zheng { 140211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 140361f68816SYan, Zheng struct inode *inode = file_inode(vma->vm_file); 140461f68816SYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 140561f68816SYan, Zheng struct ceph_file_info *fi = vma->vm_file->private_data; 1406c403c3a2SMatthew Wilcox (Oracle) loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; 140724499847SSouptick Joarder int want, got, err; 14084f7e89f6SYan, Zheng sigset_t oldset; 140924499847SSouptick Joarder vm_fault_t ret = VM_FAULT_SIGBUS; 14104f7e89f6SYan, Zheng 14115d6451b1SJeff Layton if (ceph_inode_is_shutdown(inode)) 14125d6451b1SJeff Layton return ret; 14135d6451b1SJeff Layton 14144f7e89f6SYan, Zheng ceph_block_sigs(&oldset); 141561f68816SYan, Zheng 14168ff2d290SJeff Layton dout("filemap_fault %p %llx.%llx %llu trying to get caps\n", 14178ff2d290SJeff Layton inode, ceph_vinop(inode), off); 141861f68816SYan, Zheng if (fi->fmode & CEPH_FILE_MODE_LAZY) 141961f68816SYan, Zheng want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 142061f68816SYan, Zheng else 142161f68816SYan, Zheng want = CEPH_CAP_FILE_CACHE; 14224f7e89f6SYan, Zheng 142361f68816SYan, Zheng got = 0; 1424e72968e1SJeff Layton err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got); 142524499847SSouptick Joarder if (err < 0) 14264f7e89f6SYan, Zheng goto out_restore; 14276ce026e4SYan, Zheng 14288ff2d290SJeff Layton dout("filemap_fault %p %llu got cap refs on %s\n", 14298ff2d290SJeff Layton inode, off, ceph_cap_string(got)); 143061f68816SYan, Zheng 
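	/*
	 * With CEPH_CAP_FILE_CACHE/LAZYIO caps held, or when the inode has no
	 * inline data, the fault below is served from the page cache by the
	 * generic filemap_fault() under an rw context.  Otherwise err is set
	 * to -EAGAIN and we fall through to read the inline data into page 0
	 * of the mapping by hand.
	 */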
143183701246SYan, Zheng if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 14322b1ac852SYan, Zheng ci->i_inline_version == CEPH_INLINE_NONE) { 14335d988308SYan, Zheng CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 14345d988308SYan, Zheng ceph_add_rw_context(fi, &rw_ctx); 143511bac800SDave Jiang ret = filemap_fault(vmf); 14365d988308SYan, Zheng ceph_del_rw_context(fi, &rw_ctx); 14378ff2d290SJeff Layton dout("filemap_fault %p %llu drop cap refs %s ret %x\n", 14388ff2d290SJeff Layton inode, off, ceph_cap_string(got), ret); 14392b1ac852SYan, Zheng } else 144024499847SSouptick Joarder err = -EAGAIN; 144161f68816SYan, Zheng 144261f68816SYan, Zheng ceph_put_cap_refs(ci, got); 144361f68816SYan, Zheng 144424499847SSouptick Joarder if (err != -EAGAIN) 14454f7e89f6SYan, Zheng goto out_restore; 144683701246SYan, Zheng 144783701246SYan, Zheng /* read inline data */ 144809cbfeafSKirill A. Shutemov if (off >= PAGE_SIZE) { 144983701246SYan, Zheng /* does not support inline data > PAGE_SIZE */ 145083701246SYan, Zheng ret = VM_FAULT_SIGBUS; 145183701246SYan, Zheng } else { 145283701246SYan, Zheng struct address_space *mapping = inode->i_mapping; 1453057ba5b2SJan Kara struct page *page; 1454057ba5b2SJan Kara 1455057ba5b2SJan Kara filemap_invalidate_lock_shared(mapping); 1456057ba5b2SJan Kara page = find_or_create_page(mapping, 0, 1457057ba5b2SJan Kara mapping_gfp_constraint(mapping, ~__GFP_FS)); 145883701246SYan, Zheng if (!page) { 145983701246SYan, Zheng ret = VM_FAULT_OOM; 14604f7e89f6SYan, Zheng goto out_inline; 146183701246SYan, Zheng } 146224499847SSouptick Joarder err = __ceph_do_getattr(inode, page, 146383701246SYan, Zheng CEPH_STAT_CAP_INLINE_DATA, true); 146424499847SSouptick Joarder if (err < 0 || off >= i_size_read(inode)) { 146583701246SYan, Zheng unlock_page(page); 146609cbfeafSKirill A. 
Shutemov put_page(page); 1467c64a2b05SSouptick Joarder ret = vmf_error(err); 14684f7e89f6SYan, Zheng goto out_inline; 146983701246SYan, Zheng } 147024499847SSouptick Joarder if (err < PAGE_SIZE) 147124499847SSouptick Joarder zero_user_segment(page, err, PAGE_SIZE); 147283701246SYan, Zheng else 147383701246SYan, Zheng flush_dcache_page(page); 147483701246SYan, Zheng SetPageUptodate(page); 147583701246SYan, Zheng vmf->page = page; 147683701246SYan, Zheng ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 14774f7e89f6SYan, Zheng out_inline: 1478057ba5b2SJan Kara filemap_invalidate_unlock_shared(mapping); 14798ff2d290SJeff Layton dout("filemap_fault %p %llu read inline data ret %x\n", 14808ff2d290SJeff Layton inode, off, ret); 14814f7e89f6SYan, Zheng } 14824f7e89f6SYan, Zheng out_restore: 14834f7e89f6SYan, Zheng ceph_restore_sigs(&oldset); 148424499847SSouptick Joarder if (err < 0) 148524499847SSouptick Joarder ret = vmf_error(err); 14866ce026e4SYan, Zheng 148761f68816SYan, Zheng return ret; 148861f68816SYan, Zheng } 14891d3576fdSSage Weil 149024499847SSouptick Joarder static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) 14911d3576fdSSage Weil { 149211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 1493496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 149461f68816SYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 149561f68816SYan, Zheng struct ceph_file_info *fi = vma->vm_file->private_data; 1496f66fd9f0SYan, Zheng struct ceph_cap_flush *prealloc_cf; 149761f68816SYan, Zheng struct page *page = vmf->page; 14986285bc23SAlex Elder loff_t off = page_offset(page); 149961f68816SYan, Zheng loff_t size = i_size_read(inode); 150061f68816SYan, Zheng size_t len; 150124499847SSouptick Joarder int want, got, err; 15024f7e89f6SYan, Zheng sigset_t oldset; 150324499847SSouptick Joarder vm_fault_t ret = VM_FAULT_SIGBUS; 15041d3576fdSSage Weil 15055d6451b1SJeff Layton if (ceph_inode_is_shutdown(inode)) 15065d6451b1SJeff Layton return ret; 15075d6451b1SJeff Layton 1508f66fd9f0SYan, Zheng prealloc_cf = ceph_alloc_cap_flush(); 1509f66fd9f0SYan, Zheng if (!prealloc_cf) 15106ce026e4SYan, Zheng return VM_FAULT_OOM; 1511f66fd9f0SYan, Zheng 1512249c1df5SJeff Layton sb_start_pagefault(inode->i_sb); 15134f7e89f6SYan, Zheng ceph_block_sigs(&oldset); 15141d3576fdSSage Weil 151528127bddSYan, Zheng if (ci->i_inline_version != CEPH_INLINE_NONE) { 151628127bddSYan, Zheng struct page *locked_page = NULL; 151728127bddSYan, Zheng if (off == 0) { 151828127bddSYan, Zheng lock_page(page); 151928127bddSYan, Zheng locked_page = page; 152028127bddSYan, Zheng } 152124499847SSouptick Joarder err = ceph_uninline_data(vma->vm_file, locked_page); 152228127bddSYan, Zheng if (locked_page) 152328127bddSYan, Zheng unlock_page(locked_page); 152424499847SSouptick Joarder if (err < 0) 1525f66fd9f0SYan, Zheng goto out_free; 1526f66fd9f0SYan, Zheng } 152728127bddSYan, Zheng 15288ff2d290SJeff Layton if (off + thp_size(page) <= size) 15298ff2d290SJeff Layton len = thp_size(page); 15301d3576fdSSage Weil else 15318ff2d290SJeff Layton len = offset_in_thp(page, size); 15321d3576fdSSage Weil 153361f68816SYan, Zheng dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 153461f68816SYan, Zheng inode, ceph_vinop(inode), off, len, size); 153561f68816SYan, Zheng if (fi->fmode & CEPH_FILE_MODE_LAZY) 153661f68816SYan, Zheng want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 153761f68816SYan, Zheng else 153861f68816SYan, Zheng want = CEPH_CAP_FILE_BUFFER; 15394f7e89f6SYan, Zheng 154061f68816SYan, Zheng got = 0; 
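	/*
	 * Block until we hold Fw cap references (wanting Fb, plus LAZYIO for
	 * lazy opens) covering the whole range being dirtied.  Only once the
	 * caps are held do we lock the page, re-check it against truncation
	 * and any incompatible snap context, and finally mark it dirty.
	 */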
1541e72968e1SJeff Layton err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got); 154224499847SSouptick Joarder if (err < 0) 1543f66fd9f0SYan, Zheng goto out_free; 15446ce026e4SYan, Zheng 154561f68816SYan, Zheng dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 154661f68816SYan, Zheng inode, off, len, ceph_cap_string(got)); 154761f68816SYan, Zheng 154861f68816SYan, Zheng /* Update time before taking page lock */ 154961f68816SYan, Zheng file_update_time(vma->vm_file); 15505c308356SJeff Layton inode_inc_iversion_raw(inode); 15514af6b225SYehuda Sadeh 1552f0b33df5SYan, Zheng do { 1553d45156bfSJeff Layton struct ceph_snap_context *snapc; 1554d45156bfSJeff Layton 15554af6b225SYehuda Sadeh lock_page(page); 15564af6b225SYehuda Sadeh 1557cb03c143SAndreas Gruenbacher if (page_mkwrite_check_truncate(page, inode) < 0) { 1558f9cac5acSYan, Zheng unlock_page(page); 15596ce026e4SYan, Zheng ret = VM_FAULT_NOPAGE; 1560f0b33df5SYan, Zheng break; 1561f9cac5acSYan, Zheng } 15624af6b225SYehuda Sadeh 1563d45156bfSJeff Layton snapc = ceph_find_incompatible(page); 1564d45156bfSJeff Layton if (!snapc) { 15654af6b225SYehuda Sadeh /* success. we'll keep the page locked. */ 15661d3576fdSSage Weil set_page_dirty(page); 15671d3576fdSSage Weil ret = VM_FAULT_LOCKED; 1568d45156bfSJeff Layton break; 15691d3576fdSSage Weil } 1570d45156bfSJeff Layton 1571d45156bfSJeff Layton unlock_page(page); 1572d45156bfSJeff Layton 1573d45156bfSJeff Layton if (IS_ERR(snapc)) { 1574d45156bfSJeff Layton ret = VM_FAULT_SIGBUS; 1575d45156bfSJeff Layton break; 1576d45156bfSJeff Layton } 1577d45156bfSJeff Layton 1578d45156bfSJeff Layton ceph_queue_writeback(inode); 1579d45156bfSJeff Layton err = wait_event_killable(ci->i_cap_wq, 1580d45156bfSJeff Layton context_is_writeable_or_written(inode, snapc)); 1581d45156bfSJeff Layton ceph_put_snap_context(snapc); 1582d45156bfSJeff Layton } while (err == 0); 1583f0b33df5SYan, Zheng 158428127bddSYan, Zheng if (ret == VM_FAULT_LOCKED || 158528127bddSYan, Zheng ci->i_inline_version != CEPH_INLINE_NONE) { 158661f68816SYan, Zheng int dirty; 158761f68816SYan, Zheng spin_lock(&ci->i_ceph_lock); 158828127bddSYan, Zheng ci->i_inline_version = CEPH_INLINE_NONE; 1589f66fd9f0SYan, Zheng dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1590f66fd9f0SYan, Zheng &prealloc_cf); 159161f68816SYan, Zheng spin_unlock(&ci->i_ceph_lock); 159261f68816SYan, Zheng if (dirty) 159361f68816SYan, Zheng __mark_inode_dirty(inode, dirty); 159461f68816SYan, Zheng } 159561f68816SYan, Zheng 159624499847SSouptick Joarder dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n", 159761f68816SYan, Zheng inode, off, len, ceph_cap_string(got), ret); 1598a8810cdcSJeff Layton ceph_put_cap_refs_async(ci, got); 1599f66fd9f0SYan, Zheng out_free: 16004f7e89f6SYan, Zheng ceph_restore_sigs(&oldset); 1601249c1df5SJeff Layton sb_end_pagefault(inode->i_sb); 1602f66fd9f0SYan, Zheng ceph_free_cap_flush(prealloc_cf); 160324499847SSouptick Joarder if (err < 0) 160424499847SSouptick Joarder ret = vmf_error(err); 16051d3576fdSSage Weil return ret; 16061d3576fdSSage Weil } 16071d3576fdSSage Weil 160831c542a1SYan, Zheng void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 160931c542a1SYan, Zheng char *data, size_t len) 161031c542a1SYan, Zheng { 161131c542a1SYan, Zheng struct address_space *mapping = inode->i_mapping; 161231c542a1SYan, Zheng struct page *page; 161331c542a1SYan, Zheng 161431c542a1SYan, Zheng if (locked_page) { 161531c542a1SYan, Zheng page = locked_page; 161631c542a1SYan, Zheng } else { 
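		/*
		 * No locked page was handed in, so fill page 0 of the mapping
		 * directly.  Bail out if the file is empty, if no page can be
		 * allocated, or if the page is already uptodate (someone else
		 * filled it first).
		 */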
161731c542a1SYan, Zheng if (i_size_read(inode) == 0) 161831c542a1SYan, Zheng return; 161931c542a1SYan, Zheng page = find_or_create_page(mapping, 0, 1620c62d2555SMichal Hocko mapping_gfp_constraint(mapping, 1621c62d2555SMichal Hocko ~__GFP_FS)); 162231c542a1SYan, Zheng if (!page) 162331c542a1SYan, Zheng return; 162431c542a1SYan, Zheng if (PageUptodate(page)) { 162531c542a1SYan, Zheng unlock_page(page); 162609cbfeafSKirill A. Shutemov put_page(page); 162731c542a1SYan, Zheng return; 162831c542a1SYan, Zheng } 162931c542a1SYan, Zheng } 163031c542a1SYan, Zheng 16310668ff52SIlya Dryomov dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 163231c542a1SYan, Zheng inode, ceph_vinop(inode), len, locked_page); 163331c542a1SYan, Zheng 163431c542a1SYan, Zheng if (len > 0) { 163531c542a1SYan, Zheng void *kaddr = kmap_atomic(page); 163631c542a1SYan, Zheng memcpy(kaddr, data, len); 163731c542a1SYan, Zheng kunmap_atomic(kaddr); 163831c542a1SYan, Zheng } 163931c542a1SYan, Zheng 164031c542a1SYan, Zheng if (page != locked_page) { 164109cbfeafSKirill A. Shutemov if (len < PAGE_SIZE) 164209cbfeafSKirill A. Shutemov zero_user_segment(page, len, PAGE_SIZE); 164331c542a1SYan, Zheng else 164431c542a1SYan, Zheng flush_dcache_page(page); 164531c542a1SYan, Zheng 164631c542a1SYan, Zheng SetPageUptodate(page); 164731c542a1SYan, Zheng unlock_page(page); 164809cbfeafSKirill A. Shutemov put_page(page); 164931c542a1SYan, Zheng } 165031c542a1SYan, Zheng } 165131c542a1SYan, Zheng 165228127bddSYan, Zheng int ceph_uninline_data(struct file *filp, struct page *locked_page) 165328127bddSYan, Zheng { 165428127bddSYan, Zheng struct inode *inode = file_inode(filp); 165528127bddSYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 165628127bddSYan, Zheng struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 165728127bddSYan, Zheng struct ceph_osd_request *req; 165828127bddSYan, Zheng struct page *page = NULL; 165928127bddSYan, Zheng u64 len, inline_version; 166028127bddSYan, Zheng int err = 0; 166128127bddSYan, Zheng bool from_pagecache = false; 166228127bddSYan, Zheng 166328127bddSYan, Zheng spin_lock(&ci->i_ceph_lock); 166428127bddSYan, Zheng inline_version = ci->i_inline_version; 166528127bddSYan, Zheng spin_unlock(&ci->i_ceph_lock); 166628127bddSYan, Zheng 166728127bddSYan, Zheng dout("uninline_data %p %llx.%llx inline_version %llu\n", 166828127bddSYan, Zheng inode, ceph_vinop(inode), inline_version); 166928127bddSYan, Zheng 167028127bddSYan, Zheng if (inline_version == 1 || /* initial version, no data */ 167128127bddSYan, Zheng inline_version == CEPH_INLINE_NONE) 167228127bddSYan, Zheng goto out; 167328127bddSYan, Zheng 167428127bddSYan, Zheng if (locked_page) { 167528127bddSYan, Zheng page = locked_page; 167628127bddSYan, Zheng WARN_ON(!PageUptodate(page)); 167728127bddSYan, Zheng } else if (ceph_caps_issued(ci) & 167828127bddSYan, Zheng (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 167928127bddSYan, Zheng page = find_get_page(inode->i_mapping, 0); 168028127bddSYan, Zheng if (page) { 168128127bddSYan, Zheng if (PageUptodate(page)) { 168228127bddSYan, Zheng from_pagecache = true; 168328127bddSYan, Zheng lock_page(page); 168428127bddSYan, Zheng } else { 168509cbfeafSKirill A. Shutemov put_page(page); 168628127bddSYan, Zheng page = NULL; 168728127bddSYan, Zheng } 168828127bddSYan, Zheng } 168928127bddSYan, Zheng } 169028127bddSYan, Zheng 169128127bddSYan, Zheng if (page) { 169228127bddSYan, Zheng len = i_size_read(inode); 169309cbfeafSKirill A. Shutemov if (len > PAGE_SIZE) 169409cbfeafSKirill A. 
Shutemov len = PAGE_SIZE; 169528127bddSYan, Zheng } else { 169628127bddSYan, Zheng page = __page_cache_alloc(GFP_NOFS); 169728127bddSYan, Zheng if (!page) { 169828127bddSYan, Zheng err = -ENOMEM; 169928127bddSYan, Zheng goto out; 170028127bddSYan, Zheng } 170128127bddSYan, Zheng err = __ceph_do_getattr(inode, page, 170228127bddSYan, Zheng CEPH_STAT_CAP_INLINE_DATA, true); 170328127bddSYan, Zheng if (err < 0) { 170428127bddSYan, Zheng /* no inline data */ 170528127bddSYan, Zheng if (err == -ENODATA) 170628127bddSYan, Zheng err = 0; 170728127bddSYan, Zheng goto out; 170828127bddSYan, Zheng } 170928127bddSYan, Zheng len = err; 171028127bddSYan, Zheng } 171128127bddSYan, Zheng 171228127bddSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 171328127bddSYan, Zheng ceph_vino(inode), 0, &len, 0, 1, 171454ea0046SIlya Dryomov CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, 171534b759b4SIlya Dryomov NULL, 0, 0, false); 171628127bddSYan, Zheng if (IS_ERR(req)) { 171728127bddSYan, Zheng err = PTR_ERR(req); 171828127bddSYan, Zheng goto out; 171928127bddSYan, Zheng } 172028127bddSYan, Zheng 1721fac02ddfSArnd Bergmann req->r_mtime = inode->i_mtime; 172228127bddSYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 172328127bddSYan, Zheng if (!err) 172428127bddSYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, req); 172528127bddSYan, Zheng ceph_osdc_put_request(req); 172628127bddSYan, Zheng if (err < 0) 172728127bddSYan, Zheng goto out; 172828127bddSYan, Zheng 172928127bddSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 173028127bddSYan, Zheng ceph_vino(inode), 0, &len, 1, 3, 173154ea0046SIlya Dryomov CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 173234b759b4SIlya Dryomov NULL, ci->i_truncate_seq, 173334b759b4SIlya Dryomov ci->i_truncate_size, false); 173428127bddSYan, Zheng if (IS_ERR(req)) { 173528127bddSYan, Zheng err = PTR_ERR(req); 173628127bddSYan, Zheng goto out; 173728127bddSYan, Zheng } 173828127bddSYan, Zheng 173928127bddSYan, Zheng osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 174028127bddSYan, Zheng 1741ec137c10SYan, Zheng { 1742ec137c10SYan, Zheng __le64 xattr_buf = cpu_to_le64(inline_version); 174328127bddSYan, Zheng err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1744ec137c10SYan, Zheng "inline_version", &xattr_buf, 1745ec137c10SYan, Zheng sizeof(xattr_buf), 174628127bddSYan, Zheng CEPH_OSD_CMPXATTR_OP_GT, 174728127bddSYan, Zheng CEPH_OSD_CMPXATTR_MODE_U64); 174828127bddSYan, Zheng if (err) 174928127bddSYan, Zheng goto out_put; 1750ec137c10SYan, Zheng } 175128127bddSYan, Zheng 1752ec137c10SYan, Zheng { 1753ec137c10SYan, Zheng char xattr_buf[32]; 1754ec137c10SYan, Zheng int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1755ec137c10SYan, Zheng "%llu", inline_version); 175628127bddSYan, Zheng err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1757ec137c10SYan, Zheng "inline_version", 1758ec137c10SYan, Zheng xattr_buf, xattr_len, 0, 0); 175928127bddSYan, Zheng if (err) 176028127bddSYan, Zheng goto out_put; 1761ec137c10SYan, Zheng } 176228127bddSYan, Zheng 1763fac02ddfSArnd Bergmann req->r_mtime = inode->i_mtime; 176428127bddSYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 176528127bddSYan, Zheng if (!err) 176628127bddSYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, req); 176797e27aaaSXiubo Li 17688ae99ae2SXiubo Li ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 1769903f4fecSXiubo Li req->r_end_latency, len, err); 
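	/*
	 * A -ECANCELED result from the OSD means the CMPXATTR guard on the
	 * "inline_version" xattr did not pass, i.e. another client raced us
	 * and already wrote back inline data with an equal or newer version.
	 * The data is safely on the OSDs in that case, so the error is folded
	 * into success below.
	 */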
177097e27aaaSXiubo Li 177128127bddSYan, Zheng out_put: 177228127bddSYan, Zheng ceph_osdc_put_request(req); 177328127bddSYan, Zheng if (err == -ECANCELED) 177428127bddSYan, Zheng err = 0; 177528127bddSYan, Zheng out: 177628127bddSYan, Zheng if (page && page != locked_page) { 177728127bddSYan, Zheng if (from_pagecache) { 177828127bddSYan, Zheng unlock_page(page); 177909cbfeafSKirill A. Shutemov put_page(page); 178028127bddSYan, Zheng } else 178128127bddSYan, Zheng __free_pages(page, 0); 178228127bddSYan, Zheng } 178328127bddSYan, Zheng 178428127bddSYan, Zheng dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 178528127bddSYan, Zheng inode, ceph_vinop(inode), inline_version, err); 178628127bddSYan, Zheng return err; 178728127bddSYan, Zheng } 178828127bddSYan, Zheng 17897cbea8dcSKirill A. Shutemov static const struct vm_operations_struct ceph_vmops = { 179061f68816SYan, Zheng .fault = ceph_filemap_fault, 17911d3576fdSSage Weil .page_mkwrite = ceph_page_mkwrite, 17921d3576fdSSage Weil }; 17931d3576fdSSage Weil 17941d3576fdSSage Weil int ceph_mmap(struct file *file, struct vm_area_struct *vma) 17951d3576fdSSage Weil { 17961d3576fdSSage Weil struct address_space *mapping = file->f_mapping; 17971d3576fdSSage Weil 17981d3576fdSSage Weil if (!mapping->a_ops->readpage) 17991d3576fdSSage Weil return -ENOEXEC; 18001d3576fdSSage Weil file_accessed(file); 18011d3576fdSSage Weil vma->vm_ops = &ceph_vmops; 18021d3576fdSSage Weil return 0; 18031d3576fdSSage Weil } 180410183a69SYan, Zheng 180510183a69SYan, Zheng enum { 180610183a69SYan, Zheng POOL_READ = 1, 180710183a69SYan, Zheng POOL_WRITE = 2, 180810183a69SYan, Zheng }; 180910183a69SYan, Zheng 1810779fe0fbSYan, Zheng static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 1811779fe0fbSYan, Zheng s64 pool, struct ceph_string *pool_ns) 181210183a69SYan, Zheng { 181310183a69SYan, Zheng struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 181410183a69SYan, Zheng struct ceph_mds_client *mdsc = fsc->mdsc; 181510183a69SYan, Zheng struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 181610183a69SYan, Zheng struct rb_node **p, *parent; 181710183a69SYan, Zheng struct ceph_pool_perm *perm; 181810183a69SYan, Zheng struct page **pages; 1819779fe0fbSYan, Zheng size_t pool_ns_len; 182010183a69SYan, Zheng int err = 0, err2 = 0, have = 0; 182110183a69SYan, Zheng 182210183a69SYan, Zheng down_read(&mdsc->pool_perm_rwsem); 182310183a69SYan, Zheng p = &mdsc->pool_perm_tree.rb_node; 182410183a69SYan, Zheng while (*p) { 182510183a69SYan, Zheng perm = rb_entry(*p, struct ceph_pool_perm, node); 182610183a69SYan, Zheng if (pool < perm->pool) 182710183a69SYan, Zheng p = &(*p)->rb_left; 182810183a69SYan, Zheng else if (pool > perm->pool) 182910183a69SYan, Zheng p = &(*p)->rb_right; 183010183a69SYan, Zheng else { 1831779fe0fbSYan, Zheng int ret = ceph_compare_string(pool_ns, 1832779fe0fbSYan, Zheng perm->pool_ns, 1833779fe0fbSYan, Zheng perm->pool_ns_len); 1834779fe0fbSYan, Zheng if (ret < 0) 1835779fe0fbSYan, Zheng p = &(*p)->rb_left; 1836779fe0fbSYan, Zheng else if (ret > 0) 1837779fe0fbSYan, Zheng p = &(*p)->rb_right; 1838779fe0fbSYan, Zheng else { 183910183a69SYan, Zheng have = perm->perm; 184010183a69SYan, Zheng break; 184110183a69SYan, Zheng } 184210183a69SYan, Zheng } 1843779fe0fbSYan, Zheng } 184410183a69SYan, Zheng up_read(&mdsc->pool_perm_rwsem); 184510183a69SYan, Zheng if (*p) 184610183a69SYan, Zheng goto out; 184710183a69SYan, Zheng 1848779fe0fbSYan, Zheng if (pool_ns) 1849779fe0fbSYan, Zheng dout("__ceph_pool_perm_get pool %lld ns %.*s no 
perm cached\n", 1850779fe0fbSYan, Zheng pool, (int)pool_ns->len, pool_ns->str); 1851779fe0fbSYan, Zheng else 18527627151eSYan, Zheng dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 185310183a69SYan, Zheng 185410183a69SYan, Zheng down_write(&mdsc->pool_perm_rwsem); 1855779fe0fbSYan, Zheng p = &mdsc->pool_perm_tree.rb_node; 185610183a69SYan, Zheng parent = NULL; 185710183a69SYan, Zheng while (*p) { 185810183a69SYan, Zheng parent = *p; 185910183a69SYan, Zheng perm = rb_entry(parent, struct ceph_pool_perm, node); 186010183a69SYan, Zheng if (pool < perm->pool) 186110183a69SYan, Zheng p = &(*p)->rb_left; 186210183a69SYan, Zheng else if (pool > perm->pool) 186310183a69SYan, Zheng p = &(*p)->rb_right; 186410183a69SYan, Zheng else { 1865779fe0fbSYan, Zheng int ret = ceph_compare_string(pool_ns, 1866779fe0fbSYan, Zheng perm->pool_ns, 1867779fe0fbSYan, Zheng perm->pool_ns_len); 1868779fe0fbSYan, Zheng if (ret < 0) 1869779fe0fbSYan, Zheng p = &(*p)->rb_left; 1870779fe0fbSYan, Zheng else if (ret > 0) 1871779fe0fbSYan, Zheng p = &(*p)->rb_right; 1872779fe0fbSYan, Zheng else { 187310183a69SYan, Zheng have = perm->perm; 187410183a69SYan, Zheng break; 187510183a69SYan, Zheng } 187610183a69SYan, Zheng } 1877779fe0fbSYan, Zheng } 187810183a69SYan, Zheng if (*p) { 187910183a69SYan, Zheng up_write(&mdsc->pool_perm_rwsem); 188010183a69SYan, Zheng goto out; 188110183a69SYan, Zheng } 188210183a69SYan, Zheng 188334b759b4SIlya Dryomov rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 188410183a69SYan, Zheng 1, false, GFP_NOFS); 188510183a69SYan, Zheng if (!rd_req) { 188610183a69SYan, Zheng err = -ENOMEM; 188710183a69SYan, Zheng goto out_unlock; 188810183a69SYan, Zheng } 188910183a69SYan, Zheng 189010183a69SYan, Zheng rd_req->r_flags = CEPH_OSD_FLAG_READ; 189110183a69SYan, Zheng osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 189210183a69SYan, Zheng rd_req->r_base_oloc.pool = pool; 1893779fe0fbSYan, Zheng if (pool_ns) 1894779fe0fbSYan, Zheng rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns); 1895d30291b9SIlya Dryomov ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino); 189610183a69SYan, Zheng 189713d1ad16SIlya Dryomov err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS); 189813d1ad16SIlya Dryomov if (err) 189913d1ad16SIlya Dryomov goto out_unlock; 190010183a69SYan, Zheng 190134b759b4SIlya Dryomov wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 190210183a69SYan, Zheng 1, false, GFP_NOFS); 190310183a69SYan, Zheng if (!wr_req) { 190410183a69SYan, Zheng err = -ENOMEM; 190510183a69SYan, Zheng goto out_unlock; 190610183a69SYan, Zheng } 190710183a69SYan, Zheng 190854ea0046SIlya Dryomov wr_req->r_flags = CEPH_OSD_FLAG_WRITE; 190910183a69SYan, Zheng osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 191063244fa1SIlya Dryomov ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); 1911d30291b9SIlya Dryomov ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); 191210183a69SYan, Zheng 191313d1ad16SIlya Dryomov err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS); 191413d1ad16SIlya Dryomov if (err) 191513d1ad16SIlya Dryomov goto out_unlock; 191610183a69SYan, Zheng 191710183a69SYan, Zheng /* one page should be large enough for STAT data */ 191810183a69SYan, Zheng pages = ceph_alloc_page_vector(1, GFP_KERNEL); 191910183a69SYan, Zheng if (IS_ERR(pages)) { 192010183a69SYan, Zheng err = PTR_ERR(pages); 192110183a69SYan, Zheng goto out_unlock; 192210183a69SYan, Zheng } 192310183a69SYan, Zheng 192410183a69SYan, Zheng osd_req_op_raw_data_in_pages(rd_req, 0, 
pages, PAGE_SIZE, 192510183a69SYan, Zheng 0, false, true); 192610183a69SYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 192710183a69SYan, Zheng 1928fac02ddfSArnd Bergmann wr_req->r_mtime = ci->vfs_inode.i_mtime; 192910183a69SYan, Zheng err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 193010183a69SYan, Zheng 193110183a69SYan, Zheng if (!err) 193210183a69SYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 193310183a69SYan, Zheng if (!err2) 193410183a69SYan, Zheng err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 193510183a69SYan, Zheng 193610183a69SYan, Zheng if (err >= 0 || err == -ENOENT) 193710183a69SYan, Zheng have |= POOL_READ; 1938131d7eb4SYan, Zheng else if (err != -EPERM) { 19390b98acd6SIlya Dryomov if (err == -EBLOCKLISTED) 19400b98acd6SIlya Dryomov fsc->blocklisted = true; 194110183a69SYan, Zheng goto out_unlock; 1942131d7eb4SYan, Zheng } 194310183a69SYan, Zheng 194410183a69SYan, Zheng if (err2 == 0 || err2 == -EEXIST) 194510183a69SYan, Zheng have |= POOL_WRITE; 194610183a69SYan, Zheng else if (err2 != -EPERM) { 19470b98acd6SIlya Dryomov if (err2 == -EBLOCKLISTED) 19480b98acd6SIlya Dryomov fsc->blocklisted = true; 194910183a69SYan, Zheng err = err2; 195010183a69SYan, Zheng goto out_unlock; 195110183a69SYan, Zheng } 195210183a69SYan, Zheng 1953779fe0fbSYan, Zheng pool_ns_len = pool_ns ? pool_ns->len : 0; 1954779fe0fbSYan, Zheng perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS); 195510183a69SYan, Zheng if (!perm) { 195610183a69SYan, Zheng err = -ENOMEM; 195710183a69SYan, Zheng goto out_unlock; 195810183a69SYan, Zheng } 195910183a69SYan, Zheng 196010183a69SYan, Zheng perm->pool = pool; 196110183a69SYan, Zheng perm->perm = have; 1962779fe0fbSYan, Zheng perm->pool_ns_len = pool_ns_len; 1963779fe0fbSYan, Zheng if (pool_ns_len > 0) 1964779fe0fbSYan, Zheng memcpy(perm->pool_ns, pool_ns->str, pool_ns_len); 1965779fe0fbSYan, Zheng perm->pool_ns[pool_ns_len] = 0; 1966779fe0fbSYan, Zheng 196710183a69SYan, Zheng rb_link_node(&perm->node, parent, p); 196810183a69SYan, Zheng rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 196910183a69SYan, Zheng err = 0; 197010183a69SYan, Zheng out_unlock: 197110183a69SYan, Zheng up_write(&mdsc->pool_perm_rwsem); 197210183a69SYan, Zheng 197310183a69SYan, Zheng ceph_osdc_put_request(rd_req); 197410183a69SYan, Zheng ceph_osdc_put_request(wr_req); 197510183a69SYan, Zheng out: 197610183a69SYan, Zheng if (!err) 197710183a69SYan, Zheng err = have; 1978779fe0fbSYan, Zheng if (pool_ns) 1979779fe0fbSYan, Zheng dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 1980779fe0fbSYan, Zheng pool, (int)pool_ns->len, pool_ns->str, err); 1981779fe0fbSYan, Zheng else 19827627151eSYan, Zheng dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 198310183a69SYan, Zheng return err; 198410183a69SYan, Zheng } 198510183a69SYan, Zheng 19865e3ded1bSYan, Zheng int ceph_pool_perm_check(struct inode *inode, int need) 198710183a69SYan, Zheng { 19885e3ded1bSYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 1989779fe0fbSYan, Zheng struct ceph_string *pool_ns; 19905e3ded1bSYan, Zheng s64 pool; 199110183a69SYan, Zheng int ret, flags; 199210183a69SYan, Zheng 1993e9b22501SJeff Layton /* Only need to do this for regular files */ 1994e9b22501SJeff Layton if (!S_ISREG(inode->i_mode)) 1995e9b22501SJeff Layton return 0; 1996e9b22501SJeff Layton 199780e80fbbSYan, Zheng if (ci->i_vino.snap != CEPH_NOSNAP) { 199880e80fbbSYan, Zheng /* 199980e80fbbSYan, Zheng * Pool permission check needs to 
write to the first object. 200080e80fbbSYan, Zheng * But for snapshot, head of the first object may have already 200180e80fbbSYan, Zheng * been deleted. Skip check to avoid creating orphan object. 200280e80fbbSYan, Zheng */ 200380e80fbbSYan, Zheng return 0; 200480e80fbbSYan, Zheng } 200580e80fbbSYan, Zheng 20065e3ded1bSYan, Zheng if (ceph_test_mount_opt(ceph_inode_to_client(inode), 200710183a69SYan, Zheng NOPOOLPERM)) 200810183a69SYan, Zheng return 0; 200910183a69SYan, Zheng 201010183a69SYan, Zheng spin_lock(&ci->i_ceph_lock); 201110183a69SYan, Zheng flags = ci->i_ceph_flags; 20127627151eSYan, Zheng pool = ci->i_layout.pool_id; 201310183a69SYan, Zheng spin_unlock(&ci->i_ceph_lock); 201410183a69SYan, Zheng check: 201510183a69SYan, Zheng if (flags & CEPH_I_POOL_PERM) { 201610183a69SYan, Zheng if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 20177627151eSYan, Zheng dout("ceph_pool_perm_check pool %lld no read perm\n", 201810183a69SYan, Zheng pool); 201910183a69SYan, Zheng return -EPERM; 202010183a69SYan, Zheng } 202110183a69SYan, Zheng if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 20227627151eSYan, Zheng dout("ceph_pool_perm_check pool %lld no write perm\n", 202310183a69SYan, Zheng pool); 202410183a69SYan, Zheng return -EPERM; 202510183a69SYan, Zheng } 202610183a69SYan, Zheng return 0; 202710183a69SYan, Zheng } 202810183a69SYan, Zheng 2029779fe0fbSYan, Zheng pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 2030779fe0fbSYan, Zheng ret = __ceph_pool_perm_get(ci, pool, pool_ns); 2031779fe0fbSYan, Zheng ceph_put_string(pool_ns); 203210183a69SYan, Zheng if (ret < 0) 203310183a69SYan, Zheng return ret; 203410183a69SYan, Zheng 203510183a69SYan, Zheng flags = CEPH_I_POOL_PERM; 203610183a69SYan, Zheng if (ret & POOL_READ) 203710183a69SYan, Zheng flags |= CEPH_I_POOL_RD; 203810183a69SYan, Zheng if (ret & POOL_WRITE) 203910183a69SYan, Zheng flags |= CEPH_I_POOL_WR; 204010183a69SYan, Zheng 204110183a69SYan, Zheng spin_lock(&ci->i_ceph_lock); 2042779fe0fbSYan, Zheng if (pool == ci->i_layout.pool_id && 2043779fe0fbSYan, Zheng pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) { 2044779fe0fbSYan, Zheng ci->i_ceph_flags |= flags; 204510183a69SYan, Zheng } else { 20467627151eSYan, Zheng pool = ci->i_layout.pool_id; 204710183a69SYan, Zheng flags = ci->i_ceph_flags; 204810183a69SYan, Zheng } 204910183a69SYan, Zheng spin_unlock(&ci->i_ceph_lock); 205010183a69SYan, Zheng goto check; 205110183a69SYan, Zheng } 205210183a69SYan, Zheng 205310183a69SYan, Zheng void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 205410183a69SYan, Zheng { 205510183a69SYan, Zheng struct ceph_pool_perm *perm; 205610183a69SYan, Zheng struct rb_node *n; 205710183a69SYan, Zheng 205810183a69SYan, Zheng while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 205910183a69SYan, Zheng n = rb_first(&mdsc->pool_perm_tree); 206010183a69SYan, Zheng perm = rb_entry(n, struct ceph_pool_perm, node); 206110183a69SYan, Zheng rb_erase(n, &mdsc->pool_perm_tree); 206210183a69SYan, Zheng kfree(perm); 206310183a69SYan, Zheng } 206410183a69SYan, Zheng } 2065
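/*
 * Illustrative sketch only, not part of addr.c: a minimal userspace model of
 * how the cached pool-permission probe result above is consumed.
 * __ceph_pool_perm_get() returns a POOL_READ/POOL_WRITE bitmask, which
 * ceph_pool_perm_check() folds into the inode's CEPH_I_POOL_PERM/RD/WR flags
 * and then tests against the capability bits the caller needs.  The constant
 * values and the helper names fold_pool_perm()/check_pool_perm() below are
 * invented for the illustration; only the flag-folding logic mirrors the
 * kernel code.
 */
#include <stdio.h>

#define POOL_READ	1
#define POOL_WRITE	2

#define I_POOL_PERM	0x1	/* stand-in for CEPH_I_POOL_PERM: result cached */
#define I_POOL_RD	0x2	/* stand-in for CEPH_I_POOL_RD */
#define I_POOL_WR	0x4	/* stand-in for CEPH_I_POOL_WR */

#define NEED_RD		0x10	/* stand-ins for CEPH_CAP_FILE_RD / _WR */
#define NEED_WR		0x20

/* Fold a probe result into cached flags, as ceph_pool_perm_check() does. */
static int fold_pool_perm(int probe_result)
{
	int flags = I_POOL_PERM;

	if (probe_result & POOL_READ)
		flags |= I_POOL_RD;
	if (probe_result & POOL_WRITE)
		flags |= I_POOL_WR;
	return flags;
}

/* Answer "may this I/O proceed?" from the cached flags: 0 or -1 (EPERM). */
static int check_pool_perm(int flags, int need)
{
	if (!(flags & I_POOL_PERM))
		return -1;	/* nothing cached; the kernel would probe the OSDs */
	if ((need & NEED_RD) && !(flags & I_POOL_RD))
		return -1;
	if ((need & NEED_WR) && !(flags & I_POOL_WR))
		return -1;
	return 0;
}

int main(void)
{
	int flags = fold_pool_perm(POOL_READ);	/* pool allows reads only */

	printf("read:  %s\n", check_pool_perm(flags, NEED_RD) ? "EPERM" : "ok");
	printf("write: %s\n", check_pool_perm(flags, NEED_WR) ? "EPERM" : "ok");
	return 0;
}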