1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 23d14c5d2SYehuda Sadeh #include <linux/ceph/ceph_debug.h> 31d3576fdSSage Weil 41d3576fdSSage Weil #include <linux/backing-dev.h> 51d3576fdSSage Weil #include <linux/fs.h> 61d3576fdSSage Weil #include <linux/mm.h> 71d3576fdSSage Weil #include <linux/pagemap.h> 81d3576fdSSage Weil #include <linux/writeback.h> /* generic_writepages */ 95a0e3ad6STejun Heo #include <linux/slab.h> 101d3576fdSSage Weil #include <linux/pagevec.h> 111d3576fdSSage Weil #include <linux/task_io_accounting_ops.h> 12f361bf4aSIngo Molnar #include <linux/signal.h> 135c308356SJeff Layton #include <linux/iversion.h> 1497e27aaaSXiubo Li #include <linux/ktime.h> 15f0702876SJeff Layton #include <linux/netfs.h> 161d3576fdSSage Weil 171d3576fdSSage Weil #include "super.h" 183d14c5d2SYehuda Sadeh #include "mds_client.h" 1999ccbd22SMilosz Tanski #include "cache.h" 2097e27aaaSXiubo Li #include "metric.h" 213d14c5d2SYehuda Sadeh #include <linux/ceph/osd_client.h> 2208c1ac50SIlya Dryomov #include <linux/ceph/striper.h> 231d3576fdSSage Weil 241d3576fdSSage Weil /* 251d3576fdSSage Weil * Ceph address space ops. 261d3576fdSSage Weil * 271d3576fdSSage Weil * There are a few funny things going on here. 281d3576fdSSage Weil * 291d3576fdSSage Weil * The page->private field is used to reference a struct 301d3576fdSSage Weil * ceph_snap_context for _every_ dirty page. This indicates which 311d3576fdSSage Weil * snapshot the page was logically dirtied in, and thus which snap 321d3576fdSSage Weil * context needs to be associated with the osd write during writeback. 331d3576fdSSage Weil * 341d3576fdSSage Weil * Similarly, struct ceph_inode_info maintains a set of counters to 3525985edcSLucas De Marchi * count dirty pages on the inode. In the absence of snapshots, 361d3576fdSSage Weil * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 371d3576fdSSage Weil * 381d3576fdSSage Weil * When a snapshot is taken (that is, when the client receives 391d3576fdSSage Weil * notification that a snapshot was taken), each inode with caps and 401d3576fdSSage Weil * with dirty pages (dirty pages implies there is a cap) gets a new 411d3576fdSSage Weil * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 421d3576fdSSage Weil * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 431d3576fdSSage Weil * moved to capsnap->dirty. (Unless a sync write is currently in 441d3576fdSSage Weil * progress. In that case, the capsnap is said to be "pending", new 451d3576fdSSage Weil * writes cannot start, and the capsnap isn't "finalized" until the 461d3576fdSSage Weil * write completes (or fails) and a final size/mtime for the inode for 471d3576fdSSage Weil * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 481d3576fdSSage Weil * 491d3576fdSSage Weil * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 501d3576fdSSage Weil * we look for the first capsnap in i_cap_snaps and write out pages in 511d3576fdSSage Weil * that snap context _only_. Then we move on to the next capsnap, 521d3576fdSSage Weil * eventually reaching the "live" or "head" context (i.e., pages that 531d3576fdSSage Weil * are not yet snapped) and are writing the most recently dirtied 541d3576fdSSage Weil * pages. 551d3576fdSSage Weil * 561d3576fdSSage Weil * Invalidate and so forth must take care to ensure the dirty page 571d3576fdSSage Weil * accounting is preserved. 
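 *
 * As a concrete (purely illustrative) example: if i_cap_snaps holds
 * capsnaps whose snap contexts have seqs 4 and 7 while the head
 * context has seq 9, writeback must flush the pages dirtied under
 * seq 4 first, then those under seq 7, and only then the pages
 * dirtied under the live head context.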
581d3576fdSSage Weil */ 591d3576fdSSage Weil 602baba250SYehuda Sadeh #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 612baba250SYehuda Sadeh #define CONGESTION_OFF_THRESH(congestion_kb) \ 622baba250SYehuda Sadeh (CONGESTION_ON_THRESH(congestion_kb) - \ 632baba250SYehuda Sadeh (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 642baba250SYehuda Sadeh 65d801327dSJeff Layton static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, 66*78525c74SDavid Howells struct folio *folio, void **_fsdata); 67d801327dSJeff Layton 6861600ef8SYan, Zheng static inline struct ceph_snap_context *page_snap_context(struct page *page) 6961600ef8SYan, Zheng { 7061600ef8SYan, Zheng if (PagePrivate(page)) 7161600ef8SYan, Zheng return (void *)page->private; 7261600ef8SYan, Zheng return NULL; 7361600ef8SYan, Zheng } 741d3576fdSSage Weil 751d3576fdSSage Weil /* 761d3576fdSSage Weil * Dirty a page. Optimistically adjust accounting, on the assumption 771d3576fdSSage Weil * that we won't race with invalidate. If we do, readjust. 781d3576fdSSage Weil */ 791d3576fdSSage Weil static int ceph_set_page_dirty(struct page *page) 801d3576fdSSage Weil { 811d3576fdSSage Weil struct address_space *mapping = page->mapping; 821d3576fdSSage Weil struct inode *inode; 831d3576fdSSage Weil struct ceph_inode_info *ci; 841d3576fdSSage Weil struct ceph_snap_context *snapc; 851d3576fdSSage Weil 867d6e1f54SSha Zhengju if (PageDirty(page)) { 871d3576fdSSage Weil dout("%p set_page_dirty %p idx %lu -- already dirty\n", 881d3576fdSSage Weil mapping->host, page, page->index); 897d6e1f54SSha Zhengju BUG_ON(!PagePrivate(page)); 901d3576fdSSage Weil return 0; 911d3576fdSSage Weil } 921d3576fdSSage Weil 931d3576fdSSage Weil inode = mapping->host; 941d3576fdSSage Weil ci = ceph_inode(inode); 951d3576fdSSage Weil 961d3576fdSSage Weil /* dirty the head */ 97be655596SSage Weil spin_lock(&ci->i_ceph_lock); 985dda377cSYan, Zheng BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference 995dda377cSYan, Zheng if (__ceph_have_pending_cap_snap(ci)) { 1005dda377cSYan, Zheng struct ceph_cap_snap *capsnap = 1015dda377cSYan, Zheng list_last_entry(&ci->i_cap_snaps, 1025dda377cSYan, Zheng struct ceph_cap_snap, 1035dda377cSYan, Zheng ci_item); 1045dda377cSYan, Zheng snapc = ceph_get_snap_context(capsnap->context); 1055dda377cSYan, Zheng capsnap->dirty_pages++; 1065dda377cSYan, Zheng } else { 1075dda377cSYan, Zheng BUG_ON(!ci->i_head_snapc); 1085dda377cSYan, Zheng snapc = ceph_get_snap_context(ci->i_head_snapc); 1091d3576fdSSage Weil ++ci->i_wrbuffer_ref_head; 1105dda377cSYan, Zheng } 1111d3576fdSSage Weil if (ci->i_wrbuffer_ref == 0) 1120444d76aSDave Chinner ihold(inode); 1131d3576fdSSage Weil ++ci->i_wrbuffer_ref; 1141d3576fdSSage Weil dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 1151d3576fdSSage Weil "snapc %p seq %lld (%d snaps)\n", 1161d3576fdSSage Weil mapping->host, page, page->index, 1171d3576fdSSage Weil ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 1181d3576fdSSage Weil ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 1191d3576fdSSage Weil snapc, snapc->seq, snapc->num_snaps); 120be655596SSage Weil spin_unlock(&ci->i_ceph_lock); 1211d3576fdSSage Weil 1221d3576fdSSage Weil /* 1231d3576fdSSage Weil * Reference snap context in page->private. Also set 1241d3576fdSSage Weil * PagePrivate so that we get invalidatepage callback. 
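 *
 * attach_page_private() below stores the snapc pointer in
 * page->private, sets PagePrivate and takes a reference on the page;
 * the snap context reference obtained above is now owned by the page
 * and is dropped again when the page is invalidated or written back.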
1251d3576fdSSage Weil */ 1267d6e1f54SSha Zhengju BUG_ON(PagePrivate(page)); 127379fc7faSJeff Layton attach_page_private(page, snapc); 1281d3576fdSSage Weil 12922d41cdcSJeff Layton return __set_page_dirty_nobuffers(page); 1301d3576fdSSage Weil } 1311d3576fdSSage Weil 1321d3576fdSSage Weil /* 1331d3576fdSSage Weil * If we are truncating the full page (i.e. offset == 0), adjust the 1341d3576fdSSage Weil * dirty page counters appropriately. Only called if there is private 1351d3576fdSSage Weil * data on the page. 1361d3576fdSSage Weil */ 137d47992f8SLukas Czerner static void ceph_invalidatepage(struct page *page, unsigned int offset, 138d47992f8SLukas Czerner unsigned int length) 1391d3576fdSSage Weil { 1404ce1e9adSAlexander Beregalov struct inode *inode; 1411d3576fdSSage Weil struct ceph_inode_info *ci; 142379fc7faSJeff Layton struct ceph_snap_context *snapc; 1431d3576fdSSage Weil 1447c46b318SJeff Layton wait_on_page_fscache(page); 1457c46b318SJeff Layton 1464ce1e9adSAlexander Beregalov inode = page->mapping->host; 147b150f5c1SMilosz Tanski ci = ceph_inode(inode); 148b150f5c1SMilosz Tanski 1498ff2d290SJeff Layton if (offset != 0 || length != thp_size(page)) { 150b150f5c1SMilosz Tanski dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 151b150f5c1SMilosz Tanski inode, page, page->index, offset, length); 152b150f5c1SMilosz Tanski return; 153b150f5c1SMilosz Tanski } 1544ce1e9adSAlexander Beregalov 155b072d774SYan, Zheng WARN_ON(!PageLocked(page)); 15699ccbd22SMilosz Tanski if (!PagePrivate(page)) 15799ccbd22SMilosz Tanski return; 15899ccbd22SMilosz Tanski 159569d39fcSLukas Czerner dout("%p invalidatepage %p idx %lu full dirty page\n", 160569d39fcSLukas Czerner inode, page, page->index); 161b150f5c1SMilosz Tanski 162379fc7faSJeff Layton snapc = detach_page_private(page); 1631d3576fdSSage Weil ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 1641d3576fdSSage Weil ceph_put_snap_context(snapc); 1651d3576fdSSage Weil } 1661d3576fdSSage Weil 1677c46b318SJeff Layton static int ceph_releasepage(struct page *page, gfp_t gfp) 1681d3576fdSSage Weil { 169e55f1a18SNeilBrown dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, 170e55f1a18SNeilBrown page, page->index, PageDirty(page) ? 
"" : "not "); 17199ccbd22SMilosz Tanski 1727c46b318SJeff Layton if (PageFsCache(page)) { 1737c46b318SJeff Layton if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) 1747c46b318SJeff Layton return 0; 1757c46b318SJeff Layton wait_on_page_fscache(page); 1767c46b318SJeff Layton } 17799ccbd22SMilosz Tanski return !PagePrivate(page); 1781d3576fdSSage Weil } 1791d3576fdSSage Weil 180f0702876SJeff Layton static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) 181f0702876SJeff Layton { 182f0702876SJeff Layton struct inode *inode = rreq->mapping->host; 183f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 184f0702876SJeff Layton struct ceph_file_layout *lo = &ci->i_layout; 185f0702876SJeff Layton u32 blockoff; 186f0702876SJeff Layton u64 blockno; 187f0702876SJeff Layton 188f0702876SJeff Layton /* Expand the start downward */ 189f0702876SJeff Layton blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); 190f0702876SJeff Layton rreq->start = blockno * lo->stripe_unit; 191f0702876SJeff Layton rreq->len += blockoff; 192f0702876SJeff Layton 193f0702876SJeff Layton /* Now, round up the length to the next block */ 194f0702876SJeff Layton rreq->len = roundup(rreq->len, lo->stripe_unit); 195f0702876SJeff Layton } 196f0702876SJeff Layton 197f0702876SJeff Layton static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) 198f0702876SJeff Layton { 199f0702876SJeff Layton struct inode *inode = subreq->rreq->mapping->host; 200f0702876SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 201f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 202f0702876SJeff Layton u64 objno, objoff; 203f0702876SJeff Layton u32 xlen; 204f0702876SJeff Layton 205f0702876SJeff Layton /* Truncate the extent at the end of the current block */ 206f0702876SJeff Layton ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, 207f0702876SJeff Layton &objno, &objoff, &xlen); 208f0702876SJeff Layton subreq->len = min(xlen, fsc->mount_options->rsize); 209f0702876SJeff Layton return true; 210f0702876SJeff Layton } 211f0702876SJeff Layton 212f0702876SJeff Layton static void finish_netfs_read(struct ceph_osd_request *req) 213f0702876SJeff Layton { 214f0702876SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); 215f0702876SJeff Layton struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 216f0702876SJeff Layton struct netfs_read_subrequest *subreq = req->r_priv; 217f0702876SJeff Layton int num_pages; 218f0702876SJeff Layton int err = req->r_result; 219f0702876SJeff Layton 2208ae99ae2SXiubo Li ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 221903f4fecSXiubo Li req->r_end_latency, osd_data->length, err); 222f0702876SJeff Layton 223f0702876SJeff Layton dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 224f0702876SJeff Layton subreq->len, i_size_read(req->r_inode)); 225f0702876SJeff Layton 226f0702876SJeff Layton /* no object means success but no data */ 227f0702876SJeff Layton if (err == -ENOENT) 228f0702876SJeff Layton err = 0; 229f0702876SJeff Layton else if (err == -EBLOCKLISTED) 230f0702876SJeff Layton fsc->blocklisted = true; 231f0702876SJeff Layton 232f0702876SJeff Layton if (err >= 0 && err < subreq->len) 233f0702876SJeff Layton __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 234f0702876SJeff Layton 235f0702876SJeff Layton netfs_subreq_terminated(subreq, err, true); 236f0702876SJeff Layton 237f0702876SJeff Layton num_pages = 
calc_pages_for(osd_data->alignment, osd_data->length);
238f0702876SJeff Layton ceph_put_page_vector(osd_data->pages, num_pages, false);
239f0702876SJeff Layton iput(req->r_inode);
240f0702876SJeff Layton }
241f0702876SJeff Layton
242f0702876SJeff Layton static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
243f0702876SJeff Layton {
244f0702876SJeff Layton struct netfs_read_request *rreq = subreq->rreq;
245f0702876SJeff Layton struct inode *inode = rreq->mapping->host;
246f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode);
247f0702876SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
248f0702876SJeff Layton struct ceph_osd_request *req;
249f0702876SJeff Layton struct ceph_vino vino = ceph_vino(inode);
250f0702876SJeff Layton struct iov_iter iter;
251f0702876SJeff Layton struct page **pages;
252f0702876SJeff Layton size_t page_off;
253f0702876SJeff Layton int err = 0;
254f0702876SJeff Layton u64 len = subreq->len;
255f0702876SJeff Layton
256f0702876SJeff Layton req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
257f0702876SJeff Layton 0, 1, CEPH_OSD_OP_READ,
258f0702876SJeff Layton CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
259f0702876SJeff Layton NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
260f0702876SJeff Layton if (IS_ERR(req)) {
261f0702876SJeff Layton err = PTR_ERR(req);
262f0702876SJeff Layton req = NULL;
263f0702876SJeff Layton goto out;
264f0702876SJeff Layton }
265f0702876SJeff Layton
266f0702876SJeff Layton dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
267f0702876SJeff Layton iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len);
268f0702876SJeff Layton err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off);
269f0702876SJeff Layton if (err < 0) {
270f0702876SJeff Layton dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
271f0702876SJeff Layton goto out;
272f0702876SJeff Layton }
273f0702876SJeff Layton
274f0702876SJeff Layton /* should always give us a page-aligned read */
275f0702876SJeff Layton WARN_ON_ONCE(page_off);
276f0702876SJeff Layton len = err;
277f0702876SJeff Layton
278f0702876SJeff Layton osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
279f0702876SJeff Layton req->r_callback = finish_netfs_read;
280f0702876SJeff Layton req->r_priv = subreq;
281f0702876SJeff Layton req->r_inode = inode;
282f0702876SJeff Layton ihold(inode);
283f0702876SJeff Layton
284f0702876SJeff Layton err = ceph_osdc_start_request(req->r_osdc, req, false);
285f0702876SJeff Layton if (err)
286f0702876SJeff Layton iput(inode);
287f0702876SJeff Layton out:
288f0702876SJeff Layton ceph_osdc_put_request(req);
289f0702876SJeff Layton if (err)
290f0702876SJeff Layton netfs_subreq_terminated(subreq, err, false);
291f0702876SJeff Layton dout("%s: result %d\n", __func__, err);
292f0702876SJeff Layton }
293f0702876SJeff Layton
294f0702876SJeff Layton static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
295f0702876SJeff Layton {
296f0702876SJeff Layton }
297f0702876SJeff Layton
29849870056SJeff Layton static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
29949870056SJeff Layton {
30049870056SJeff Layton struct inode *inode = mapping->host;
30149870056SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode);
30249870056SJeff Layton int got = (uintptr_t)priv;
30349870056SJeff Layton
30449870056SJeff Layton if (got)
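/* drop the cap references taken in ceph_readahead() */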
30549870056SJeff Layton ceph_put_cap_refs(ci, got); 30649870056SJeff Layton } 30749870056SJeff Layton 308675d4d89SWei Yongjun static const struct netfs_read_request_ops ceph_netfs_read_ops = { 309f0702876SJeff Layton .init_rreq = ceph_init_rreq, 310f0702876SJeff Layton .is_cache_enabled = ceph_is_cache_enabled, 311f0702876SJeff Layton .begin_cache_operation = ceph_begin_cache_operation, 312f0702876SJeff Layton .issue_op = ceph_netfs_issue_op, 313f0702876SJeff Layton .expand_readahead = ceph_netfs_expand_readahead, 314f0702876SJeff Layton .clamp_length = ceph_netfs_clamp_length, 315d801327dSJeff Layton .check_write_begin = ceph_netfs_check_write_begin, 31649870056SJeff Layton .cleanup = ceph_readahead_cleanup, 317f0702876SJeff Layton }; 318f0702876SJeff Layton 319f0702876SJeff Layton /* read a single page, without unlocking it. */ 320*78525c74SDavid Howells static int ceph_readpage(struct file *file, struct page *subpage) 321f0702876SJeff Layton { 322*78525c74SDavid Howells struct folio *folio = page_folio(subpage); 323f0702876SJeff Layton struct inode *inode = file_inode(file); 324f0702876SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 325f0702876SJeff Layton struct ceph_vino vino = ceph_vino(inode); 326*78525c74SDavid Howells size_t len = folio_size(folio); 327*78525c74SDavid Howells u64 off = folio_file_pos(folio); 328f0702876SJeff Layton 329f0702876SJeff Layton if (ci->i_inline_version != CEPH_INLINE_NONE) { 330f0702876SJeff Layton /* 331f0702876SJeff Layton * Uptodate inline data should have been added 332f0702876SJeff Layton * into page cache while getting Fcr caps. 333f0702876SJeff Layton */ 334f0702876SJeff Layton if (off == 0) { 335*78525c74SDavid Howells folio_unlock(folio); 336f0702876SJeff Layton return -EINVAL; 337f0702876SJeff Layton } 338*78525c74SDavid Howells zero_user_segment(&folio->page, 0, folio_size(folio)); 339*78525c74SDavid Howells folio_mark_uptodate(folio); 340*78525c74SDavid Howells folio_unlock(folio); 341f0702876SJeff Layton return 0; 342f0702876SJeff Layton } 343f0702876SJeff Layton 344*78525c74SDavid Howells dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n", 345*78525c74SDavid Howells vino.ino, vino.snap, file, off, len, folio, folio_index(folio)); 346f0702876SJeff Layton 347*78525c74SDavid Howells return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL); 348f0702876SJeff Layton } 349f0702876SJeff Layton 35049870056SJeff Layton static void ceph_readahead(struct readahead_control *ractl) 3511d3576fdSSage Weil { 35249870056SJeff Layton struct inode *inode = file_inode(ractl->file); 35349870056SJeff Layton struct ceph_file_info *fi = ractl->file->private_data; 35449870056SJeff Layton struct ceph_rw_context *rw_ctx; 3552b1ac852SYan, Zheng int got = 0; 3562b1ac852SYan, Zheng int ret = 0; 3572b1ac852SYan, Zheng 35883701246SYan, Zheng if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) 35949870056SJeff Layton return; 36083701246SYan, Zheng 36173737682SChengguang Xu rw_ctx = ceph_find_rw_context(fi); 36249870056SJeff Layton if (!rw_ctx) { 36349870056SJeff Layton /* 36449870056SJeff Layton * readahead callers do not necessarily hold Fcb caps 36549870056SJeff Layton * (e.g. fadvise, madvise). 
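 * In that case take a temporary CEPH_CAP_FILE_CACHE reference here;
 * it is handed to netfs as the request priv and dropped again in
 * ceph_readahead_cleanup().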
36649870056SJeff Layton */ 36749870056SJeff Layton int want = CEPH_CAP_FILE_CACHE; 36849870056SJeff Layton 36949870056SJeff Layton ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got); 37049870056SJeff Layton if (ret < 0) 37149870056SJeff Layton dout("start_read %p, error getting cap\n", inode); 37249870056SJeff Layton else if (!(got & want)) 37349870056SJeff Layton dout("start_read %p, no cache cap\n", inode); 37449870056SJeff Layton 37549870056SJeff Layton if (ret <= 0) 37649870056SJeff Layton return; 3771d3576fdSSage Weil } 37849870056SJeff Layton netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got); 3791d3576fdSSage Weil } 3801d3576fdSSage Weil 3811f934b00SYan, Zheng struct ceph_writeback_ctl 3821f934b00SYan, Zheng { 3831f934b00SYan, Zheng loff_t i_size; 3841f934b00SYan, Zheng u64 truncate_size; 3851f934b00SYan, Zheng u32 truncate_seq; 3861f934b00SYan, Zheng bool size_stable; 3872a2d927eSYan, Zheng bool head_snapc; 3881f934b00SYan, Zheng }; 3891f934b00SYan, Zheng 3901d3576fdSSage Weil /* 3911d3576fdSSage Weil * Get ref for the oldest snapc for an inode with dirty data... that is, the 3921d3576fdSSage Weil * only snap context we are allowed to write back. 3931d3576fdSSage Weil */ 3941f934b00SYan, Zheng static struct ceph_snap_context * 39505455e11SYan, Zheng get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl, 39605455e11SYan, Zheng struct ceph_snap_context *page_snapc) 3971d3576fdSSage Weil { 3981d3576fdSSage Weil struct ceph_inode_info *ci = ceph_inode(inode); 3991d3576fdSSage Weil struct ceph_snap_context *snapc = NULL; 4001d3576fdSSage Weil struct ceph_cap_snap *capsnap = NULL; 4011d3576fdSSage Weil 402be655596SSage Weil spin_lock(&ci->i_ceph_lock); 4031d3576fdSSage Weil list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 4041d3576fdSSage Weil dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 4051d3576fdSSage Weil capsnap->context, capsnap->dirty_pages); 40605455e11SYan, Zheng if (!capsnap->dirty_pages) 40705455e11SYan, Zheng continue; 40805455e11SYan, Zheng 40905455e11SYan, Zheng /* get i_size, truncate_{seq,size} for page_snapc? 
*/ 41005455e11SYan, Zheng if (snapc && capsnap->context != page_snapc) 41105455e11SYan, Zheng continue; 41205455e11SYan, Zheng 4131f934b00SYan, Zheng if (ctl) { 4141f934b00SYan, Zheng if (capsnap->writing) { 4151f934b00SYan, Zheng ctl->i_size = i_size_read(inode); 4161f934b00SYan, Zheng ctl->size_stable = false; 4171f934b00SYan, Zheng } else { 4181f934b00SYan, Zheng ctl->i_size = capsnap->size; 4191f934b00SYan, Zheng ctl->size_stable = true; 4201f934b00SYan, Zheng } 4211f934b00SYan, Zheng ctl->truncate_size = capsnap->truncate_size; 4221f934b00SYan, Zheng ctl->truncate_seq = capsnap->truncate_seq; 4232a2d927eSYan, Zheng ctl->head_snapc = false; 4241f934b00SYan, Zheng } 42505455e11SYan, Zheng 42605455e11SYan, Zheng if (snapc) 4271d3576fdSSage Weil break; 42805455e11SYan, Zheng 42905455e11SYan, Zheng snapc = ceph_get_snap_context(capsnap->context); 43005455e11SYan, Zheng if (!page_snapc || 43105455e11SYan, Zheng page_snapc == snapc || 43205455e11SYan, Zheng page_snapc->seq > snapc->seq) 43305455e11SYan, Zheng break; 4341d3576fdSSage Weil } 4357d8cb26dSSage Weil if (!snapc && ci->i_wrbuffer_ref_head) { 43680e755feSSage Weil snapc = ceph_get_snap_context(ci->i_head_snapc); 4371d3576fdSSage Weil dout(" head snapc %p has %d dirty pages\n", 4381d3576fdSSage Weil snapc, ci->i_wrbuffer_ref_head); 4391f934b00SYan, Zheng if (ctl) { 4401f934b00SYan, Zheng ctl->i_size = i_size_read(inode); 4411f934b00SYan, Zheng ctl->truncate_size = ci->i_truncate_size; 4421f934b00SYan, Zheng ctl->truncate_seq = ci->i_truncate_seq; 4431f934b00SYan, Zheng ctl->size_stable = false; 4442a2d927eSYan, Zheng ctl->head_snapc = true; 4451f934b00SYan, Zheng } 4461d3576fdSSage Weil } 447be655596SSage Weil spin_unlock(&ci->i_ceph_lock); 4481d3576fdSSage Weil return snapc; 4491d3576fdSSage Weil } 4501d3576fdSSage Weil 4511f934b00SYan, Zheng static u64 get_writepages_data_length(struct inode *inode, 4521f934b00SYan, Zheng struct page *page, u64 start) 4531f934b00SYan, Zheng { 4541f934b00SYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 4551f934b00SYan, Zheng struct ceph_snap_context *snapc = page_snap_context(page); 4561f934b00SYan, Zheng struct ceph_cap_snap *capsnap = NULL; 4571f934b00SYan, Zheng u64 end = i_size_read(inode); 4581f934b00SYan, Zheng 4591f934b00SYan, Zheng if (snapc != ci->i_head_snapc) { 4601f934b00SYan, Zheng bool found = false; 4611f934b00SYan, Zheng spin_lock(&ci->i_ceph_lock); 4621f934b00SYan, Zheng list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 4631f934b00SYan, Zheng if (capsnap->context == snapc) { 4641f934b00SYan, Zheng if (!capsnap->writing) 4651f934b00SYan, Zheng end = capsnap->size; 4661f934b00SYan, Zheng found = true; 4671f934b00SYan, Zheng break; 4681f934b00SYan, Zheng } 4691f934b00SYan, Zheng } 4701f934b00SYan, Zheng spin_unlock(&ci->i_ceph_lock); 4711f934b00SYan, Zheng WARN_ON(!found); 4721f934b00SYan, Zheng } 4738ff2d290SJeff Layton if (end > page_offset(page) + thp_size(page)) 4748ff2d290SJeff Layton end = page_offset(page) + thp_size(page); 4751f934b00SYan, Zheng return end > start ? end - start : 0; 4761f934b00SYan, Zheng } 4771f934b00SYan, Zheng 4781d3576fdSSage Weil /* 4791d3576fdSSage Weil * Write a single page, but leave the page locked. 4801d3576fdSSage Weil * 481b72b13ebSJeff Layton * If we get a write error, mark the mapping for error, but still adjust the 4821d3576fdSSage Weil * dirty page accounting (i.e., page is no longer dirty). 
4831d3576fdSSage Weil */ 4841d3576fdSSage Weil static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 4851d3576fdSSage Weil { 4866390987fSJeff Layton struct inode *inode = page->mapping->host; 4876390987fSJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 4886390987fSJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 4896298a337SSage Weil struct ceph_snap_context *snapc, *oldest; 490fc2744aaSYan, Zheng loff_t page_off = page_offset(page); 4916390987fSJeff Layton int err; 4928ff2d290SJeff Layton loff_t len = thp_size(page); 4931f934b00SYan, Zheng struct ceph_writeback_ctl ceph_wbc; 4946390987fSJeff Layton struct ceph_osd_client *osdc = &fsc->client->osdc; 4956390987fSJeff Layton struct ceph_osd_request *req; 4961d3576fdSSage Weil 4971d3576fdSSage Weil dout("writepage %p idx %lu\n", page, page->index); 4981d3576fdSSage Weil 4991d3576fdSSage Weil /* verify this is a writeable snap context */ 50061600ef8SYan, Zheng snapc = page_snap_context(page); 501d37b1d99SMarkus Elfring if (!snapc) { 5021d3576fdSSage Weil dout("writepage %p page %p not dirty?\n", inode, page); 50343986881SYan, Zheng return 0; 5041d3576fdSSage Weil } 50505455e11SYan, Zheng oldest = get_oldest_context(inode, &ceph_wbc, snapc); 5066298a337SSage Weil if (snapc->seq > oldest->seq) { 5071d3576fdSSage Weil dout("writepage %p page %p snapc %p not writeable - noop\n", 50861600ef8SYan, Zheng inode, page, snapc); 5091d3576fdSSage Weil /* we should only noop if called by kswapd */ 510fa71fefbSYan, Zheng WARN_ON(!(current->flags & PF_MEMALLOC)); 5116298a337SSage Weil ceph_put_snap_context(oldest); 512fa71fefbSYan, Zheng redirty_page_for_writepage(wbc, page); 51343986881SYan, Zheng return 0; 5141d3576fdSSage Weil } 5156298a337SSage Weil ceph_put_snap_context(oldest); 5161d3576fdSSage Weil 5171d3576fdSSage Weil /* is this a partial page at end of file? 
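 * A page that starts at or beyond i_size is invalidated and skipped;
 * otherwise the write length is trimmed to i_size just below.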
*/ 5181f934b00SYan, Zheng if (page_off >= ceph_wbc.i_size) { 5191f934b00SYan, Zheng dout("%p page eof %llu\n", page, ceph_wbc.i_size); 5208ff2d290SJeff Layton page->mapping->a_ops->invalidatepage(page, 0, thp_size(page)); 52143986881SYan, Zheng return 0; 522fc2744aaSYan, Zheng } 52343986881SYan, Zheng 5241f934b00SYan, Zheng if (ceph_wbc.i_size < page_off + len) 5251f934b00SYan, Zheng len = ceph_wbc.i_size - page_off; 5261d3576fdSSage Weil 5276390987fSJeff Layton dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n", 5281c0a9c2dSYan, Zheng inode, page, page->index, page_off, len, snapc, snapc->seq); 5291d3576fdSSage Weil 530314c4737SYan, Zheng if (atomic_long_inc_return(&fsc->writeback_count) > 5313d14c5d2SYehuda Sadeh CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 53209dc9fc2SJan Kara set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 5332baba250SYehuda Sadeh 5341d3576fdSSage Weil set_page_writeback(page); 5356390987fSJeff Layton req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1, 5366390987fSJeff Layton CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc, 5376390987fSJeff Layton ceph_wbc.truncate_seq, ceph_wbc.truncate_size, 5386390987fSJeff Layton true); 5396390987fSJeff Layton if (IS_ERR(req)) { 5406390987fSJeff Layton redirty_page_for_writepage(wbc, page); 5416390987fSJeff Layton end_page_writeback(page); 5426390987fSJeff Layton return PTR_ERR(req); 5436390987fSJeff Layton } 5446390987fSJeff Layton 5456390987fSJeff Layton /* it may be a short write due to an object boundary */ 5468ff2d290SJeff Layton WARN_ON_ONCE(len > thp_size(page)); 5476390987fSJeff Layton osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false); 5486390987fSJeff Layton dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len); 5496390987fSJeff Layton 5506390987fSJeff Layton req->r_mtime = inode->i_mtime; 5516390987fSJeff Layton err = ceph_osdc_start_request(osdc, req, true); 5526390987fSJeff Layton if (!err) 5536390987fSJeff Layton err = ceph_osdc_wait_request(osdc, req); 5546390987fSJeff Layton 5558ae99ae2SXiubo Li ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 556903f4fecSXiubo Li req->r_end_latency, len, err); 5576390987fSJeff Layton 5586390987fSJeff Layton ceph_osdc_put_request(req); 5596390987fSJeff Layton if (err == 0) 5606390987fSJeff Layton err = len; 5616390987fSJeff Layton 5621d3576fdSSage Weil if (err < 0) { 563ad15ec06SYan, Zheng struct writeback_control tmp_wbc; 564ad15ec06SYan, Zheng if (!wbc) 565ad15ec06SYan, Zheng wbc = &tmp_wbc; 566ad15ec06SYan, Zheng if (err == -ERESTARTSYS) { 567ad15ec06SYan, Zheng /* killed by SIGKILL */ 568ad15ec06SYan, Zheng dout("writepage interrupted page %p\n", page); 569ad15ec06SYan, Zheng redirty_page_for_writepage(wbc, page); 570ad15ec06SYan, Zheng end_page_writeback(page); 57143986881SYan, Zheng return err; 572ad15ec06SYan, Zheng } 5730b98acd6SIlya Dryomov if (err == -EBLOCKLISTED) 5740b98acd6SIlya Dryomov fsc->blocklisted = true; 575ad15ec06SYan, Zheng dout("writepage setting page/mapping error %d %p\n", 576ad15ec06SYan, Zheng err, page); 5771d3576fdSSage Weil mapping_set_error(&inode->i_data, err); 5781d3576fdSSage Weil wbc->pages_skipped++; 5791d3576fdSSage Weil } else { 5801d3576fdSSage Weil dout("writepage cleaned page %p\n", page); 5811d3576fdSSage Weil err = 0; /* vfs expects us to return 0 */ 5821d3576fdSSage Weil } 583379fc7faSJeff Layton oldest = detach_page_private(page); 584379fc7faSJeff Layton WARN_ON_ONCE(oldest != snapc); 5851d3576fdSSage Weil 
end_page_writeback(page); 5861d3576fdSSage Weil ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 5876298a337SSage Weil ceph_put_snap_context(snapc); /* page's reference */ 588314c4737SYan, Zheng 589314c4737SYan, Zheng if (atomic_long_dec_return(&fsc->writeback_count) < 590314c4737SYan, Zheng CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 591314c4737SYan, Zheng clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 592314c4737SYan, Zheng 5931d3576fdSSage Weil return err; 5941d3576fdSSage Weil } 5951d3576fdSSage Weil 5961d3576fdSSage Weil static int ceph_writepage(struct page *page, struct writeback_control *wbc) 5971d3576fdSSage Weil { 598dbd646a8SYehuda Sadeh int err; 599dbd646a8SYehuda Sadeh struct inode *inode = page->mapping->host; 600dbd646a8SYehuda Sadeh BUG_ON(!inode); 60170b666c3SSage Weil ihold(inode); 602dbd646a8SYehuda Sadeh err = writepage_nounlock(page, wbc); 603ad15ec06SYan, Zheng if (err == -ERESTARTSYS) { 604ad15ec06SYan, Zheng /* direct memory reclaimer was killed by SIGKILL. return 0 605ad15ec06SYan, Zheng * to prevent caller from setting mapping/page error */ 606ad15ec06SYan, Zheng err = 0; 607ad15ec06SYan, Zheng } 6081d3576fdSSage Weil unlock_page(page); 609dbd646a8SYehuda Sadeh iput(inode); 6101d3576fdSSage Weil return err; 6111d3576fdSSage Weil } 6121d3576fdSSage Weil 6131d3576fdSSage Weil /* 6141d3576fdSSage Weil * async writeback completion handler. 6151d3576fdSSage Weil * 6161d3576fdSSage Weil * If we get an error, set the mapping error bit, but not the individual 6171d3576fdSSage Weil * page error bits. 6181d3576fdSSage Weil */ 61985e084feSIlya Dryomov static void writepages_finish(struct ceph_osd_request *req) 6201d3576fdSSage Weil { 6211d3576fdSSage Weil struct inode *inode = req->r_inode; 6221d3576fdSSage Weil struct ceph_inode_info *ci = ceph_inode(inode); 62387060c10SAlex Elder struct ceph_osd_data *osd_data; 6241d3576fdSSage Weil struct page *page; 6255b64640cSYan, Zheng int num_pages, total_pages = 0; 6265b64640cSYan, Zheng int i, j; 6275b64640cSYan, Zheng int rc = req->r_result; 6281d3576fdSSage Weil struct ceph_snap_context *snapc = req->r_snapc; 6291d3576fdSSage Weil struct address_space *mapping = inode->i_mapping; 6303d14c5d2SYehuda Sadeh struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 631903f4fecSXiubo Li unsigned int len = 0; 6325b64640cSYan, Zheng bool remove_page; 6331d3576fdSSage Weil 6345b64640cSYan, Zheng dout("writepages_finish %p rc %d\n", inode, rc); 63526544c62SJeff Layton if (rc < 0) { 6361d3576fdSSage Weil mapping_set_error(mapping, rc); 63726544c62SJeff Layton ceph_set_error_write(ci); 6380b98acd6SIlya Dryomov if (rc == -EBLOCKLISTED) 6390b98acd6SIlya Dryomov fsc->blocklisted = true; 64026544c62SJeff Layton } else { 64126544c62SJeff Layton ceph_clear_error_write(ci); 64226544c62SJeff Layton } 643e63dc5c7SYehuda Sadeh 644e63dc5c7SYehuda Sadeh /* 645e63dc5c7SYehuda Sadeh * We lost the cache cap, need to truncate the page before 646e63dc5c7SYehuda Sadeh * it is unlocked, otherwise we'd truncate it later in the 647e63dc5c7SYehuda Sadeh * page truncation thread, possibly losing some data that 648e63dc5c7SYehuda Sadeh * raced its way in 649e63dc5c7SYehuda Sadeh */ 6505b64640cSYan, Zheng remove_page = !(ceph_caps_issued(ci) & 6515b64640cSYan, Zheng (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 6525b64640cSYan, Zheng 6535b64640cSYan, Zheng /* clean all pages */ 6545b64640cSYan, Zheng for (i = 0; i < req->r_num_ops; i++) { 6555b64640cSYan, Zheng if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) 6565b64640cSYan, Zheng break; 6575b64640cSYan, 
Zheng 6585b64640cSYan, Zheng osd_data = osd_req_op_extent_osd_data(req, i); 6595b64640cSYan, Zheng BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 660903f4fecSXiubo Li len += osd_data->length; 6615b64640cSYan, Zheng num_pages = calc_pages_for((u64)osd_data->alignment, 6625b64640cSYan, Zheng (u64)osd_data->length); 6635b64640cSYan, Zheng total_pages += num_pages; 6645b64640cSYan, Zheng for (j = 0; j < num_pages; j++) { 6655b64640cSYan, Zheng page = osd_data->pages[j]; 6665b64640cSYan, Zheng BUG_ON(!page); 6675b64640cSYan, Zheng WARN_ON(!PageUptodate(page)); 6685b64640cSYan, Zheng 6695b64640cSYan, Zheng if (atomic_long_dec_return(&fsc->writeback_count) < 6705b64640cSYan, Zheng CONGESTION_OFF_THRESH( 6715b64640cSYan, Zheng fsc->mount_options->congestion_kb)) 67209dc9fc2SJan Kara clear_bdi_congested(inode_to_bdi(inode), 6735b64640cSYan, Zheng BLK_RW_ASYNC); 6745b64640cSYan, Zheng 675379fc7faSJeff Layton ceph_put_snap_context(detach_page_private(page)); 6765b64640cSYan, Zheng end_page_writeback(page); 677379fc7faSJeff Layton dout("unlocking %p\n", page); 6785b64640cSYan, Zheng 6795b64640cSYan, Zheng if (remove_page) 6805b64640cSYan, Zheng generic_error_remove_page(inode->i_mapping, 6815b64640cSYan, Zheng page); 682e63dc5c7SYehuda Sadeh 6831d3576fdSSage Weil unlock_page(page); 6841d3576fdSSage Weil } 6855b64640cSYan, Zheng dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 6865b64640cSYan, Zheng inode, osd_data->length, rc >= 0 ? num_pages : 0); 6871d3576fdSSage Weil 68896ac9158SJohn Hubbard release_pages(osd_data->pages, num_pages); 6895b64640cSYan, Zheng } 6905b64640cSYan, Zheng 691903f4fecSXiubo Li ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 692903f4fecSXiubo Li req->r_end_latency, len, rc); 693903f4fecSXiubo Li 6945b64640cSYan, Zheng ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); 6955b64640cSYan, Zheng 6965b64640cSYan, Zheng osd_data = osd_req_op_extent_osd_data(req, 0); 69787060c10SAlex Elder if (osd_data->pages_from_pool) 698a0102bdaSJeff Layton mempool_free(osd_data->pages, ceph_wb_pagevec_pool); 6991d3576fdSSage Weil else 70087060c10SAlex Elder kfree(osd_data->pages); 7011d3576fdSSage Weil ceph_osdc_put_request(req); 7021d3576fdSSage Weil } 7031d3576fdSSage Weil 7041d3576fdSSage Weil /* 7051d3576fdSSage Weil * initiate async writeback 7061d3576fdSSage Weil */ 7071d3576fdSSage Weil static int ceph_writepages_start(struct address_space *mapping, 7081d3576fdSSage Weil struct writeback_control *wbc) 7091d3576fdSSage Weil { 7101d3576fdSSage Weil struct inode *inode = mapping->host; 7111d3576fdSSage Weil struct ceph_inode_info *ci = ceph_inode(inode); 712fc2744aaSYan, Zheng struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 713fc2744aaSYan, Zheng struct ceph_vino vino = ceph_vino(inode); 7142a2d927eSYan, Zheng pgoff_t index, start_index, end = -1; 71580e755feSSage Weil struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 7161d3576fdSSage Weil struct pagevec pvec; 7171d3576fdSSage Weil int rc = 0; 71893407472SFabian Frederick unsigned int wsize = i_blocksize(inode); 7191d3576fdSSage Weil struct ceph_osd_request *req = NULL; 7201f934b00SYan, Zheng struct ceph_writeback_ctl ceph_wbc; 721590e9d98SYan, Zheng bool should_loop, range_whole = false; 722af9cc401SYan, Zheng bool done = false; 7231d3576fdSSage Weil 7243fb99d48SYanhu Cao dout("writepages_start %p (mode=%s)\n", inode, 7251d3576fdSSage Weil wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 7261d3576fdSSage Weil (wbc->sync_mode == WB_SYNC_ALL ? 
"ALL" : "HOLD")); 7271d3576fdSSage Weil 72850c9132dSJeff Layton if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) { 7296c93df5dSYan, Zheng if (ci->i_wrbuffer_ref > 0) { 7306c93df5dSYan, Zheng pr_warn_ratelimited( 7316c93df5dSYan, Zheng "writepage_start %p %lld forced umount\n", 7326c93df5dSYan, Zheng inode, ceph_ino(inode)); 7336c93df5dSYan, Zheng } 734a341d4dfSYan, Zheng mapping_set_error(mapping, -EIO); 7351d3576fdSSage Weil return -EIO; /* we're in a forced umount, don't write! */ 7361d3576fdSSage Weil } 73795cca2b4SYan, Zheng if (fsc->mount_options->wsize < wsize) 7383d14c5d2SYehuda Sadeh wsize = fsc->mount_options->wsize; 7391d3576fdSSage Weil 74086679820SMel Gorman pagevec_init(&pvec); 7411d3576fdSSage Weil 742590e9d98SYan, Zheng start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 743590e9d98SYan, Zheng index = start_index; 7441d3576fdSSage Weil 7451d3576fdSSage Weil retry: 7461d3576fdSSage Weil /* find oldest snap context with dirty data */ 74705455e11SYan, Zheng snapc = get_oldest_context(inode, &ceph_wbc, NULL); 7481d3576fdSSage Weil if (!snapc) { 7491d3576fdSSage Weil /* hmm, why does writepages get called when there 7501d3576fdSSage Weil is no dirty data? */ 7511d3576fdSSage Weil dout(" no snap context with dirty data?\n"); 7521d3576fdSSage Weil goto out; 7531d3576fdSSage Weil } 7541d3576fdSSage Weil dout(" oldest snapc is %p seq %lld (%d snaps)\n", 7551d3576fdSSage Weil snapc, snapc->seq, snapc->num_snaps); 756fc2744aaSYan, Zheng 7572a2d927eSYan, Zheng should_loop = false; 7582a2d927eSYan, Zheng if (ceph_wbc.head_snapc && snapc != last_snapc) { 7592a2d927eSYan, Zheng /* where to start/end? */ 7602a2d927eSYan, Zheng if (wbc->range_cyclic) { 7612a2d927eSYan, Zheng index = start_index; 7622a2d927eSYan, Zheng end = -1; 7632a2d927eSYan, Zheng if (index > 0) 7642a2d927eSYan, Zheng should_loop = true; 7652a2d927eSYan, Zheng dout(" cyclic, start at %lu\n", index); 7662a2d927eSYan, Zheng } else { 7672a2d927eSYan, Zheng index = wbc->range_start >> PAGE_SHIFT; 7682a2d927eSYan, Zheng end = wbc->range_end >> PAGE_SHIFT; 7692a2d927eSYan, Zheng if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 7702a2d927eSYan, Zheng range_whole = true; 7712a2d927eSYan, Zheng dout(" not cyclic, %lu to %lu\n", index, end); 7721d3576fdSSage Weil } 7732a2d927eSYan, Zheng } else if (!ceph_wbc.head_snapc) { 7742a2d927eSYan, Zheng /* Do not respect wbc->range_{start,end}. Dirty pages 7752a2d927eSYan, Zheng * in that range can be associated with newer snapc. 
7762a2d927eSYan, Zheng * They are not writeable until we write all dirty pages 7772a2d927eSYan, Zheng * associated with 'snapc' get written */ 7781582af2eSYan, Zheng if (index > 0) 7792a2d927eSYan, Zheng should_loop = true; 7802a2d927eSYan, Zheng dout(" non-head snapc, range whole\n"); 7812a2d927eSYan, Zheng } 7822a2d927eSYan, Zheng 7832a2d927eSYan, Zheng ceph_put_snap_context(last_snapc); 7841d3576fdSSage Weil last_snapc = snapc; 7851d3576fdSSage Weil 786af9cc401SYan, Zheng while (!done && index <= end) { 7875b64640cSYan, Zheng int num_ops = 0, op_idx; 7880e5ecac7SYan, Zheng unsigned i, pvec_pages, max_pages, locked_pages = 0; 7895b64640cSYan, Zheng struct page **pages = NULL, **data_pages; 7901d3576fdSSage Weil struct page *page; 7910e5ecac7SYan, Zheng pgoff_t strip_unit_end = 0; 7925b64640cSYan, Zheng u64 offset = 0, len = 0; 793a0102bdaSJeff Layton bool from_pool = false; 7941d3576fdSSage Weil 7950e5ecac7SYan, Zheng max_pages = wsize >> PAGE_SHIFT; 7961d3576fdSSage Weil 7971d3576fdSSage Weil get_more_pages: 7982e169296SJeff Layton pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, 7992e169296SJeff Layton end, PAGECACHE_TAG_DIRTY); 8000ed75fc8SJan Kara dout("pagevec_lookup_range_tag got %d\n", pvec_pages); 8011d3576fdSSage Weil if (!pvec_pages && !locked_pages) 8021d3576fdSSage Weil break; 8031d3576fdSSage Weil for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 8041d3576fdSSage Weil page = pvec.pages[i]; 8051d3576fdSSage Weil dout("? %p idx %lu\n", page, page->index); 8061d3576fdSSage Weil if (locked_pages == 0) 8071d3576fdSSage Weil lock_page(page); /* first page */ 8081d3576fdSSage Weil else if (!trylock_page(page)) 8091d3576fdSSage Weil break; 8101d3576fdSSage Weil 8111d3576fdSSage Weil /* only dirty pages, or our accounting breaks */ 8121d3576fdSSage Weil if (unlikely(!PageDirty(page)) || 8131d3576fdSSage Weil unlikely(page->mapping != mapping)) { 8141d3576fdSSage Weil dout("!dirty or !mapping %p\n", page); 8151d3576fdSSage Weil unlock_page(page); 8160713e5f2SYan, Zheng continue; 8171d3576fdSSage Weil } 818af9cc401SYan, Zheng /* only if matching snap context */ 819af9cc401SYan, Zheng pgsnapc = page_snap_context(page); 820af9cc401SYan, Zheng if (pgsnapc != snapc) { 821af9cc401SYan, Zheng dout("page snapc %p %lld != oldest %p %lld\n", 822af9cc401SYan, Zheng pgsnapc, pgsnapc->seq, snapc, snapc->seq); 8231582af2eSYan, Zheng if (!should_loop && 8241582af2eSYan, Zheng !ceph_wbc.head_snapc && 8251582af2eSYan, Zheng wbc->sync_mode != WB_SYNC_NONE) 8261582af2eSYan, Zheng should_loop = true; 8271d3576fdSSage Weil unlock_page(page); 828af9cc401SYan, Zheng continue; 8291d3576fdSSage Weil } 8301f934b00SYan, Zheng if (page_offset(page) >= ceph_wbc.i_size) { 8311f934b00SYan, Zheng dout("%p page eof %llu\n", 8321f934b00SYan, Zheng page, ceph_wbc.i_size); 833c95f1c5fSErqi Chen if ((ceph_wbc.size_stable || 834c95f1c5fSErqi Chen page_offset(page) >= i_size_read(inode)) && 835c95f1c5fSErqi Chen clear_page_dirty_for_io(page)) 836af9cc401SYan, Zheng mapping->a_ops->invalidatepage(page, 8378ff2d290SJeff Layton 0, thp_size(page)); 838af9cc401SYan, Zheng unlock_page(page); 839af9cc401SYan, Zheng continue; 840af9cc401SYan, Zheng } 841af9cc401SYan, Zheng if (strip_unit_end && (page->index > strip_unit_end)) { 842af9cc401SYan, Zheng dout("end of strip unit %p\n", page); 8431d3576fdSSage Weil unlock_page(page); 8441d3576fdSSage Weil break; 8451d3576fdSSage Weil } 8461d3576fdSSage Weil if (PageWriteback(page)) { 8470713e5f2SYan, Zheng if (wbc->sync_mode == WB_SYNC_NONE) { 8481d3576fdSSage 
Weil dout("%p under writeback\n", page); 8491d3576fdSSage Weil unlock_page(page); 8500713e5f2SYan, Zheng continue; 8510713e5f2SYan, Zheng } 8520713e5f2SYan, Zheng dout("waiting on writeback %p\n", page); 8530713e5f2SYan, Zheng wait_on_page_writeback(page); 8541d3576fdSSage Weil } 8551d3576fdSSage Weil 8561d3576fdSSage Weil if (!clear_page_dirty_for_io(page)) { 8571d3576fdSSage Weil dout("%p !clear_page_dirty_for_io\n", page); 8581d3576fdSSage Weil unlock_page(page); 8590713e5f2SYan, Zheng continue; 8601d3576fdSSage Weil } 8611d3576fdSSage Weil 862e5975c7cSAlex Elder /* 863e5975c7cSAlex Elder * We have something to write. If this is 864e5975c7cSAlex Elder * the first locked page this time through, 8655b64640cSYan, Zheng * calculate max possinle write size and 8665b64640cSYan, Zheng * allocate a page array 867e5975c7cSAlex Elder */ 8681d3576fdSSage Weil if (locked_pages == 0) { 8695b64640cSYan, Zheng u64 objnum; 8705b64640cSYan, Zheng u64 objoff; 871dccbf080SIlya Dryomov u32 xlen; 8725b64640cSYan, Zheng 8731d3576fdSSage Weil /* prepare async write request */ 8746285bc23SAlex Elder offset = (u64)page_offset(page); 875dccbf080SIlya Dryomov ceph_calc_file_object_mapping(&ci->i_layout, 876dccbf080SIlya Dryomov offset, wsize, 8775b64640cSYan, Zheng &objnum, &objoff, 878dccbf080SIlya Dryomov &xlen); 879dccbf080SIlya Dryomov len = xlen; 8808c71897bSHenry C Chang 8813fb99d48SYanhu Cao num_ops = 1; 8825b64640cSYan, Zheng strip_unit_end = page->index + 88309cbfeafSKirill A. Shutemov ((len - 1) >> PAGE_SHIFT); 884715e4cd4SYan, Zheng 8855b64640cSYan, Zheng BUG_ON(pages); 88688486957SAlex Elder max_pages = calc_pages_for(0, (u64)len); 8876da2ec56SKees Cook pages = kmalloc_array(max_pages, 8886da2ec56SKees Cook sizeof(*pages), 889fc2744aaSYan, Zheng GFP_NOFS); 89088486957SAlex Elder if (!pages) { 891a0102bdaSJeff Layton from_pool = true; 892a0102bdaSJeff Layton pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); 893e5975c7cSAlex Elder BUG_ON(!pages); 89488486957SAlex Elder } 8955b64640cSYan, Zheng 8965b64640cSYan, Zheng len = 0; 8975b64640cSYan, Zheng } else if (page->index != 89809cbfeafSKirill A. Shutemov (offset + len) >> PAGE_SHIFT) { 899a0102bdaSJeff Layton if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS : 9005b64640cSYan, Zheng CEPH_OSD_MAX_OPS)) { 9015b64640cSYan, Zheng redirty_page_for_writepage(wbc, page); 9025b64640cSYan, Zheng unlock_page(page); 9035b64640cSYan, Zheng break; 9045b64640cSYan, Zheng } 9055b64640cSYan, Zheng 9065b64640cSYan, Zheng num_ops++; 9075b64640cSYan, Zheng offset = (u64)page_offset(page); 9085b64640cSYan, Zheng len = 0; 9091d3576fdSSage Weil } 9101d3576fdSSage Weil 9111d3576fdSSage Weil /* note position of first page in pvec */ 9121d3576fdSSage Weil dout("%p will write page %p idx %lu\n", 9131d3576fdSSage Weil inode, page, page->index); 9142baba250SYehuda Sadeh 9155b64640cSYan, Zheng if (atomic_long_inc_return(&fsc->writeback_count) > 9165b64640cSYan, Zheng CONGESTION_ON_THRESH( 9173d14c5d2SYehuda Sadeh fsc->mount_options->congestion_kb)) { 91809dc9fc2SJan Kara set_bdi_congested(inode_to_bdi(inode), 919213c99eeSSage Weil BLK_RW_ASYNC); 9202baba250SYehuda Sadeh } 9212baba250SYehuda Sadeh 9220713e5f2SYan, Zheng 9230713e5f2SYan, Zheng pages[locked_pages++] = page; 9240713e5f2SYan, Zheng pvec.pages[i] = NULL; 9250713e5f2SYan, Zheng 9268ff2d290SJeff Layton len += thp_size(page); 9271d3576fdSSage Weil } 9281d3576fdSSage Weil 9291d3576fdSSage Weil /* did we get anything? 
*/ 9301d3576fdSSage Weil if (!locked_pages) 9311d3576fdSSage Weil goto release_pvec_pages; 9321d3576fdSSage Weil if (i) { 9330713e5f2SYan, Zheng unsigned j, n = 0; 9340713e5f2SYan, Zheng /* shift unused page to beginning of pvec */ 9350713e5f2SYan, Zheng for (j = 0; j < pvec_pages; j++) { 9360713e5f2SYan, Zheng if (!pvec.pages[j]) 9370713e5f2SYan, Zheng continue; 9380713e5f2SYan, Zheng if (n < j) 9390713e5f2SYan, Zheng pvec.pages[n] = pvec.pages[j]; 9400713e5f2SYan, Zheng n++; 9410713e5f2SYan, Zheng } 9420713e5f2SYan, Zheng pvec.nr = n; 9431d3576fdSSage Weil 9441d3576fdSSage Weil if (pvec_pages && i == pvec_pages && 9451d3576fdSSage Weil locked_pages < max_pages) { 9461d3576fdSSage Weil dout("reached end pvec, trying for more\n"); 9470713e5f2SYan, Zheng pagevec_release(&pvec); 9481d3576fdSSage Weil goto get_more_pages; 9491d3576fdSSage Weil } 9501d3576fdSSage Weil } 9511d3576fdSSage Weil 9525b64640cSYan, Zheng new_request: 953e5975c7cSAlex Elder offset = page_offset(pages[0]); 9545b64640cSYan, Zheng len = wsize; 9555b64640cSYan, Zheng 9565b64640cSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, 9575b64640cSYan, Zheng &ci->i_layout, vino, 9585b64640cSYan, Zheng offset, &len, 0, num_ops, 9591f934b00SYan, Zheng CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 9601f934b00SYan, Zheng snapc, ceph_wbc.truncate_seq, 9611f934b00SYan, Zheng ceph_wbc.truncate_size, false); 9625b64640cSYan, Zheng if (IS_ERR(req)) { 9635b64640cSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, 9645b64640cSYan, Zheng &ci->i_layout, vino, 9655b64640cSYan, Zheng offset, &len, 0, 9665b64640cSYan, Zheng min(num_ops, 9675b64640cSYan, Zheng CEPH_OSD_SLAB_OPS), 9685b64640cSYan, Zheng CEPH_OSD_OP_WRITE, 96954ea0046SIlya Dryomov CEPH_OSD_FLAG_WRITE, 9701f934b00SYan, Zheng snapc, ceph_wbc.truncate_seq, 9711f934b00SYan, Zheng ceph_wbc.truncate_size, true); 9725b64640cSYan, Zheng BUG_ON(IS_ERR(req)); 9735b64640cSYan, Zheng } 9745b64640cSYan, Zheng BUG_ON(len < page_offset(pages[locked_pages - 1]) + 9758ff2d290SJeff Layton thp_size(page) - offset); 9765b64640cSYan, Zheng 9775b64640cSYan, Zheng req->r_callback = writepages_finish; 9785b64640cSYan, Zheng req->r_inode = inode; 9795b64640cSYan, Zheng 9805b64640cSYan, Zheng /* Format the osd request message and submit the write */ 9815b64640cSYan, Zheng len = 0; 9825b64640cSYan, Zheng data_pages = pages; 9835b64640cSYan, Zheng op_idx = 0; 9845b64640cSYan, Zheng for (i = 0; i < locked_pages; i++) { 9855b64640cSYan, Zheng u64 cur_offset = page_offset(pages[i]); 9865b64640cSYan, Zheng if (offset + len != cur_offset) { 9873fb99d48SYanhu Cao if (op_idx + 1 == req->r_num_ops) 9885b64640cSYan, Zheng break; 9895b64640cSYan, Zheng osd_req_op_extent_dup_last(req, op_idx, 9905b64640cSYan, Zheng cur_offset - offset); 9915b64640cSYan, Zheng dout("writepages got pages at %llu~%llu\n", 9925b64640cSYan, Zheng offset, len); 9935b64640cSYan, Zheng osd_req_op_extent_osd_data_pages(req, op_idx, 9945b64640cSYan, Zheng data_pages, len, 0, 995a0102bdaSJeff Layton from_pool, false); 9965b64640cSYan, Zheng osd_req_op_extent_update(req, op_idx, len); 9975b64640cSYan, Zheng 9985b64640cSYan, Zheng len = 0; 9995b64640cSYan, Zheng offset = cur_offset; 10005b64640cSYan, Zheng data_pages = pages + i; 10015b64640cSYan, Zheng op_idx++; 10025b64640cSYan, Zheng } 10035b64640cSYan, Zheng 10045b64640cSYan, Zheng set_page_writeback(pages[i]); 10058ff2d290SJeff Layton len += thp_size(page); 10065b64640cSYan, Zheng } 10075b64640cSYan, Zheng 10081f934b00SYan, Zheng if (ceph_wbc.size_stable) { 10091f934b00SYan, Zheng 
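/* i_size for this snap context is final, so it is safe to clamp the write length to it */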
len = min(len, ceph_wbc.i_size - offset); 10105b64640cSYan, Zheng } else if (i == locked_pages) { 1011e1966b49SYan, Zheng /* writepages_finish() clears writeback pages 1012e1966b49SYan, Zheng * according to the data length, so make sure 1013e1966b49SYan, Zheng * data length covers all locked pages */ 10148ff2d290SJeff Layton u64 min_len = len + 1 - thp_size(page); 10151f934b00SYan, Zheng len = get_writepages_data_length(inode, pages[i - 1], 10161f934b00SYan, Zheng offset); 10175b64640cSYan, Zheng len = max(len, min_len); 1018e1966b49SYan, Zheng } 10195b64640cSYan, Zheng dout("writepages got pages at %llu~%llu\n", offset, len); 10201d3576fdSSage Weil 10215b64640cSYan, Zheng osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1022a0102bdaSJeff Layton 0, from_pool, false); 10235b64640cSYan, Zheng osd_req_op_extent_update(req, op_idx, len); 1024e5975c7cSAlex Elder 10255b64640cSYan, Zheng BUG_ON(op_idx + 1 != req->r_num_ops); 10265b64640cSYan, Zheng 1027a0102bdaSJeff Layton from_pool = false; 10285b64640cSYan, Zheng if (i < locked_pages) { 10295b64640cSYan, Zheng BUG_ON(num_ops <= req->r_num_ops); 10305b64640cSYan, Zheng num_ops -= req->r_num_ops; 10315b64640cSYan, Zheng locked_pages -= i; 1032e5975c7cSAlex Elder 10335b64640cSYan, Zheng /* allocate new pages array for next request */ 10345b64640cSYan, Zheng data_pages = pages; 10356da2ec56SKees Cook pages = kmalloc_array(locked_pages, sizeof(*pages), 10365b64640cSYan, Zheng GFP_NOFS); 10375b64640cSYan, Zheng if (!pages) { 1038a0102bdaSJeff Layton from_pool = true; 1039a0102bdaSJeff Layton pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); 10405b64640cSYan, Zheng BUG_ON(!pages); 10415b64640cSYan, Zheng } 10425b64640cSYan, Zheng memcpy(pages, data_pages + i, 10435b64640cSYan, Zheng locked_pages * sizeof(*pages)); 10445b64640cSYan, Zheng memset(data_pages + i, 0, 10455b64640cSYan, Zheng locked_pages * sizeof(*pages)); 10465b64640cSYan, Zheng } else { 10475b64640cSYan, Zheng BUG_ON(num_ops != req->r_num_ops); 10485b64640cSYan, Zheng index = pages[i - 1]->index + 1; 10495b64640cSYan, Zheng /* request message now owns the pages array */ 10505b64640cSYan, Zheng pages = NULL; 10515b64640cSYan, Zheng } 1052e5975c7cSAlex Elder 1053fac02ddfSArnd Bergmann req->r_mtime = inode->i_mtime; 10549d6fcb08SSage Weil rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 10559d6fcb08SSage Weil BUG_ON(rc); 10561d3576fdSSage Weil req = NULL; 10571d3576fdSSage Weil 10585b64640cSYan, Zheng wbc->nr_to_write -= i; 10595b64640cSYan, Zheng if (pages) 10605b64640cSYan, Zheng goto new_request; 10615b64640cSYan, Zheng 10622a2d927eSYan, Zheng /* 10632a2d927eSYan, Zheng * We stop writing back only if we are not doing 10642a2d927eSYan, Zheng * integrity sync. In case of integrity sync we have to 10652a2d927eSYan, Zheng * keep going until we have written all the pages 10662a2d927eSYan, Zheng * we tagged for writeback prior to entering this loop. 10672a2d927eSYan, Zheng */ 10682a2d927eSYan, Zheng if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) 1069af9cc401SYan, Zheng done = true; 10701d3576fdSSage Weil 10711d3576fdSSage Weil release_pvec_pages: 10721d3576fdSSage Weil dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 10731d3576fdSSage Weil pvec.nr ? 
pvec.pages[0] : NULL); 10741d3576fdSSage Weil pagevec_release(&pvec); 10751d3576fdSSage Weil } 10761d3576fdSSage Weil 10771d3576fdSSage Weil if (should_loop && !done) { 10781d3576fdSSage Weil /* more to do; loop back to beginning of file */ 10791d3576fdSSage Weil dout("writepages looping back to beginning of file\n"); 10802a2d927eSYan, Zheng end = start_index - 1; /* OK even when start_index == 0 */ 1081f275635eSYan, Zheng 1082f275635eSYan, Zheng /* to write dirty pages associated with next snapc, 1083f275635eSYan, Zheng * we need to wait until current writes complete */ 1084f275635eSYan, Zheng if (wbc->sync_mode != WB_SYNC_NONE && 1085f275635eSYan, Zheng start_index == 0 && /* all dirty pages were checked */ 1086f275635eSYan, Zheng !ceph_wbc.head_snapc) { 1087f275635eSYan, Zheng struct page *page; 1088f275635eSYan, Zheng unsigned i, nr; 1089f275635eSYan, Zheng index = 0; 1090f275635eSYan, Zheng while ((index <= end) && 1091f275635eSYan, Zheng (nr = pagevec_lookup_tag(&pvec, mapping, &index, 109267fd707fSJan Kara PAGECACHE_TAG_WRITEBACK))) { 1093f275635eSYan, Zheng for (i = 0; i < nr; i++) { 1094f275635eSYan, Zheng page = pvec.pages[i]; 1095f275635eSYan, Zheng if (page_snap_context(page) != snapc) 1096f275635eSYan, Zheng continue; 1097f275635eSYan, Zheng wait_on_page_writeback(page); 1098f275635eSYan, Zheng } 1099f275635eSYan, Zheng pagevec_release(&pvec); 1100f275635eSYan, Zheng cond_resched(); 1101f275635eSYan, Zheng } 1102f275635eSYan, Zheng } 1103f275635eSYan, Zheng 11042a2d927eSYan, Zheng start_index = 0; 11051d3576fdSSage Weil index = 0; 11061d3576fdSSage Weil goto retry; 11071d3576fdSSage Weil } 11081d3576fdSSage Weil 11091d3576fdSSage Weil if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 11101d3576fdSSage Weil mapping->writeback_index = index; 11111d3576fdSSage Weil 11121d3576fdSSage Weil out: 11131d3576fdSSage Weil ceph_osdc_put_request(req); 11142a2d927eSYan, Zheng ceph_put_snap_context(last_snapc); 11152a2d927eSYan, Zheng dout("writepages dend - startone, rc = %d\n", rc); 11161d3576fdSSage Weil return rc; 11171d3576fdSSage Weil } 11181d3576fdSSage Weil 11191d3576fdSSage Weil 11201d3576fdSSage Weil 11211d3576fdSSage Weil /* 11221d3576fdSSage Weil * See if a given @snapc is either writeable, or already written. 11231d3576fdSSage Weil */ 11241d3576fdSSage Weil static int context_is_writeable_or_written(struct inode *inode, 11251d3576fdSSage Weil struct ceph_snap_context *snapc) 11261d3576fdSSage Weil { 112705455e11SYan, Zheng struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL); 11286298a337SSage Weil int ret = !oldest || snapc->seq <= oldest->seq; 11296298a337SSage Weil 11306298a337SSage Weil ceph_put_snap_context(oldest); 11316298a337SSage Weil return ret; 11321d3576fdSSage Weil } 11331d3576fdSSage Weil 113418d620f0SJeff Layton /** 113518d620f0SJeff Layton * ceph_find_incompatible - find an incompatible context and return it 113618d620f0SJeff Layton * @page: page being dirtied 113718d620f0SJeff Layton * 113818d620f0SJeff Layton * We are only allowed to write into/dirty a page if the page is 113918d620f0SJeff Layton * clean, or already dirty within the same snap context. Returns a 114018d620f0SJeff Layton * conflicting context if there is one, NULL if there isn't, or a 114118d620f0SJeff Layton * negative error code on other errors. 114218d620f0SJeff Layton * 114318d620f0SJeff Layton * Must be called with page lock held. 
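 *
 * When a conflicting context is returned, ceph_netfs_check_write_begin()
 * drops the folio, kicks off writeback and waits for that context to
 * become writeable (or written) before returning -EAGAIN so the
 * write_begin attempt can be retried.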
114418d620f0SJeff Layton */ 114518d620f0SJeff Layton static struct ceph_snap_context * 1146d45156bfSJeff Layton ceph_find_incompatible(struct page *page) 114718d620f0SJeff Layton { 1148d45156bfSJeff Layton struct inode *inode = page->mapping->host; 114918d620f0SJeff Layton struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 115018d620f0SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 115118d620f0SJeff Layton 115250c9132dSJeff Layton if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) { 115318d620f0SJeff Layton dout(" page %p forced umount\n", page); 115418d620f0SJeff Layton return ERR_PTR(-EIO); 115518d620f0SJeff Layton } 115618d620f0SJeff Layton 115718d620f0SJeff Layton for (;;) { 115818d620f0SJeff Layton struct ceph_snap_context *snapc, *oldest; 115918d620f0SJeff Layton 116018d620f0SJeff Layton wait_on_page_writeback(page); 116118d620f0SJeff Layton 116218d620f0SJeff Layton snapc = page_snap_context(page); 116318d620f0SJeff Layton if (!snapc || snapc == ci->i_head_snapc) 116418d620f0SJeff Layton break; 116518d620f0SJeff Layton 116618d620f0SJeff Layton /* 116718d620f0SJeff Layton * this page is already dirty in another (older) snap 116818d620f0SJeff Layton * context! is it writeable now? 116918d620f0SJeff Layton */ 117018d620f0SJeff Layton oldest = get_oldest_context(inode, NULL, NULL); 117118d620f0SJeff Layton if (snapc->seq > oldest->seq) { 117218d620f0SJeff Layton /* not writeable -- return it for the caller to deal with */ 117318d620f0SJeff Layton ceph_put_snap_context(oldest); 117418d620f0SJeff Layton dout(" page %p snapc %p not current or oldest\n", page, snapc); 117518d620f0SJeff Layton return ceph_get_snap_context(snapc); 117618d620f0SJeff Layton } 117718d620f0SJeff Layton ceph_put_snap_context(oldest); 117818d620f0SJeff Layton 117918d620f0SJeff Layton /* yay, writeable, do it now (without dropping page lock) */ 118018d620f0SJeff Layton dout(" page %p snapc %p not current, but oldest\n", page, snapc); 118118d620f0SJeff Layton if (clear_page_dirty_for_io(page)) { 118218d620f0SJeff Layton int r = writepage_nounlock(page, NULL); 118318d620f0SJeff Layton if (r < 0) 118418d620f0SJeff Layton return ERR_PTR(r); 118518d620f0SJeff Layton } 118618d620f0SJeff Layton } 118718d620f0SJeff Layton return NULL; 118818d620f0SJeff Layton } 118918d620f0SJeff Layton 1190d801327dSJeff Layton static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, 1191*78525c74SDavid Howells struct folio *folio, void **_fsdata) 1192d801327dSJeff Layton { 1193d801327dSJeff Layton struct inode *inode = file_inode(file); 1194d801327dSJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 1195d801327dSJeff Layton struct ceph_snap_context *snapc; 1196d801327dSJeff Layton 1197*78525c74SDavid Howells snapc = ceph_find_incompatible(folio_page(folio, 0)); 1198d801327dSJeff Layton if (snapc) { 1199d801327dSJeff Layton int r; 1200d801327dSJeff Layton 1201*78525c74SDavid Howells folio_unlock(folio); 1202*78525c74SDavid Howells folio_put(folio); 1203d801327dSJeff Layton if (IS_ERR(snapc)) 1204d801327dSJeff Layton return PTR_ERR(snapc); 1205d801327dSJeff Layton 1206d801327dSJeff Layton ceph_queue_writeback(inode); 1207d801327dSJeff Layton r = wait_event_killable(ci->i_cap_wq, 1208d801327dSJeff Layton context_is_writeable_or_written(inode, snapc)); 1209d801327dSJeff Layton ceph_put_snap_context(snapc); 1210d801327dSJeff Layton return r == 0 ? 
-EAGAIN : r; 1211d801327dSJeff Layton } 1212d801327dSJeff Layton return 0; 1213d801327dSJeff Layton } 1214d801327dSJeff Layton 12151d3576fdSSage Weil /* 12161d3576fdSSage Weil * We are only allowed to write into/dirty the page if the page is 12171d3576fdSSage Weil * clean, or already dirty within the same snap context. 12184af6b225SYehuda Sadeh */ 12194af6b225SYehuda Sadeh static int ceph_write_begin(struct file *file, struct address_space *mapping, 1220*78525c74SDavid Howells loff_t pos, unsigned len, unsigned aop_flags, 12214af6b225SYehuda Sadeh struct page **pagep, void **fsdata) 12224af6b225SYehuda Sadeh { 1223496ad9aaSAl Viro struct inode *inode = file_inode(file); 12241cc16990SJeff Layton struct ceph_inode_info *ci = ceph_inode(inode); 1225*78525c74SDavid Howells struct folio *folio = NULL; 122609cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT; 1227d801327dSJeff Layton int r; 12284af6b225SYehuda Sadeh 1229d801327dSJeff Layton /* 1230d801327dSJeff Layton * Uninlining should have already been done and everything updated, EXCEPT 1231d801327dSJeff Layton * for inline_version sent to the MDS. 1232d801327dSJeff Layton */ 1233d801327dSJeff Layton if (ci->i_inline_version != CEPH_INLINE_NONE) { 1234*78525c74SDavid Howells unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; 1235*78525c74SDavid Howells if (aop_flags & AOP_FLAG_NOFS) 1236*78525c74SDavid Howells fgp_flags |= FGP_NOFS; 1237*78525c74SDavid Howells folio = __filemap_get_folio(mapping, index, fgp_flags, 1238*78525c74SDavid Howells mapping_gfp_mask(mapping)); 1239*78525c74SDavid Howells if (!folio) 1240d801327dSJeff Layton return -ENOMEM; 12411cc16990SJeff Layton 12421cc16990SJeff Layton /* 1243d801327dSJeff Layton * The inline_version on a new inode is set to 1. If that's the 1244*78525c74SDavid Howells * case, then the folio is brand new and isn't yet Uptodate. 
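	 * For a brand-new inode, or for any folio other than the first,
	 * the folio is simply zeroed and marked Uptodate below so the
	 * write can proceed without reading from the OSDs.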
12451cc16990SJeff Layton */ 1246d801327dSJeff Layton r = 0; 1247d801327dSJeff Layton if (index == 0 && ci->i_inline_version != 1) { 1248*78525c74SDavid Howells if (!folio_test_uptodate(folio)) { 1249d801327dSJeff Layton WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n", 1250d801327dSJeff Layton ci->i_inline_version); 1251d801327dSJeff Layton r = -EINVAL; 1252d801327dSJeff Layton } 1253d801327dSJeff Layton goto out; 1254d801327dSJeff Layton } 1255*78525c74SDavid Howells zero_user_segment(&folio->page, 0, folio_size(folio)); 1256*78525c74SDavid Howells folio_mark_uptodate(folio); 1257d801327dSJeff Layton goto out; 12581cc16990SJeff Layton } 12591cc16990SJeff Layton 1260*78525c74SDavid Howells r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL, 1261d801327dSJeff Layton &ceph_netfs_read_ops, NULL); 1262d801327dSJeff Layton out: 1263d801327dSJeff Layton if (r == 0) 1264*78525c74SDavid Howells folio_wait_fscache(folio); 12651cc16990SJeff Layton if (r < 0) { 1266*78525c74SDavid Howells if (folio) 1267*78525c74SDavid Howells folio_put(folio); 12681cc16990SJeff Layton } else { 1269*78525c74SDavid Howells WARN_ON_ONCE(!folio_test_locked(folio)); 1270*78525c74SDavid Howells *pagep = &folio->page; 12711cc16990SJeff Layton } 12724af6b225SYehuda Sadeh return r; 12734af6b225SYehuda Sadeh } 12744af6b225SYehuda Sadeh 12754af6b225SYehuda Sadeh /* 12761d3576fdSSage Weil * we don't do anything in here that simple_write_end doesn't do 12775dda377cSYan, Zheng * except adjust dirty page accounting 12781d3576fdSSage Weil */ 12791d3576fdSSage Weil static int ceph_write_end(struct file *file, struct address_space *mapping, 12801d3576fdSSage Weil loff_t pos, unsigned len, unsigned copied, 1281*78525c74SDavid Howells struct page *subpage, void *fsdata) 12821d3576fdSSage Weil { 1283*78525c74SDavid Howells struct folio *folio = page_folio(subpage); 1284496ad9aaSAl Viro struct inode *inode = file_inode(file); 1285efb0ca76SYan, Zheng bool check_cap = false; 12861d3576fdSSage Weil 1287*78525c74SDavid Howells dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file, 1288*78525c74SDavid Howells inode, folio, (int)pos, (int)copied, (int)len); 12891d3576fdSSage Weil 1290*78525c74SDavid Howells if (!folio_test_uptodate(folio)) { 1291ce3a8732SJeff Layton /* just return that nothing was copied on a short copy */ 1292b9de313cSAl Viro if (copied < len) { 1293b9de313cSAl Viro copied = 0; 1294b9de313cSAl Viro goto out; 1295b9de313cSAl Viro } 1296*78525c74SDavid Howells folio_mark_uptodate(folio); 1297b9de313cSAl Viro } 12981d3576fdSSage Weil 12991d3576fdSSage Weil /* did file size increase? */ 130099c88e69SYan, Zheng if (pos+copied > i_size_read(inode)) 13011d3576fdSSage Weil check_cap = ceph_inode_set_size(inode, pos+copied); 13021d3576fdSSage Weil 1303*78525c74SDavid Howells folio_mark_dirty(folio); 13041d3576fdSSage Weil 1305b9de313cSAl Viro out: 1306*78525c74SDavid Howells folio_unlock(folio); 1307*78525c74SDavid Howells folio_put(folio); 13081d3576fdSSage Weil 13091d3576fdSSage Weil if (check_cap) 13101d3576fdSSage Weil ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 13111d3576fdSSage Weil 13121d3576fdSSage Weil return copied; 13131d3576fdSSage Weil } 13141d3576fdSSage Weil 13151d3576fdSSage Weil /* 13161d3576fdSSage Weil * we set .direct_IO to indicate direct io is supported, but since we 13171d3576fdSSage Weil * intercept O_DIRECT reads and writes early, this function should 13181d3576fdSSage Weil * never get called. 
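 * (The read/write iter paths, ceph_read_iter()/ceph_write_iter() in
 * file.c, detect O_DIRECT and drive the OSD I/O themselves, so generic
 * direct I/O never reaches this address_space operation.)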
13191d3576fdSSage Weil */ 1320c8b8e32dSChristoph Hellwig static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter) 13211d3576fdSSage Weil { 13221d3576fdSSage Weil WARN_ON(1); 13231d3576fdSSage Weil return -EINVAL; 13241d3576fdSSage Weil } 13251d3576fdSSage Weil 13261d3576fdSSage Weil const struct address_space_operations ceph_aops = { 13271d3576fdSSage Weil .readpage = ceph_readpage, 132849870056SJeff Layton .readahead = ceph_readahead, 13291d3576fdSSage Weil .writepage = ceph_writepage, 13301d3576fdSSage Weil .writepages = ceph_writepages_start, 13311d3576fdSSage Weil .write_begin = ceph_write_begin, 13321d3576fdSSage Weil .write_end = ceph_write_end, 13331d3576fdSSage Weil .set_page_dirty = ceph_set_page_dirty, 13341d3576fdSSage Weil .invalidatepage = ceph_invalidatepage, 13351d3576fdSSage Weil .releasepage = ceph_releasepage, 13361d3576fdSSage Weil .direct_IO = ceph_direct_io, 13371d3576fdSSage Weil }; 13381d3576fdSSage Weil 13394f7e89f6SYan, Zheng static void ceph_block_sigs(sigset_t *oldset) 13404f7e89f6SYan, Zheng { 13414f7e89f6SYan, Zheng sigset_t mask; 13424f7e89f6SYan, Zheng siginitsetinv(&mask, sigmask(SIGKILL)); 13434f7e89f6SYan, Zheng sigprocmask(SIG_BLOCK, &mask, oldset); 13444f7e89f6SYan, Zheng } 13454f7e89f6SYan, Zheng 13464f7e89f6SYan, Zheng static void ceph_restore_sigs(sigset_t *oldset) 13474f7e89f6SYan, Zheng { 13484f7e89f6SYan, Zheng sigprocmask(SIG_SETMASK, oldset, NULL); 13494f7e89f6SYan, Zheng } 13501d3576fdSSage Weil 13511d3576fdSSage Weil /* 13521d3576fdSSage Weil * vm ops 13531d3576fdSSage Weil */ 135424499847SSouptick Joarder static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) 135561f68816SYan, Zheng { 135611bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 135761f68816SYan, Zheng struct inode *inode = file_inode(vma->vm_file); 135861f68816SYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 135961f68816SYan, Zheng struct ceph_file_info *fi = vma->vm_file->private_data; 1360c403c3a2SMatthew Wilcox (Oracle) loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; 136124499847SSouptick Joarder int want, got, err; 13624f7e89f6SYan, Zheng sigset_t oldset; 136324499847SSouptick Joarder vm_fault_t ret = VM_FAULT_SIGBUS; 13644f7e89f6SYan, Zheng 13654f7e89f6SYan, Zheng ceph_block_sigs(&oldset); 136661f68816SYan, Zheng 13678ff2d290SJeff Layton dout("filemap_fault %p %llx.%llx %llu trying to get caps\n", 13688ff2d290SJeff Layton inode, ceph_vinop(inode), off); 136961f68816SYan, Zheng if (fi->fmode & CEPH_FILE_MODE_LAZY) 137061f68816SYan, Zheng want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 137161f68816SYan, Zheng else 137261f68816SYan, Zheng want = CEPH_CAP_FILE_CACHE; 13734f7e89f6SYan, Zheng 137461f68816SYan, Zheng got = 0; 1375e72968e1SJeff Layton err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got); 137624499847SSouptick Joarder if (err < 0) 13774f7e89f6SYan, Zheng goto out_restore; 13786ce026e4SYan, Zheng 13798ff2d290SJeff Layton dout("filemap_fault %p %llu got cap refs on %s\n", 13808ff2d290SJeff Layton inode, off, ceph_cap_string(got)); 138161f68816SYan, Zheng 138283701246SYan, Zheng if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 13832b1ac852SYan, Zheng ci->i_inline_version == CEPH_INLINE_NONE) { 13845d988308SYan, Zheng CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 13855d988308SYan, Zheng ceph_add_rw_context(fi, &rw_ctx); 138611bac800SDave Jiang ret = filemap_fault(vmf); 13875d988308SYan, Zheng ceph_del_rw_context(fi, &rw_ctx); 13888ff2d290SJeff Layton dout("filemap_fault %p %llu drop cap refs %s ret %x\n", 
13898ff2d290SJeff Layton inode, off, ceph_cap_string(got), ret); 13902b1ac852SYan, Zheng } else 139124499847SSouptick Joarder err = -EAGAIN; 139261f68816SYan, Zheng 139361f68816SYan, Zheng ceph_put_cap_refs(ci, got); 139461f68816SYan, Zheng 139524499847SSouptick Joarder if (err != -EAGAIN) 13964f7e89f6SYan, Zheng goto out_restore; 139783701246SYan, Zheng 139883701246SYan, Zheng /* read inline data */ 139909cbfeafSKirill A. Shutemov if (off >= PAGE_SIZE) { 140083701246SYan, Zheng /* does not support inline data > PAGE_SIZE */ 140183701246SYan, Zheng ret = VM_FAULT_SIGBUS; 140283701246SYan, Zheng } else { 140383701246SYan, Zheng struct address_space *mapping = inode->i_mapping; 1404057ba5b2SJan Kara struct page *page; 1405057ba5b2SJan Kara 1406057ba5b2SJan Kara filemap_invalidate_lock_shared(mapping); 1407057ba5b2SJan Kara page = find_or_create_page(mapping, 0, 1408057ba5b2SJan Kara mapping_gfp_constraint(mapping, ~__GFP_FS)); 140983701246SYan, Zheng if (!page) { 141083701246SYan, Zheng ret = VM_FAULT_OOM; 14114f7e89f6SYan, Zheng goto out_inline; 141283701246SYan, Zheng } 141324499847SSouptick Joarder err = __ceph_do_getattr(inode, page, 141483701246SYan, Zheng CEPH_STAT_CAP_INLINE_DATA, true); 141524499847SSouptick Joarder if (err < 0 || off >= i_size_read(inode)) { 141683701246SYan, Zheng unlock_page(page); 141709cbfeafSKirill A. Shutemov put_page(page); 1418c64a2b05SSouptick Joarder ret = vmf_error(err); 14194f7e89f6SYan, Zheng goto out_inline; 142083701246SYan, Zheng } 142124499847SSouptick Joarder if (err < PAGE_SIZE) 142224499847SSouptick Joarder zero_user_segment(page, err, PAGE_SIZE); 142383701246SYan, Zheng else 142483701246SYan, Zheng flush_dcache_page(page); 142583701246SYan, Zheng SetPageUptodate(page); 142683701246SYan, Zheng vmf->page = page; 142783701246SYan, Zheng ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 14284f7e89f6SYan, Zheng out_inline: 1429057ba5b2SJan Kara filemap_invalidate_unlock_shared(mapping); 14308ff2d290SJeff Layton dout("filemap_fault %p %llu read inline data ret %x\n", 14318ff2d290SJeff Layton inode, off, ret); 14324f7e89f6SYan, Zheng } 14334f7e89f6SYan, Zheng out_restore: 14344f7e89f6SYan, Zheng ceph_restore_sigs(&oldset); 143524499847SSouptick Joarder if (err < 0) 143624499847SSouptick Joarder ret = vmf_error(err); 14376ce026e4SYan, Zheng 143861f68816SYan, Zheng return ret; 143961f68816SYan, Zheng } 14401d3576fdSSage Weil 144124499847SSouptick Joarder static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) 14421d3576fdSSage Weil { 144311bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 1444496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 144561f68816SYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 144661f68816SYan, Zheng struct ceph_file_info *fi = vma->vm_file->private_data; 1447f66fd9f0SYan, Zheng struct ceph_cap_flush *prealloc_cf; 144861f68816SYan, Zheng struct page *page = vmf->page; 14496285bc23SAlex Elder loff_t off = page_offset(page); 145061f68816SYan, Zheng loff_t size = i_size_read(inode); 145161f68816SYan, Zheng size_t len; 145224499847SSouptick Joarder int want, got, err; 14534f7e89f6SYan, Zheng sigset_t oldset; 145424499847SSouptick Joarder vm_fault_t ret = VM_FAULT_SIGBUS; 14551d3576fdSSage Weil 1456f66fd9f0SYan, Zheng prealloc_cf = ceph_alloc_cap_flush(); 1457f66fd9f0SYan, Zheng if (!prealloc_cf) 14586ce026e4SYan, Zheng return VM_FAULT_OOM; 1459f66fd9f0SYan, Zheng 1460249c1df5SJeff Layton sb_start_pagefault(inode->i_sb); 14614f7e89f6SYan, Zheng ceph_block_sigs(&oldset); 14621d3576fdSSage Weil 146328127bddSYan, 
Zheng if (ci->i_inline_version != CEPH_INLINE_NONE) { 146428127bddSYan, Zheng struct page *locked_page = NULL; 146528127bddSYan, Zheng if (off == 0) { 146628127bddSYan, Zheng lock_page(page); 146728127bddSYan, Zheng locked_page = page; 146828127bddSYan, Zheng } 146924499847SSouptick Joarder err = ceph_uninline_data(vma->vm_file, locked_page); 147028127bddSYan, Zheng if (locked_page) 147128127bddSYan, Zheng unlock_page(locked_page); 147224499847SSouptick Joarder if (err < 0) 1473f66fd9f0SYan, Zheng goto out_free; 1474f66fd9f0SYan, Zheng } 147528127bddSYan, Zheng 14768ff2d290SJeff Layton if (off + thp_size(page) <= size) 14778ff2d290SJeff Layton len = thp_size(page); 14781d3576fdSSage Weil else 14798ff2d290SJeff Layton len = offset_in_thp(page, size); 14801d3576fdSSage Weil 148161f68816SYan, Zheng dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 148261f68816SYan, Zheng inode, ceph_vinop(inode), off, len, size); 148361f68816SYan, Zheng if (fi->fmode & CEPH_FILE_MODE_LAZY) 148461f68816SYan, Zheng want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 148561f68816SYan, Zheng else 148661f68816SYan, Zheng want = CEPH_CAP_FILE_BUFFER; 14874f7e89f6SYan, Zheng 148861f68816SYan, Zheng got = 0; 1489e72968e1SJeff Layton err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got); 149024499847SSouptick Joarder if (err < 0) 1491f66fd9f0SYan, Zheng goto out_free; 14926ce026e4SYan, Zheng 149361f68816SYan, Zheng dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 149461f68816SYan, Zheng inode, off, len, ceph_cap_string(got)); 149561f68816SYan, Zheng 149661f68816SYan, Zheng /* Update time before taking page lock */ 149761f68816SYan, Zheng file_update_time(vma->vm_file); 14985c308356SJeff Layton inode_inc_iversion_raw(inode); 14994af6b225SYehuda Sadeh 1500f0b33df5SYan, Zheng do { 1501d45156bfSJeff Layton struct ceph_snap_context *snapc; 1502d45156bfSJeff Layton 15034af6b225SYehuda Sadeh lock_page(page); 15044af6b225SYehuda Sadeh 1505cb03c143SAndreas Gruenbacher if (page_mkwrite_check_truncate(page, inode) < 0) { 1506f9cac5acSYan, Zheng unlock_page(page); 15076ce026e4SYan, Zheng ret = VM_FAULT_NOPAGE; 1508f0b33df5SYan, Zheng break; 1509f9cac5acSYan, Zheng } 15104af6b225SYehuda Sadeh 1511d45156bfSJeff Layton snapc = ceph_find_incompatible(page); 1512d45156bfSJeff Layton if (!snapc) { 15134af6b225SYehuda Sadeh /* success. we'll keep the page locked. 
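 * Returning VM_FAULT_LOCKED below tells the fault path that the page
 * is still locked (and now dirty), so it must not be unlocked here.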
*/ 15141d3576fdSSage Weil set_page_dirty(page); 15151d3576fdSSage Weil ret = VM_FAULT_LOCKED; 1516d45156bfSJeff Layton break; 15171d3576fdSSage Weil } 1518d45156bfSJeff Layton 1519d45156bfSJeff Layton unlock_page(page); 1520d45156bfSJeff Layton 1521d45156bfSJeff Layton if (IS_ERR(snapc)) { 1522d45156bfSJeff Layton ret = VM_FAULT_SIGBUS; 1523d45156bfSJeff Layton break; 1524d45156bfSJeff Layton } 1525d45156bfSJeff Layton 1526d45156bfSJeff Layton ceph_queue_writeback(inode); 1527d45156bfSJeff Layton err = wait_event_killable(ci->i_cap_wq, 1528d45156bfSJeff Layton context_is_writeable_or_written(inode, snapc)); 1529d45156bfSJeff Layton ceph_put_snap_context(snapc); 1530d45156bfSJeff Layton } while (err == 0); 1531f0b33df5SYan, Zheng 153228127bddSYan, Zheng if (ret == VM_FAULT_LOCKED || 153328127bddSYan, Zheng ci->i_inline_version != CEPH_INLINE_NONE) { 153461f68816SYan, Zheng int dirty; 153561f68816SYan, Zheng spin_lock(&ci->i_ceph_lock); 153628127bddSYan, Zheng ci->i_inline_version = CEPH_INLINE_NONE; 1537f66fd9f0SYan, Zheng dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1538f66fd9f0SYan, Zheng &prealloc_cf); 153961f68816SYan, Zheng spin_unlock(&ci->i_ceph_lock); 154061f68816SYan, Zheng if (dirty) 154161f68816SYan, Zheng __mark_inode_dirty(inode, dirty); 154261f68816SYan, Zheng } 154361f68816SYan, Zheng 154424499847SSouptick Joarder dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n", 154561f68816SYan, Zheng inode, off, len, ceph_cap_string(got), ret); 1546a8810cdcSJeff Layton ceph_put_cap_refs_async(ci, got); 1547f66fd9f0SYan, Zheng out_free: 15484f7e89f6SYan, Zheng ceph_restore_sigs(&oldset); 1549249c1df5SJeff Layton sb_end_pagefault(inode->i_sb); 1550f66fd9f0SYan, Zheng ceph_free_cap_flush(prealloc_cf); 155124499847SSouptick Joarder if (err < 0) 155224499847SSouptick Joarder ret = vmf_error(err); 15531d3576fdSSage Weil return ret; 15541d3576fdSSage Weil } 15551d3576fdSSage Weil 155631c542a1SYan, Zheng void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 155731c542a1SYan, Zheng char *data, size_t len) 155831c542a1SYan, Zheng { 155931c542a1SYan, Zheng struct address_space *mapping = inode->i_mapping; 156031c542a1SYan, Zheng struct page *page; 156131c542a1SYan, Zheng 156231c542a1SYan, Zheng if (locked_page) { 156331c542a1SYan, Zheng page = locked_page; 156431c542a1SYan, Zheng } else { 156531c542a1SYan, Zheng if (i_size_read(inode) == 0) 156631c542a1SYan, Zheng return; 156731c542a1SYan, Zheng page = find_or_create_page(mapping, 0, 1568c62d2555SMichal Hocko mapping_gfp_constraint(mapping, 1569c62d2555SMichal Hocko ~__GFP_FS)); 157031c542a1SYan, Zheng if (!page) 157131c542a1SYan, Zheng return; 157231c542a1SYan, Zheng if (PageUptodate(page)) { 157331c542a1SYan, Zheng unlock_page(page); 157409cbfeafSKirill A. Shutemov put_page(page); 157531c542a1SYan, Zheng return; 157631c542a1SYan, Zheng } 157731c542a1SYan, Zheng } 157831c542a1SYan, Zheng 15790668ff52SIlya Dryomov dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 158031c542a1SYan, Zheng inode, ceph_vinop(inode), len, locked_page); 158131c542a1SYan, Zheng 158231c542a1SYan, Zheng if (len > 0) { 158331c542a1SYan, Zheng void *kaddr = kmap_atomic(page); 158431c542a1SYan, Zheng memcpy(kaddr, data, len); 158531c542a1SYan, Zheng kunmap_atomic(kaddr); 158631c542a1SYan, Zheng } 158731c542a1SYan, Zheng 158831c542a1SYan, Zheng if (page != locked_page) { 158909cbfeafSKirill A. Shutemov if (len < PAGE_SIZE) 159009cbfeafSKirill A. 
Shutemov zero_user_segment(page, len, PAGE_SIZE); 159131c542a1SYan, Zheng else 159231c542a1SYan, Zheng flush_dcache_page(page); 159331c542a1SYan, Zheng 159431c542a1SYan, Zheng SetPageUptodate(page); 159531c542a1SYan, Zheng unlock_page(page); 159609cbfeafSKirill A. Shutemov put_page(page); 159731c542a1SYan, Zheng } 159831c542a1SYan, Zheng } 159931c542a1SYan, Zheng 160028127bddSYan, Zheng int ceph_uninline_data(struct file *filp, struct page *locked_page) 160128127bddSYan, Zheng { 160228127bddSYan, Zheng struct inode *inode = file_inode(filp); 160328127bddSYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 160428127bddSYan, Zheng struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 160528127bddSYan, Zheng struct ceph_osd_request *req; 160628127bddSYan, Zheng struct page *page = NULL; 160728127bddSYan, Zheng u64 len, inline_version; 160828127bddSYan, Zheng int err = 0; 160928127bddSYan, Zheng bool from_pagecache = false; 161028127bddSYan, Zheng 161128127bddSYan, Zheng spin_lock(&ci->i_ceph_lock); 161228127bddSYan, Zheng inline_version = ci->i_inline_version; 161328127bddSYan, Zheng spin_unlock(&ci->i_ceph_lock); 161428127bddSYan, Zheng 161528127bddSYan, Zheng dout("uninline_data %p %llx.%llx inline_version %llu\n", 161628127bddSYan, Zheng inode, ceph_vinop(inode), inline_version); 161728127bddSYan, Zheng 161828127bddSYan, Zheng if (inline_version == 1 || /* initial version, no data */ 161928127bddSYan, Zheng inline_version == CEPH_INLINE_NONE) 162028127bddSYan, Zheng goto out; 162128127bddSYan, Zheng 162228127bddSYan, Zheng if (locked_page) { 162328127bddSYan, Zheng page = locked_page; 162428127bddSYan, Zheng WARN_ON(!PageUptodate(page)); 162528127bddSYan, Zheng } else if (ceph_caps_issued(ci) & 162628127bddSYan, Zheng (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 162728127bddSYan, Zheng page = find_get_page(inode->i_mapping, 0); 162828127bddSYan, Zheng if (page) { 162928127bddSYan, Zheng if (PageUptodate(page)) { 163028127bddSYan, Zheng from_pagecache = true; 163128127bddSYan, Zheng lock_page(page); 163228127bddSYan, Zheng } else { 163309cbfeafSKirill A. Shutemov put_page(page); 163428127bddSYan, Zheng page = NULL; 163528127bddSYan, Zheng } 163628127bddSYan, Zheng } 163728127bddSYan, Zheng } 163828127bddSYan, Zheng 163928127bddSYan, Zheng if (page) { 164028127bddSYan, Zheng len = i_size_read(inode); 164109cbfeafSKirill A. Shutemov if (len > PAGE_SIZE) 164209cbfeafSKirill A. 
Shutemov len = PAGE_SIZE; 164328127bddSYan, Zheng } else { 164428127bddSYan, Zheng page = __page_cache_alloc(GFP_NOFS); 164528127bddSYan, Zheng if (!page) { 164628127bddSYan, Zheng err = -ENOMEM; 164728127bddSYan, Zheng goto out; 164828127bddSYan, Zheng } 164928127bddSYan, Zheng err = __ceph_do_getattr(inode, page, 165028127bddSYan, Zheng CEPH_STAT_CAP_INLINE_DATA, true); 165128127bddSYan, Zheng if (err < 0) { 165228127bddSYan, Zheng /* no inline data */ 165328127bddSYan, Zheng if (err == -ENODATA) 165428127bddSYan, Zheng err = 0; 165528127bddSYan, Zheng goto out; 165628127bddSYan, Zheng } 165728127bddSYan, Zheng len = err; 165828127bddSYan, Zheng } 165928127bddSYan, Zheng 166028127bddSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 166128127bddSYan, Zheng ceph_vino(inode), 0, &len, 0, 1, 166254ea0046SIlya Dryomov CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, 166334b759b4SIlya Dryomov NULL, 0, 0, false); 166428127bddSYan, Zheng if (IS_ERR(req)) { 166528127bddSYan, Zheng err = PTR_ERR(req); 166628127bddSYan, Zheng goto out; 166728127bddSYan, Zheng } 166828127bddSYan, Zheng 1669fac02ddfSArnd Bergmann req->r_mtime = inode->i_mtime; 167028127bddSYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 167128127bddSYan, Zheng if (!err) 167228127bddSYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, req); 167328127bddSYan, Zheng ceph_osdc_put_request(req); 167428127bddSYan, Zheng if (err < 0) 167528127bddSYan, Zheng goto out; 167628127bddSYan, Zheng 167728127bddSYan, Zheng req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 167828127bddSYan, Zheng ceph_vino(inode), 0, &len, 1, 3, 167954ea0046SIlya Dryomov CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 168034b759b4SIlya Dryomov NULL, ci->i_truncate_seq, 168134b759b4SIlya Dryomov ci->i_truncate_size, false); 168228127bddSYan, Zheng if (IS_ERR(req)) { 168328127bddSYan, Zheng err = PTR_ERR(req); 168428127bddSYan, Zheng goto out; 168528127bddSYan, Zheng } 168628127bddSYan, Zheng 168728127bddSYan, Zheng osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 168828127bddSYan, Zheng 1689ec137c10SYan, Zheng { 1690ec137c10SYan, Zheng __le64 xattr_buf = cpu_to_le64(inline_version); 169128127bddSYan, Zheng err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1692ec137c10SYan, Zheng "inline_version", &xattr_buf, 1693ec137c10SYan, Zheng sizeof(xattr_buf), 169428127bddSYan, Zheng CEPH_OSD_CMPXATTR_OP_GT, 169528127bddSYan, Zheng CEPH_OSD_CMPXATTR_MODE_U64); 169628127bddSYan, Zheng if (err) 169728127bddSYan, Zheng goto out_put; 1698ec137c10SYan, Zheng } 169928127bddSYan, Zheng 1700ec137c10SYan, Zheng { 1701ec137c10SYan, Zheng char xattr_buf[32]; 1702ec137c10SYan, Zheng int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1703ec137c10SYan, Zheng "%llu", inline_version); 170428127bddSYan, Zheng err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1705ec137c10SYan, Zheng "inline_version", 1706ec137c10SYan, Zheng xattr_buf, xattr_len, 0, 0); 170728127bddSYan, Zheng if (err) 170828127bddSYan, Zheng goto out_put; 1709ec137c10SYan, Zheng } 171028127bddSYan, Zheng 1711fac02ddfSArnd Bergmann req->r_mtime = inode->i_mtime; 171228127bddSYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 171328127bddSYan, Zheng if (!err) 171428127bddSYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, req); 171597e27aaaSXiubo Li 17168ae99ae2SXiubo Li ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 1717903f4fecSXiubo Li req->r_end_latency, len, err); 
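	/*
	 * If the CMPXATTR guard no longer matched (e.g. another client raced
	 * and already uninlined the data with a newer inline_version), the
	 * OSD fails the whole request with -ECANCELED; that case is treated
	 * as success below.
	 */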
171897e27aaaSXiubo Li 171928127bddSYan, Zheng out_put: 172028127bddSYan, Zheng ceph_osdc_put_request(req); 172128127bddSYan, Zheng if (err == -ECANCELED) 172228127bddSYan, Zheng err = 0; 172328127bddSYan, Zheng out: 172428127bddSYan, Zheng if (page && page != locked_page) { 172528127bddSYan, Zheng if (from_pagecache) { 172628127bddSYan, Zheng unlock_page(page); 172709cbfeafSKirill A. Shutemov put_page(page); 172828127bddSYan, Zheng } else 172928127bddSYan, Zheng __free_pages(page, 0); 173028127bddSYan, Zheng } 173128127bddSYan, Zheng 173228127bddSYan, Zheng dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 173328127bddSYan, Zheng inode, ceph_vinop(inode), inline_version, err); 173428127bddSYan, Zheng return err; 173528127bddSYan, Zheng } 173628127bddSYan, Zheng 17377cbea8dcSKirill A. Shutemov static const struct vm_operations_struct ceph_vmops = { 173861f68816SYan, Zheng .fault = ceph_filemap_fault, 17391d3576fdSSage Weil .page_mkwrite = ceph_page_mkwrite, 17401d3576fdSSage Weil }; 17411d3576fdSSage Weil 17421d3576fdSSage Weil int ceph_mmap(struct file *file, struct vm_area_struct *vma) 17431d3576fdSSage Weil { 17441d3576fdSSage Weil struct address_space *mapping = file->f_mapping; 17451d3576fdSSage Weil 17461d3576fdSSage Weil if (!mapping->a_ops->readpage) 17471d3576fdSSage Weil return -ENOEXEC; 17481d3576fdSSage Weil file_accessed(file); 17491d3576fdSSage Weil vma->vm_ops = &ceph_vmops; 17501d3576fdSSage Weil return 0; 17511d3576fdSSage Weil } 175210183a69SYan, Zheng 175310183a69SYan, Zheng enum { 175410183a69SYan, Zheng POOL_READ = 1, 175510183a69SYan, Zheng POOL_WRITE = 2, 175610183a69SYan, Zheng }; 175710183a69SYan, Zheng 1758779fe0fbSYan, Zheng static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 1759779fe0fbSYan, Zheng s64 pool, struct ceph_string *pool_ns) 176010183a69SYan, Zheng { 176110183a69SYan, Zheng struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 176210183a69SYan, Zheng struct ceph_mds_client *mdsc = fsc->mdsc; 176310183a69SYan, Zheng struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 176410183a69SYan, Zheng struct rb_node **p, *parent; 176510183a69SYan, Zheng struct ceph_pool_perm *perm; 176610183a69SYan, Zheng struct page **pages; 1767779fe0fbSYan, Zheng size_t pool_ns_len; 176810183a69SYan, Zheng int err = 0, err2 = 0, have = 0; 176910183a69SYan, Zheng 177010183a69SYan, Zheng down_read(&mdsc->pool_perm_rwsem); 177110183a69SYan, Zheng p = &mdsc->pool_perm_tree.rb_node; 177210183a69SYan, Zheng while (*p) { 177310183a69SYan, Zheng perm = rb_entry(*p, struct ceph_pool_perm, node); 177410183a69SYan, Zheng if (pool < perm->pool) 177510183a69SYan, Zheng p = &(*p)->rb_left; 177610183a69SYan, Zheng else if (pool > perm->pool) 177710183a69SYan, Zheng p = &(*p)->rb_right; 177810183a69SYan, Zheng else { 1779779fe0fbSYan, Zheng int ret = ceph_compare_string(pool_ns, 1780779fe0fbSYan, Zheng perm->pool_ns, 1781779fe0fbSYan, Zheng perm->pool_ns_len); 1782779fe0fbSYan, Zheng if (ret < 0) 1783779fe0fbSYan, Zheng p = &(*p)->rb_left; 1784779fe0fbSYan, Zheng else if (ret > 0) 1785779fe0fbSYan, Zheng p = &(*p)->rb_right; 1786779fe0fbSYan, Zheng else { 178710183a69SYan, Zheng have = perm->perm; 178810183a69SYan, Zheng break; 178910183a69SYan, Zheng } 179010183a69SYan, Zheng } 1791779fe0fbSYan, Zheng } 179210183a69SYan, Zheng up_read(&mdsc->pool_perm_rwsem); 179310183a69SYan, Zheng if (*p) 179410183a69SYan, Zheng goto out; 179510183a69SYan, Zheng 1796779fe0fbSYan, Zheng if (pool_ns) 1797779fe0fbSYan, Zheng dout("__ceph_pool_perm_get pool %lld ns %.*s no 
perm cached\n", 1798779fe0fbSYan, Zheng pool, (int)pool_ns->len, pool_ns->str); 1799779fe0fbSYan, Zheng else 18007627151eSYan, Zheng dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 180110183a69SYan, Zheng 180210183a69SYan, Zheng down_write(&mdsc->pool_perm_rwsem); 1803779fe0fbSYan, Zheng p = &mdsc->pool_perm_tree.rb_node; 180410183a69SYan, Zheng parent = NULL; 180510183a69SYan, Zheng while (*p) { 180610183a69SYan, Zheng parent = *p; 180710183a69SYan, Zheng perm = rb_entry(parent, struct ceph_pool_perm, node); 180810183a69SYan, Zheng if (pool < perm->pool) 180910183a69SYan, Zheng p = &(*p)->rb_left; 181010183a69SYan, Zheng else if (pool > perm->pool) 181110183a69SYan, Zheng p = &(*p)->rb_right; 181210183a69SYan, Zheng else { 1813779fe0fbSYan, Zheng int ret = ceph_compare_string(pool_ns, 1814779fe0fbSYan, Zheng perm->pool_ns, 1815779fe0fbSYan, Zheng perm->pool_ns_len); 1816779fe0fbSYan, Zheng if (ret < 0) 1817779fe0fbSYan, Zheng p = &(*p)->rb_left; 1818779fe0fbSYan, Zheng else if (ret > 0) 1819779fe0fbSYan, Zheng p = &(*p)->rb_right; 1820779fe0fbSYan, Zheng else { 182110183a69SYan, Zheng have = perm->perm; 182210183a69SYan, Zheng break; 182310183a69SYan, Zheng } 182410183a69SYan, Zheng } 1825779fe0fbSYan, Zheng } 182610183a69SYan, Zheng if (*p) { 182710183a69SYan, Zheng up_write(&mdsc->pool_perm_rwsem); 182810183a69SYan, Zheng goto out; 182910183a69SYan, Zheng } 183010183a69SYan, Zheng 183134b759b4SIlya Dryomov rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 183210183a69SYan, Zheng 1, false, GFP_NOFS); 183310183a69SYan, Zheng if (!rd_req) { 183410183a69SYan, Zheng err = -ENOMEM; 183510183a69SYan, Zheng goto out_unlock; 183610183a69SYan, Zheng } 183710183a69SYan, Zheng 183810183a69SYan, Zheng rd_req->r_flags = CEPH_OSD_FLAG_READ; 183910183a69SYan, Zheng osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 184010183a69SYan, Zheng rd_req->r_base_oloc.pool = pool; 1841779fe0fbSYan, Zheng if (pool_ns) 1842779fe0fbSYan, Zheng rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns); 1843d30291b9SIlya Dryomov ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino); 184410183a69SYan, Zheng 184513d1ad16SIlya Dryomov err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS); 184613d1ad16SIlya Dryomov if (err) 184713d1ad16SIlya Dryomov goto out_unlock; 184810183a69SYan, Zheng 184934b759b4SIlya Dryomov wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 185010183a69SYan, Zheng 1, false, GFP_NOFS); 185110183a69SYan, Zheng if (!wr_req) { 185210183a69SYan, Zheng err = -ENOMEM; 185310183a69SYan, Zheng goto out_unlock; 185410183a69SYan, Zheng } 185510183a69SYan, Zheng 185654ea0046SIlya Dryomov wr_req->r_flags = CEPH_OSD_FLAG_WRITE; 185710183a69SYan, Zheng osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 185863244fa1SIlya Dryomov ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); 1859d30291b9SIlya Dryomov ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); 186010183a69SYan, Zheng 186113d1ad16SIlya Dryomov err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS); 186213d1ad16SIlya Dryomov if (err) 186313d1ad16SIlya Dryomov goto out_unlock; 186410183a69SYan, Zheng 186510183a69SYan, Zheng /* one page should be large enough for STAT data */ 186610183a69SYan, Zheng pages = ceph_alloc_page_vector(1, GFP_KERNEL); 186710183a69SYan, Zheng if (IS_ERR(pages)) { 186810183a69SYan, Zheng err = PTR_ERR(pages); 186910183a69SYan, Zheng goto out_unlock; 187010183a69SYan, Zheng } 187110183a69SYan, Zheng 187210183a69SYan, Zheng osd_req_op_raw_data_in_pages(rd_req, 0, 
pages, PAGE_SIZE, 187310183a69SYan, Zheng 0, false, true); 187410183a69SYan, Zheng err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 187510183a69SYan, Zheng 1876fac02ddfSArnd Bergmann wr_req->r_mtime = ci->vfs_inode.i_mtime; 187710183a69SYan, Zheng err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 187810183a69SYan, Zheng 187910183a69SYan, Zheng if (!err) 188010183a69SYan, Zheng err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 188110183a69SYan, Zheng if (!err2) 188210183a69SYan, Zheng err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 188310183a69SYan, Zheng 188410183a69SYan, Zheng if (err >= 0 || err == -ENOENT) 188510183a69SYan, Zheng have |= POOL_READ; 1886131d7eb4SYan, Zheng else if (err != -EPERM) { 18870b98acd6SIlya Dryomov if (err == -EBLOCKLISTED) 18880b98acd6SIlya Dryomov fsc->blocklisted = true; 188910183a69SYan, Zheng goto out_unlock; 1890131d7eb4SYan, Zheng } 189110183a69SYan, Zheng 189210183a69SYan, Zheng if (err2 == 0 || err2 == -EEXIST) 189310183a69SYan, Zheng have |= POOL_WRITE; 189410183a69SYan, Zheng else if (err2 != -EPERM) { 18950b98acd6SIlya Dryomov if (err2 == -EBLOCKLISTED) 18960b98acd6SIlya Dryomov fsc->blocklisted = true; 189710183a69SYan, Zheng err = err2; 189810183a69SYan, Zheng goto out_unlock; 189910183a69SYan, Zheng } 190010183a69SYan, Zheng 1901779fe0fbSYan, Zheng pool_ns_len = pool_ns ? pool_ns->len : 0; 1902779fe0fbSYan, Zheng perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS); 190310183a69SYan, Zheng if (!perm) { 190410183a69SYan, Zheng err = -ENOMEM; 190510183a69SYan, Zheng goto out_unlock; 190610183a69SYan, Zheng } 190710183a69SYan, Zheng 190810183a69SYan, Zheng perm->pool = pool; 190910183a69SYan, Zheng perm->perm = have; 1910779fe0fbSYan, Zheng perm->pool_ns_len = pool_ns_len; 1911779fe0fbSYan, Zheng if (pool_ns_len > 0) 1912779fe0fbSYan, Zheng memcpy(perm->pool_ns, pool_ns->str, pool_ns_len); 1913779fe0fbSYan, Zheng perm->pool_ns[pool_ns_len] = 0; 1914779fe0fbSYan, Zheng 191510183a69SYan, Zheng rb_link_node(&perm->node, parent, p); 191610183a69SYan, Zheng rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 191710183a69SYan, Zheng err = 0; 191810183a69SYan, Zheng out_unlock: 191910183a69SYan, Zheng up_write(&mdsc->pool_perm_rwsem); 192010183a69SYan, Zheng 192110183a69SYan, Zheng ceph_osdc_put_request(rd_req); 192210183a69SYan, Zheng ceph_osdc_put_request(wr_req); 192310183a69SYan, Zheng out: 192410183a69SYan, Zheng if (!err) 192510183a69SYan, Zheng err = have; 1926779fe0fbSYan, Zheng if (pool_ns) 1927779fe0fbSYan, Zheng dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 1928779fe0fbSYan, Zheng pool, (int)pool_ns->len, pool_ns->str, err); 1929779fe0fbSYan, Zheng else 19307627151eSYan, Zheng dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 193110183a69SYan, Zheng return err; 193210183a69SYan, Zheng } 193310183a69SYan, Zheng 19345e3ded1bSYan, Zheng int ceph_pool_perm_check(struct inode *inode, int need) 193510183a69SYan, Zheng { 19365e3ded1bSYan, Zheng struct ceph_inode_info *ci = ceph_inode(inode); 1937779fe0fbSYan, Zheng struct ceph_string *pool_ns; 19385e3ded1bSYan, Zheng s64 pool; 193910183a69SYan, Zheng int ret, flags; 194010183a69SYan, Zheng 1941e9b22501SJeff Layton /* Only need to do this for regular files */ 1942e9b22501SJeff Layton if (!S_ISREG(inode->i_mode)) 1943e9b22501SJeff Layton return 0; 1944e9b22501SJeff Layton 194580e80fbbSYan, Zheng if (ci->i_vino.snap != CEPH_NOSNAP) { 194680e80fbbSYan, Zheng /* 194780e80fbbSYan, Zheng * Pool permission check needs to 
write to the first object. 194880e80fbbSYan, Zheng * But for snapshot, head of the first object may have alread 194980e80fbbSYan, Zheng * been deleted. Skip check to avoid creating orphan object. 195080e80fbbSYan, Zheng */ 195180e80fbbSYan, Zheng return 0; 195280e80fbbSYan, Zheng } 195380e80fbbSYan, Zheng 19545e3ded1bSYan, Zheng if (ceph_test_mount_opt(ceph_inode_to_client(inode), 195510183a69SYan, Zheng NOPOOLPERM)) 195610183a69SYan, Zheng return 0; 195710183a69SYan, Zheng 195810183a69SYan, Zheng spin_lock(&ci->i_ceph_lock); 195910183a69SYan, Zheng flags = ci->i_ceph_flags; 19607627151eSYan, Zheng pool = ci->i_layout.pool_id; 196110183a69SYan, Zheng spin_unlock(&ci->i_ceph_lock); 196210183a69SYan, Zheng check: 196310183a69SYan, Zheng if (flags & CEPH_I_POOL_PERM) { 196410183a69SYan, Zheng if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 19657627151eSYan, Zheng dout("ceph_pool_perm_check pool %lld no read perm\n", 196610183a69SYan, Zheng pool); 196710183a69SYan, Zheng return -EPERM; 196810183a69SYan, Zheng } 196910183a69SYan, Zheng if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 19707627151eSYan, Zheng dout("ceph_pool_perm_check pool %lld no write perm\n", 197110183a69SYan, Zheng pool); 197210183a69SYan, Zheng return -EPERM; 197310183a69SYan, Zheng } 197410183a69SYan, Zheng return 0; 197510183a69SYan, Zheng } 197610183a69SYan, Zheng 1977779fe0fbSYan, Zheng pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 1978779fe0fbSYan, Zheng ret = __ceph_pool_perm_get(ci, pool, pool_ns); 1979779fe0fbSYan, Zheng ceph_put_string(pool_ns); 198010183a69SYan, Zheng if (ret < 0) 198110183a69SYan, Zheng return ret; 198210183a69SYan, Zheng 198310183a69SYan, Zheng flags = CEPH_I_POOL_PERM; 198410183a69SYan, Zheng if (ret & POOL_READ) 198510183a69SYan, Zheng flags |= CEPH_I_POOL_RD; 198610183a69SYan, Zheng if (ret & POOL_WRITE) 198710183a69SYan, Zheng flags |= CEPH_I_POOL_WR; 198810183a69SYan, Zheng 198910183a69SYan, Zheng spin_lock(&ci->i_ceph_lock); 1990779fe0fbSYan, Zheng if (pool == ci->i_layout.pool_id && 1991779fe0fbSYan, Zheng pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) { 1992779fe0fbSYan, Zheng ci->i_ceph_flags |= flags; 199310183a69SYan, Zheng } else { 19947627151eSYan, Zheng pool = ci->i_layout.pool_id; 199510183a69SYan, Zheng flags = ci->i_ceph_flags; 199610183a69SYan, Zheng } 199710183a69SYan, Zheng spin_unlock(&ci->i_ceph_lock); 199810183a69SYan, Zheng goto check; 199910183a69SYan, Zheng } 200010183a69SYan, Zheng 200110183a69SYan, Zheng void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 200210183a69SYan, Zheng { 200310183a69SYan, Zheng struct ceph_pool_perm *perm; 200410183a69SYan, Zheng struct rb_node *n; 200510183a69SYan, Zheng 200610183a69SYan, Zheng while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 200710183a69SYan, Zheng n = rb_first(&mdsc->pool_perm_tree); 200810183a69SYan, Zheng perm = rb_entry(n, struct ceph_pool_perm, node); 200910183a69SYan, Zheng rb_erase(n, &mdsc->pool_perm_tree); 201010183a69SYan, Zheng kfree(perm); 201110183a69SYan, Zheng } 201210183a69SYan, Zheng } 2013
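/*
 * Illustrative sketch: how a caller in the cap-acquisition path is
 * expected to consult ceph_pool_perm_check() before handing out write
 * caps.  The helper name and call site below are hypothetical; only
 * ceph_pool_perm_check() and CEPH_CAP_FILE_WR come from the code above
 * and the ceph headers.
 */
static int __maybe_unused example_check_pool_before_write(struct inode *inode)
{
	int err;

	/*
	 * Returns 0 when the inode's data pool (and namespace) allows
	 * writes, -EPERM when the OSDs reject them, or another negative
	 * errno if probing the pool failed.
	 */
	err = ceph_pool_perm_check(inode, CEPH_CAP_FILE_WR);
	if (err < 0)
		return err;

	return 0;
}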