// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata);
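
/*
 * Return the snap context a dirty page was attached to via
 * page->private, or NULL if the page carries no private data.
 */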
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	if (folio_test_dirty(folio)) {
		dout("%p dirty_folio %p idx %lu -- already dirty\n",
		     mapping->host, folio, folio->index);
		VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
		return false;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, folio, folio->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in folio->private.  Also set
	 * PagePrivate so that we get invalidate_folio callback.
	 */
	VM_WARN_ON_FOLIO(folio->private, folio);
	folio_attach_private(folio, snapc);

	return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;

	inode = folio->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != folio_size(folio)) {
		dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
		     inode, folio->index, offset, length);
		return;
	}

	WARN_ON(!folio_test_locked(folio));
	if (folio_test_private(folio)) {
		dout("%p invalidate_folio idx %lu full dirty page\n",
		     inode, folio->index);

		snapc = folio_detach_private(folio);
		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
		ceph_put_snap_context(snapc);
	}

	folio_wait_fscache(folio);
}
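
/*
 * Try to release a folio.  Folios that still reference a snap context
 * (i.e. dirty or not yet fully written back) cannot be released; folios
 * being written to the fscache are only waited on if the caller may
 * sleep and we are not in reclaim.
 */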
"" : "not "); 172 173 if (folio_test_private(folio)) 174 return false; 175 176 if (folio_test_fscache(folio)) { 177 if (current_is_kswapd() || !(gfp & __GFP_FS)) 178 return false; 179 folio_wait_fscache(folio); 180 } 181 ceph_fscache_note_page_release(inode); 182 return true; 183 } 184 185 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq) 186 { 187 struct inode *inode = rreq->inode; 188 struct ceph_inode_info *ci = ceph_inode(inode); 189 struct ceph_file_layout *lo = &ci->i_layout; 190 u32 blockoff; 191 u64 blockno; 192 193 /* Expand the start downward */ 194 blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); 195 rreq->start = blockno * lo->stripe_unit; 196 rreq->len += blockoff; 197 198 /* Now, round up the length to the next block */ 199 rreq->len = roundup(rreq->len, lo->stripe_unit); 200 } 201 202 static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq) 203 { 204 struct inode *inode = subreq->rreq->inode; 205 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 206 struct ceph_inode_info *ci = ceph_inode(inode); 207 u64 objno, objoff; 208 u32 xlen; 209 210 /* Truncate the extent at the end of the current block */ 211 ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, 212 &objno, &objoff, &xlen); 213 subreq->len = min(xlen, fsc->mount_options->rsize); 214 return true; 215 } 216 217 static void finish_netfs_read(struct ceph_osd_request *req) 218 { 219 struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); 220 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 221 struct netfs_io_subrequest *subreq = req->r_priv; 222 int num_pages; 223 int err = req->r_result; 224 225 ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 226 req->r_end_latency, osd_data->length, err); 227 228 dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 229 subreq->len, i_size_read(req->r_inode)); 230 231 /* no object means success but no data */ 232 if (err == -ENOENT) 233 err = 0; 234 else if (err == -EBLOCKLISTED) 235 fsc->blocklisted = true; 236 237 if (err >= 0 && err < subreq->len) 238 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 239 240 netfs_subreq_terminated(subreq, err, false); 241 242 num_pages = calc_pages_for(osd_data->alignment, osd_data->length); 243 ceph_put_page_vector(osd_data->pages, num_pages, false); 244 iput(req->r_inode); 245 } 246 247 static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq) 248 { 249 struct netfs_io_request *rreq = subreq->rreq; 250 struct inode *inode = rreq->inode; 251 struct ceph_mds_reply_info_parsed *rinfo; 252 struct ceph_mds_reply_info_in *iinfo; 253 struct ceph_mds_request *req; 254 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 255 struct ceph_inode_info *ci = ceph_inode(inode); 256 struct iov_iter iter; 257 ssize_t err = 0; 258 size_t len; 259 int mode; 260 261 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 262 __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); 263 264 if (subreq->start >= inode->i_size) 265 goto out; 266 267 /* We need to fetch the inline data. 
static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_mds_reply_info_parsed *rinfo;
	struct ceph_mds_reply_info_in *iinfo;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct iov_iter iter;
	ssize_t err = 0;
	size_t len;
	int mode;

	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

	if (subreq->start >= inode->i_size)
		goto out;

	/* We need to fetch the inline data. */
	mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_ino1 = ci->i_vino;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
	req->r_num_caps = 2;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err < 0)
		goto out;

	rinfo = &req->r_reply_info;
	iinfo = &rinfo->targeti;
	if (iinfo->inline_version == CEPH_INLINE_NONE) {
		/* The data got uninlined */
		ceph_mdsc_put_request(req);
		return false;
	}

	len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
	err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
	if (err == 0)
		err = -EFAULT;

	ceph_mdsc_put_request(req);
out:
	netfs_subreq_terminated(subreq, err, false);
	return true;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct inode *inode = rreq->inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_vino vino = ceph_vino(inode);
	struct iov_iter iter;
	struct page **pages;
	size_t page_off;
	int err = 0;
	u64 len = subreq->len;

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
		return;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
			0, 1, CEPH_OSD_OP_READ,
			CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
			NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}

	dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
	err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
	if (err < 0) {
		dout("%s: iov_iter_get_pages_alloc returned %d\n", __func__, err);
		goto out;
	}

	/* should always give us a page-aligned read */
	WARN_ON_ONCE(page_off);
	len = err;
	err = 0;

	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_netfs_read;
	req->r_priv = subreq;
	req->r_inode = inode;
	ihold(inode);

	ceph_osdc_start_request(req->r_osdc, req);
out:
	ceph_osdc_put_request(req);
	if (err)
		netfs_subreq_terminated(subreq, err, false);
	dout("%s: result %d\n", __func__, err);
}
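
/*
 * Prepare a netfs read request.  For readahead we must hold the Fc
 * (FILE_CACHE) cap: callers such as fadvise/madvise may not, so try to
 * take a cap reference here and fail with -EACCES if it isn't granted.
 */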
static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct inode *inode = rreq->inode;
	int got = 0, want = CEPH_CAP_FILE_CACHE;
	struct ceph_netfs_request_data *priv;
	int ret = 0;

	if (rreq->origin != NETFS_READAHEAD)
		return 0;

	priv = kzalloc(sizeof(*priv), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	if (file) {
		struct ceph_rw_context *rw_ctx;
		struct ceph_file_info *fi = file->private_data;

		priv->file_ra_pages = file->f_ra.ra_pages;
		priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

		rw_ctx = ceph_find_rw_context(fi);
		if (rw_ctx) {
			rreq->netfs_priv = priv;
			return 0;
		}
	}

	/*
	 * readahead callers do not necessarily hold Fcb caps
	 * (e.g. fadvise, madvise).
	 */
	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
	if (ret < 0) {
		dout("start_read %p, error getting cap\n", inode);
		goto out;
	}

	if (!(got & want)) {
		dout("start_read %p, no cache cap\n", inode);
		ret = -EACCES;
		goto out;
	}
	if (ret == 0) {
		ret = -EACCES;
		goto out;
	}

	priv->caps = got;
	rreq->netfs_priv = priv;

out:
	if (ret < 0)
		kfree(priv);

	return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
	struct ceph_netfs_request_data *priv = rreq->netfs_priv;

	if (!priv)
		return;

	if (priv->caps)
		ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
	kfree(priv);
	rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
	.init_request = ceph_init_request,
	.free_request = ceph_netfs_free_request,
	.begin_cache_operation = ceph_begin_cache_operation,
	.issue_read = ceph_netfs_issue_read,
	.expand_readahead = ceph_netfs_expand_readahead,
	.clamp_length = ceph_netfs_clamp_length,
	.check_write_begin = ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
	set_page_fscache(page);
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
	struct inode *inode = priv;

	if (IS_ERR_VALUE(error) && error != -ENOBUFS)
		ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

	fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
			       ceph_fscache_write_terminated, inode, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */
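
/*
 * Snapshot-aware writeback parameters, captured under i_ceph_lock from
 * either a cap snap or the live inode: the size and truncate metadata
 * to pass to the OSD, whether that size can still change, and whether
 * we are writing the head (not yet snapped) context.
 */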
struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}
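
/*
 * Return how many bytes, starting at @start, are safe to write back
 * from @page: clip to the size recorded for the page's snap context
 * (or the live i_size for head pages) and to the end of the page.
 */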
static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + thp_size(page))
		end = page_offset(page) + thp_size(page);
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err;
	loff_t len = thp_size(page);
	struct ceph_writeback_ctl ceph_wbc;
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	bool caching = ceph_is_cache_enabled(inode);

	dout("writepage %p idx %lu\n", page, page->index);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
	if (page_off >= ceph_wbc.i_size) {
		dout("folio at %lu beyond eof %llu\n", folio->index,
		     ceph_wbc.i_size);
		folio_invalidate(folio, 0, folio_size(folio));
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = true;

	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
				    ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
				    true);
	if (IS_ERR(req)) {
		redirty_page_for_writepage(wbc, page);
		return PTR_ERR(req);
	}

	set_page_writeback(page);
	if (caching)
		ceph_set_page_fscache(page);
	ceph_fscache_write_to_cache(inode, page_off, len, caching);

	/* it may be a short write due to an object boundary */
	WARN_ON_ONCE(len > thp_size(page));
	osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
	dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);

	req->r_mtime = inode->i_mtime;
	ceph_osdc_start_request(osdc, req);
	err = ceph_osdc_wait_request(osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

	ceph_osdc_put_request(req);
	if (err == 0)
		err = len;

	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	oldest = detach_page_private(page);
	WARN_ON_ONCE(oldest != snapc);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		fsc->write_congested = false;

	return err;
}
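
/*
 * The ->writepage address_space op: pin the inode, write the page via
 * writepage_nounlock(), then unlock it.  -ERESTARTSYS (direct reclaimer
 * killed by SIGKILL) is reported as success so the caller does not set
 * a mapping/page error.
 */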
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    ceph_inode_to_client(inode)->write_congested)
		return AOP_WRITEPAGE_ACTIVATE;

	wait_on_page_fscache(page);

	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	unsigned int len = 0;
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
		if (rc == -EBLOCKLISTED)
			fsc->blocklisted = true;
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
			pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
				__func__, req->r_ops[i].op, req, i, req->r_tid);
			break;
		}

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		len += osd_data->length;
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				fsc->write_congested = false;

			ceph_put_snap_context(detach_page_private(page));
			end_page_writeback(page);
			dout("unlocking %p\n", page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		release_pages(osd_data->pages, num_pages);
	}

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, rc);

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct folio_batch fbatch;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool done = false;
	bool caching = ceph_is_cache_enabled(inode);
	xa_mark_t tag;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    fsc->write_congested)
		return 0;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ceph_inode_is_shutdown(inode)) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;

	folio_batch_init(&fbatch);

	start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
	index = start_index;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		tag = PAGECACHE_TAG_TOWRITE;
	} else {
		tag = PAGECACHE_TAG_DIRTY;
	}
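
	/*
	 * Each pass below writes back only pages belonging to the oldest
	 * snap context that still has dirty data; see the comment at the
	 * top of this file for why writes must be submitted in snap order.
	 */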
retry:
	/* find oldest snap context with dirty data */
	snapc = get_oldest_context(inode, &ceph_wbc, NULL);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	should_loop = false;
	if (ceph_wbc.head_snapc && snapc != last_snapc) {
		/* where to start/end? */
		if (wbc->range_cyclic) {
			index = start_index;
			end = -1;
			if (index > 0)
				should_loop = true;
			dout(" cyclic, start at %lu\n", index);
		} else {
			index = wbc->range_start >> PAGE_SHIFT;
			end = wbc->range_end >> PAGE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = true;
			dout(" not cyclic, %lu to %lu\n", index, end);
		}
	} else if (!ceph_wbc.head_snapc) {
		/* Do not respect wbc->range_{start,end}.  Dirty pages
		 * in that range can be associated with a newer snapc.
		 * They are not writeable until all dirty pages
		 * associated with 'snapc' have been written. */
		if (index > 0)
			should_loop = true;
		dout(" non-head snapc, range whole\n");
	}

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	ceph_put_snap_context(last_snapc);
	last_snapc = snapc;

	while (!done && index <= end) {
		int num_ops = 0, op_idx;
		unsigned i, nr_folios, max_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		struct page *page;
		pgoff_t strip_unit_end = 0;
		u64 offset = 0, len = 0;
		bool from_pool = false;

		max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
		nr_folios = filemap_get_folios_tag(mapping, &index,
						   end, tag, &fbatch);
		dout("pagevec_lookup_range_tag got %d\n", nr_folios);
		if (!nr_folios && !locked_pages)
			break;
		for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
			page = &fbatch.folios[i]->page;
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				if (!should_loop &&
				    !ceph_wbc.head_snapc &&
				    wbc->sync_mode != WB_SYNC_NONE)
					should_loop = true;
				unlock_page(page);
				continue;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				struct folio *folio = page_folio(page);

				dout("folio at %lu beyond eof %llu\n",
				     folio->index, ceph_wbc.i_size);
				if ((ceph_wbc.size_stable ||
				    folio_pos(folio) >= i_size_read(inode)) &&
				    folio_clear_dirty_for_io(folio))
					folio_invalidate(folio, 0,
							folio_size(folio));
				folio_unlock(folio);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (PageWriteback(page) || PageFsCache(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
				wait_on_page_fscache(page);
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;
				u32 xlen;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				ceph_calc_file_object_mapping(&ci->i_layout,
							      offset, wsize,
							      &objnum, &objoff,
							      &xlen);
				len = xlen;

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc_array(max_pages,
						      sizeof(*pages),
						      GFP_NOFS);
				if (!pages) {
					from_pool = true;
					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
						            CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in fbatch */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb))
				fsc->write_congested = true;

			pages[locked_pages++] = page;
			fbatch.folios[i] = NULL;

			len += thp_size(page);
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_folios;
		if (i) {
			unsigned j, n = 0;
			/* shift unused pages to beginning of fbatch */
			for (j = 0; j < nr_folios; j++) {
				if (!fbatch.folios[j])
					continue;
				if (n < j)
					fbatch.folios[n] = fbatch.folios[j];
				n++;
			}
			fbatch.nr = n;

			if (nr_folios && i == nr_folios &&
			    locked_pages < max_pages) {
				dout("reached end fbatch, trying for more\n");
				folio_batch_release(&fbatch);
				goto get_more_pages;
			}
		}
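
		/*
		 * Build an OSD write request covering the pages gathered so
		 * far.  If the full-size request cannot be allocated, fall
		 * back to a smaller one and submit the rest in another pass.
		 */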
new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					    snapc, ceph_wbc.truncate_seq,
					    ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE,
						    snapc, ceph_wbc.truncate_seq,
						    ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     thp_size(page) - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			/*
			 * Discontinuity in page range? Ceph can handle that by just passing
			 * multiple extents in the write op.
			 */
			if (offset + len != cur_offset) {
				/* If it's full, stop here */
				if (op_idx + 1 == req->r_num_ops)
					break;

				/* Kick off an fscache write with what we have so far. */
				ceph_fscache_write_to_cache(inode, offset, len, caching);

				/* Start a new extent */
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							from_pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			if (caching)
				ceph_set_page_fscache(pages[i]);
			len += thp_size(page);
		}
		ceph_fscache_write_to_cache(inode, offset, len, caching);

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - thp_size(page);
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, from_pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		from_pool = false;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc_array(locked_pages, sizeof(*pages),
					      GFP_NOFS);
			if (!pages) {
				from_pool = true;
				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		ceph_osdc_start_request(&fsc->client->osdc, req);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = true;

release_folios:
		dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
		     fbatch.nr ? fbatch.folios[0] : NULL);
		folio_batch_release(&fbatch);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = filemap_get_folios_tag(mapping, &index,
						(pgoff_t)-1,
						PAGECACHE_TAG_WRITEBACK,
						&fbatch))) {
				for (i = 0; i < nr; i++) {
					page = &fbatch.folios[i]->page;
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				folio_batch_release(&fbatch);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/**
 * ceph_find_incompatible - find an incompatible context and return it
 * @page: page being dirtied
 *
 * We are only allowed to write into/dirty a page if the page is
 * clean, or already dirty within the same snap context.  Returns a
 * conflicting context if there is one, NULL if there isn't, or a
 * negative error code on other errors.
 *
 * Must be called with page lock held.
 */
static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (ceph_inode_is_shutdown(inode)) {
		dout(" page %p %llx:%llx is shutdown\n", page,
		     ceph_vinop(inode));
		return ERR_PTR(-ESTALE);
	}

	for (;;) {
		struct ceph_snap_context *snapc, *oldest;

		wait_on_page_writeback(page);

		snapc = page_snap_context(page);
		if (!snapc || snapc == ci->i_head_snapc)
			break;

		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			/* not writeable -- return it for the caller to deal with */
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n", page, snapc);
			return ceph_get_snap_context(snapc);
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n", page, snapc);
		if (clear_page_dirty_for_io(page)) {
			int r = writepage_nounlock(page, NULL);
			if (r < 0)
				return ERR_PTR(r);
		}
	}
	return NULL;
}
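
/*
 * netfs write_begin hook: if the folio is dirty in an older snap context
 * that is not yet writeable, drop the folio, kick off writeback, and wait
 * for the conflicting context to become writeable before asking netfs to
 * retry with -EAGAIN.
 */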
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
					struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;

	snapc = ceph_find_incompatible(folio_page(*foliop, 0));
	if (snapc) {
		int r;

		folio_unlock(*foliop);
		folio_put(*foliop);
		*foliop = NULL;
		if (IS_ERR(snapc))
			return PTR_ERR(snapc);

		ceph_queue_writeback(inode);
		r = wait_event_killable(ci->i_cap_wq,
					context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
		return r == 0 ? -EAGAIN : r;
	}
	return 0;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct folio *folio = NULL;
	int r;

	r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
	if (r < 0)
		return r;

	folio_wait_fscache(folio);
	WARN_ON_ONCE(!folio_test_locked(folio));
	*pagep = &folio->page;
	return 0;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
	     inode, folio, (int)pos, (int)copied, (int)len);

	if (!folio_test_uptodate(folio)) {
		/* just return that nothing was copied on a short copy */
		if (copied < len) {
			copied = 0;
			goto out;
		}
		folio_mark_uptodate(folio);
	}

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	folio_mark_dirty(folio);

out:
	folio_unlock(folio);
	folio_put(folio);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);

	return copied;
}

const struct address_space_operations ceph_aops = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.dirty_folio = ceph_dirty_folio,
	.invalidate_folio = ceph_invalidate_folio,
	.release_folio = ceph_release_folio,
	.direct_IO = noop_direct_IO,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
	     inode, ceph_vinop(inode), off);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
	if (err < 0)
		goto out_restore;

	dout("filemap_fault %p %llu got cap refs on %s\n",
	     inode, off, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    !ceph_has_inline_data(ci)) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
		dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
		     inode, off, ceph_cap_string(got), ret);
	} else
		err = -EAGAIN;

	ceph_put_cap_refs(ci, got);

	if (err != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		filemap_invalidate_lock_shared(mapping);
		page = find_or_create_page(mapping, 0,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			ret = vmf_error(err);
			goto out_inline;
		}
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		filemap_invalidate_unlock_shared(mapping);
		dout("filemap_fault %p %llu read inline data ret %x\n",
		     inode, off, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (err < 0)
		ret = vmf_error(err);

	return ret;
}
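
/*
 * Make a page writeable: take Fw cap references, dirty the page in the
 * current snap context (waiting out any incompatible older context via
 * ceph_find_incompatible()), and mark the Fw caps dirty.
 */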
static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, err;
	sigset_t oldset;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	if (ceph_inode_is_shutdown(inode))
		return ret;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	sb_start_pagefault(inode->i_sb);
	ceph_block_sigs(&oldset);

	if (off + thp_size(page) <= size)
		len = thp_size(page);
	else
		len = offset_in_thp(page, size);

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
	if (err < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);
	inode_inc_iversion_raw(inode);

	do {
		struct ceph_snap_context *snapc;

		lock_page(page);

		if (page_mkwrite_check_truncate(page, inode) < 0) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		snapc = ceph_find_incompatible(page);
		if (!snapc) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
			break;
		}

		unlock_page(page);

		if (IS_ERR(snapc)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}

		ceph_queue_writeback(inode);
		err = wait_event_killable(ci->i_cap_wq,
				context_is_writeable_or_written(inode, snapc));
		ceph_put_snap_context(snapc);
	} while (err == 0);

	if (ret == VM_FAULT_LOCKED) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs_async(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	sb_end_pagefault(inode->i_sb);
	ceph_free_cap_flush(prealloc_cf);
	if (err < 0)
		ret = vmf_error(err);
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
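
/*
 * Move inline file data out to the first RADOS object: create the object,
 * write the cached data guarded by a cmpxattr on "inline_version", then
 * mark the inode's inline_version as NONE and dirty its Fw caps.
 */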
int ceph_uninline_data(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct folio *folio = NULL;
	u64 inline_version = CEPH_INLINE_NONE;
	struct page *pages[1];
	int err = 0;
	u64 len;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (ceph_inode_is_shutdown(inode)) {
		err = -EIO;
		goto out;
	}

	if (inline_version == CEPH_INLINE_NONE)
		return 0;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if (inline_version == 1) /* initial version, no data */
		goto out_uninline;

	folio = read_mapping_folio(inode->i_mapping, 0, file);
	if (IS_ERR(folio)) {
		err = PTR_ERR(folio);
		goto out;
	}

	folio_lock(folio);

	len = i_size_read(inode);
	if (len > folio_size(folio))
		len = folio_size(folio);

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	req->r_mtime = inode->i_mtime;
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out_unlock;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_unlock;
	}

	pages[0] = folio_page(folio, 0);
	osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put_req;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put_req;
	}

	req->r_mtime = inode->i_mtime;
	ceph_osdc_start_request(&fsc->client->osdc, req);
	err = ceph_osdc_wait_request(&fsc->client->osdc, req);

	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
				  req->r_end_latency, len, err);

out_uninline:
	if (!err) {
		int dirty;

		/* Set to CAP_INLINE_NONE and dirty the caps */
		down_read(&fsc->mdsc->snap_rwsem);
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		up_read(&fsc->mdsc->snap_rwsem);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
out_put_req:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out_unlock:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ = 1,
	POOL_WRITE = 2,
};
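
/*
 * Look up, or probe and cache, our access rights to a data pool: check
 * the rbtree of cached results first, otherwise issue a read (STAT) and
 * an exclusive-create write against the first object and record what
 * the OSDs allowed in mdsc->pool_perm_tree.
 */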
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	ceph_osdc_start_request(&fsc->client->osdc, rd_req);

	wr_req->r_mtime = ci->netfs.inode.i_mtime;
	ceph_osdc_start_request(&fsc->client->osdc, wr_req);

	err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM) {
		if (err == -EBLOCKLISTED)
			fsc->blocklisted = true;
		goto out_unlock;
	}

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		if (err2 == -EBLOCKLISTED)
			fsc->blocklisted = true;
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}

int ceph_pool_perm_check(struct inode *inode, int need)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_string *pool_ns;
	s64 pool;
	int ret, flags;

	/* Only need to do this for regular files */
	if (!S_ISREG(inode->i_mode))
		return 0;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}