1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 4 #include <linux/backing-dev.h> 5 #include <linux/fs.h> 6 #include <linux/mm.h> 7 #include <linux/pagemap.h> 8 #include <linux/writeback.h> /* generic_writepages */ 9 #include <linux/slab.h> 10 #include <linux/pagevec.h> 11 #include <linux/task_io_accounting_ops.h> 12 #include <linux/signal.h> 13 #include <linux/iversion.h> 14 #include <linux/ktime.h> 15 #include <linux/netfs.h> 16 17 #include "super.h" 18 #include "mds_client.h" 19 #include "cache.h" 20 #include "metric.h" 21 #include <linux/ceph/osd_client.h> 22 #include <linux/ceph/striper.h> 23 24 /* 25 * Ceph address space ops. 26 * 27 * There are a few funny things going on here. 28 * 29 * The page->private field is used to reference a struct 30 * ceph_snap_context for _every_ dirty page. This indicates which 31 * snapshot the page was logically dirtied in, and thus which snap 32 * context needs to be associated with the osd write during writeback. 33 * 34 * Similarly, struct ceph_inode_info maintains a set of counters to 35 * count dirty pages on the inode. In the absence of snapshots, 36 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 37 * 38 * When a snapshot is taken (that is, when the client receives 39 * notification that a snapshot was taken), each inode with caps and 40 * with dirty pages (dirty pages implies there is a cap) gets a new 41 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 42 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 43 * moved to capsnap->dirty. (Unless a sync write is currently in 44 * progress. In that case, the capsnap is said to be "pending", new 45 * writes cannot start, and the capsnap isn't "finalized" until the 46 * write completes (or fails) and a final size/mtime for the inode for 47 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 48 * 49 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 50 * we look for the first capsnap in i_cap_snaps and write out pages in 51 * that snap context _only_. Then we move on to the next capsnap, 52 * eventually reaching the "live" or "head" context (i.e., pages that 53 * are not yet snapped) and are writing the most recently dirtied 54 * pages. 55 * 56 * Invalidate and so forth must take care to ensure the dirty page 57 * accounting is preserved. 58 */ 59 60 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 61 #define CONGESTION_OFF_THRESH(congestion_kb) \ 62 (CONGESTION_ON_THRESH(congestion_kb) - \ 63 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 64 65 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, 66 struct folio *folio, void **_fsdata); 67 68 static inline struct ceph_snap_context *page_snap_context(struct page *page) 69 { 70 if (PagePrivate(page)) 71 return (void *)page->private; 72 return NULL; 73 } 74 75 /* 76 * Dirty a page. Optimistically adjust accounting, on the assumption 77 * that we won't race with invalidate. If we do, readjust. 78 */ 79 static int ceph_set_page_dirty(struct page *page) 80 { 81 struct address_space *mapping = page->mapping; 82 struct inode *inode; 83 struct ceph_inode_info *ci; 84 struct ceph_snap_context *snapc; 85 86 if (PageDirty(page)) { 87 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 88 mapping->host, page, page->index); 89 BUG_ON(!PagePrivate(page)); 90 return 0; 91 } 92 93 inode = mapping->host; 94 ci = ceph_inode(inode); 95 96 /* dirty the head */ 97 spin_lock(&ci->i_ceph_lock); 98 BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference 99 if (__ceph_have_pending_cap_snap(ci)) { 100 struct ceph_cap_snap *capsnap = 101 list_last_entry(&ci->i_cap_snaps, 102 struct ceph_cap_snap, 103 ci_item); 104 snapc = ceph_get_snap_context(capsnap->context); 105 capsnap->dirty_pages++; 106 } else { 107 BUG_ON(!ci->i_head_snapc); 108 snapc = ceph_get_snap_context(ci->i_head_snapc); 109 ++ci->i_wrbuffer_ref_head; 110 } 111 if (ci->i_wrbuffer_ref == 0) 112 ihold(inode); 113 ++ci->i_wrbuffer_ref; 114 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 115 "snapc %p seq %lld (%d snaps)\n", 116 mapping->host, page, page->index, 117 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 118 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 119 snapc, snapc->seq, snapc->num_snaps); 120 spin_unlock(&ci->i_ceph_lock); 121 122 /* 123 * Reference snap context in page->private. Also set 124 * PagePrivate so that we get invalidatepage callback. 125 */ 126 BUG_ON(PagePrivate(page)); 127 attach_page_private(page, snapc); 128 129 return __set_page_dirty_nobuffers(page); 130 } 131 132 /* 133 * If we are truncating the full page (i.e. offset == 0), adjust the 134 * dirty page counters appropriately. Only called if there is private 135 * data on the page. 136 */ 137 static void ceph_invalidatepage(struct page *page, unsigned int offset, 138 unsigned int length) 139 { 140 struct inode *inode; 141 struct ceph_inode_info *ci; 142 struct ceph_snap_context *snapc; 143 144 wait_on_page_fscache(page); 145 146 inode = page->mapping->host; 147 ci = ceph_inode(inode); 148 149 if (offset != 0 || length != thp_size(page)) { 150 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 151 inode, page, page->index, offset, length); 152 return; 153 } 154 155 WARN_ON(!PageLocked(page)); 156 if (!PagePrivate(page)) 157 return; 158 159 dout("%p invalidatepage %p idx %lu full dirty page\n", 160 inode, page, page->index); 161 162 snapc = detach_page_private(page); 163 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 164 ceph_put_snap_context(snapc); 165 } 166 167 static int ceph_releasepage(struct page *page, gfp_t gfp) 168 { 169 dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, 170 page, page->index, PageDirty(page) ? "" : "not "); 171 172 if (PageFsCache(page)) { 173 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) 174 return 0; 175 wait_on_page_fscache(page); 176 } 177 return !PagePrivate(page); 178 } 179 180 static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq) 181 { 182 struct inode *inode = rreq->mapping->host; 183 struct ceph_inode_info *ci = ceph_inode(inode); 184 struct ceph_file_layout *lo = &ci->i_layout; 185 u32 blockoff; 186 u64 blockno; 187 188 /* Expand the start downward */ 189 blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff); 190 rreq->start = blockno * lo->stripe_unit; 191 rreq->len += blockoff; 192 193 /* Now, round up the length to the next block */ 194 rreq->len = roundup(rreq->len, lo->stripe_unit); 195 } 196 197 static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq) 198 { 199 struct inode *inode = subreq->rreq->mapping->host; 200 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 201 struct ceph_inode_info *ci = ceph_inode(inode); 202 u64 objno, objoff; 203 u32 xlen; 204 205 /* Truncate the extent at the end of the current block */ 206 ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len, 207 &objno, &objoff, &xlen); 208 subreq->len = min(xlen, fsc->mount_options->rsize); 209 return true; 210 } 211 212 static void finish_netfs_read(struct ceph_osd_request *req) 213 { 214 struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode); 215 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 216 struct netfs_read_subrequest *subreq = req->r_priv; 217 int num_pages; 218 int err = req->r_result; 219 220 ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 221 req->r_end_latency, osd_data->length, err); 222 223 dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 224 subreq->len, i_size_read(req->r_inode)); 225 226 /* no object means success but no data */ 227 if (err == -ENOENT) 228 err = 0; 229 else if (err == -EBLOCKLISTED) 230 fsc->blocklisted = true; 231 232 if (err >= 0 && err < subreq->len) 233 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 234 235 netfs_subreq_terminated(subreq, err, true); 236 237 num_pages = calc_pages_for(osd_data->alignment, osd_data->length); 238 ceph_put_page_vector(osd_data->pages, num_pages, false); 239 iput(req->r_inode); 240 } 241 242 static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq) 243 { 244 struct netfs_read_request *rreq = subreq->rreq; 245 struct inode *inode = rreq->mapping->host; 246 struct ceph_inode_info *ci = ceph_inode(inode); 247 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 248 struct ceph_osd_request *req; 249 struct ceph_vino vino = ceph_vino(inode); 250 struct iov_iter iter; 251 struct page **pages; 252 size_t page_off; 253 int err = 0; 254 u64 len = subreq->len; 255 256 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len, 257 0, 1, CEPH_OSD_OP_READ, 258 CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica, 259 NULL, ci->i_truncate_seq, ci->i_truncate_size, false); 260 if (IS_ERR(req)) { 261 err = PTR_ERR(req); 262 req = NULL; 263 goto out; 264 } 265 266 dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len); 267 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len); 268 err = iov_iter_get_pages_alloc(&iter, &pages, len, &page_off); 269 if (err < 0) { 270 dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err); 271 goto out; 272 } 273 274 /* should always give us a page-aligned read */ 275 WARN_ON_ONCE(page_off); 276 len = err; 277 278 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); 279 req->r_callback = finish_netfs_read; 280 req->r_priv = subreq; 281 req->r_inode = inode; 282 ihold(inode); 283 284 err = ceph_osdc_start_request(req->r_osdc, req, false); 285 if (err) 286 iput(inode); 287 out: 288 ceph_osdc_put_request(req); 289 if (err) 290 netfs_subreq_terminated(subreq, err, false); 291 dout("%s: result %d\n", __func__, err); 292 } 293 294 static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file) 295 { 296 } 297 298 static void ceph_readahead_cleanup(struct address_space *mapping, void *priv) 299 { 300 struct inode *inode = mapping->host; 301 struct ceph_inode_info *ci = ceph_inode(inode); 302 int got = (uintptr_t)priv; 303 304 if (got) 305 ceph_put_cap_refs(ci, got); 306 } 307 308 static const struct netfs_read_request_ops ceph_netfs_read_ops = { 309 .init_rreq = ceph_init_rreq, 310 .is_cache_enabled = ceph_is_cache_enabled, 311 .begin_cache_operation = ceph_begin_cache_operation, 312 .issue_op = ceph_netfs_issue_op, 313 .expand_readahead = ceph_netfs_expand_readahead, 314 .clamp_length = ceph_netfs_clamp_length, 315 .check_write_begin = ceph_netfs_check_write_begin, 316 .cleanup = ceph_readahead_cleanup, 317 }; 318 319 /* read a single page, without unlocking it. */ 320 static int ceph_readpage(struct file *file, struct page *subpage) 321 { 322 struct folio *folio = page_folio(subpage); 323 struct inode *inode = file_inode(file); 324 struct ceph_inode_info *ci = ceph_inode(inode); 325 struct ceph_vino vino = ceph_vino(inode); 326 size_t len = folio_size(folio); 327 u64 off = folio_file_pos(folio); 328 329 if (ci->i_inline_version != CEPH_INLINE_NONE) { 330 /* 331 * Uptodate inline data should have been added 332 * into page cache while getting Fcr caps. 333 */ 334 if (off == 0) { 335 folio_unlock(folio); 336 return -EINVAL; 337 } 338 zero_user_segment(&folio->page, 0, folio_size(folio)); 339 folio_mark_uptodate(folio); 340 folio_unlock(folio); 341 return 0; 342 } 343 344 dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n", 345 vino.ino, vino.snap, file, off, len, folio, folio_index(folio)); 346 347 return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL); 348 } 349 350 static void ceph_readahead(struct readahead_control *ractl) 351 { 352 struct inode *inode = file_inode(ractl->file); 353 struct ceph_file_info *fi = ractl->file->private_data; 354 struct ceph_rw_context *rw_ctx; 355 int got = 0; 356 int ret = 0; 357 358 if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) 359 return; 360 361 rw_ctx = ceph_find_rw_context(fi); 362 if (!rw_ctx) { 363 /* 364 * readahead callers do not necessarily hold Fcb caps 365 * (e.g. fadvise, madvise). 366 */ 367 int want = CEPH_CAP_FILE_CACHE; 368 369 ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got); 370 if (ret < 0) 371 dout("start_read %p, error getting cap\n", inode); 372 else if (!(got & want)) 373 dout("start_read %p, no cache cap\n", inode); 374 375 if (ret <= 0) 376 return; 377 } 378 netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got); 379 } 380 381 struct ceph_writeback_ctl 382 { 383 loff_t i_size; 384 u64 truncate_size; 385 u32 truncate_seq; 386 bool size_stable; 387 bool head_snapc; 388 }; 389 390 /* 391 * Get ref for the oldest snapc for an inode with dirty data... that is, the 392 * only snap context we are allowed to write back. 393 */ 394 static struct ceph_snap_context * 395 get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl, 396 struct ceph_snap_context *page_snapc) 397 { 398 struct ceph_inode_info *ci = ceph_inode(inode); 399 struct ceph_snap_context *snapc = NULL; 400 struct ceph_cap_snap *capsnap = NULL; 401 402 spin_lock(&ci->i_ceph_lock); 403 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 404 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 405 capsnap->context, capsnap->dirty_pages); 406 if (!capsnap->dirty_pages) 407 continue; 408 409 /* get i_size, truncate_{seq,size} for page_snapc? */ 410 if (snapc && capsnap->context != page_snapc) 411 continue; 412 413 if (ctl) { 414 if (capsnap->writing) { 415 ctl->i_size = i_size_read(inode); 416 ctl->size_stable = false; 417 } else { 418 ctl->i_size = capsnap->size; 419 ctl->size_stable = true; 420 } 421 ctl->truncate_size = capsnap->truncate_size; 422 ctl->truncate_seq = capsnap->truncate_seq; 423 ctl->head_snapc = false; 424 } 425 426 if (snapc) 427 break; 428 429 snapc = ceph_get_snap_context(capsnap->context); 430 if (!page_snapc || 431 page_snapc == snapc || 432 page_snapc->seq > snapc->seq) 433 break; 434 } 435 if (!snapc && ci->i_wrbuffer_ref_head) { 436 snapc = ceph_get_snap_context(ci->i_head_snapc); 437 dout(" head snapc %p has %d dirty pages\n", 438 snapc, ci->i_wrbuffer_ref_head); 439 if (ctl) { 440 ctl->i_size = i_size_read(inode); 441 ctl->truncate_size = ci->i_truncate_size; 442 ctl->truncate_seq = ci->i_truncate_seq; 443 ctl->size_stable = false; 444 ctl->head_snapc = true; 445 } 446 } 447 spin_unlock(&ci->i_ceph_lock); 448 return snapc; 449 } 450 451 static u64 get_writepages_data_length(struct inode *inode, 452 struct page *page, u64 start) 453 { 454 struct ceph_inode_info *ci = ceph_inode(inode); 455 struct ceph_snap_context *snapc = page_snap_context(page); 456 struct ceph_cap_snap *capsnap = NULL; 457 u64 end = i_size_read(inode); 458 459 if (snapc != ci->i_head_snapc) { 460 bool found = false; 461 spin_lock(&ci->i_ceph_lock); 462 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 463 if (capsnap->context == snapc) { 464 if (!capsnap->writing) 465 end = capsnap->size; 466 found = true; 467 break; 468 } 469 } 470 spin_unlock(&ci->i_ceph_lock); 471 WARN_ON(!found); 472 } 473 if (end > page_offset(page) + thp_size(page)) 474 end = page_offset(page) + thp_size(page); 475 return end > start ? end - start : 0; 476 } 477 478 /* 479 * Write a single page, but leave the page locked. 480 * 481 * If we get a write error, mark the mapping for error, but still adjust the 482 * dirty page accounting (i.e., page is no longer dirty). 483 */ 484 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 485 { 486 struct inode *inode = page->mapping->host; 487 struct ceph_inode_info *ci = ceph_inode(inode); 488 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 489 struct ceph_snap_context *snapc, *oldest; 490 loff_t page_off = page_offset(page); 491 int err; 492 loff_t len = thp_size(page); 493 struct ceph_writeback_ctl ceph_wbc; 494 struct ceph_osd_client *osdc = &fsc->client->osdc; 495 struct ceph_osd_request *req; 496 497 dout("writepage %p idx %lu\n", page, page->index); 498 499 /* verify this is a writeable snap context */ 500 snapc = page_snap_context(page); 501 if (!snapc) { 502 dout("writepage %p page %p not dirty?\n", inode, page); 503 return 0; 504 } 505 oldest = get_oldest_context(inode, &ceph_wbc, snapc); 506 if (snapc->seq > oldest->seq) { 507 dout("writepage %p page %p snapc %p not writeable - noop\n", 508 inode, page, snapc); 509 /* we should only noop if called by kswapd */ 510 WARN_ON(!(current->flags & PF_MEMALLOC)); 511 ceph_put_snap_context(oldest); 512 redirty_page_for_writepage(wbc, page); 513 return 0; 514 } 515 ceph_put_snap_context(oldest); 516 517 /* is this a partial page at end of file? */ 518 if (page_off >= ceph_wbc.i_size) { 519 dout("%p page eof %llu\n", page, ceph_wbc.i_size); 520 page->mapping->a_ops->invalidatepage(page, 0, thp_size(page)); 521 return 0; 522 } 523 524 if (ceph_wbc.i_size < page_off + len) 525 len = ceph_wbc.i_size - page_off; 526 527 dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n", 528 inode, page, page->index, page_off, len, snapc, snapc->seq); 529 530 if (atomic_long_inc_return(&fsc->writeback_count) > 531 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 532 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 533 534 set_page_writeback(page); 535 req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1, 536 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc, 537 ceph_wbc.truncate_seq, ceph_wbc.truncate_size, 538 true); 539 if (IS_ERR(req)) { 540 redirty_page_for_writepage(wbc, page); 541 end_page_writeback(page); 542 return PTR_ERR(req); 543 } 544 545 /* it may be a short write due to an object boundary */ 546 WARN_ON_ONCE(len > thp_size(page)); 547 osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false); 548 dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len); 549 550 req->r_mtime = inode->i_mtime; 551 err = ceph_osdc_start_request(osdc, req, true); 552 if (!err) 553 err = ceph_osdc_wait_request(osdc, req); 554 555 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 556 req->r_end_latency, len, err); 557 558 ceph_osdc_put_request(req); 559 if (err == 0) 560 err = len; 561 562 if (err < 0) { 563 struct writeback_control tmp_wbc; 564 if (!wbc) 565 wbc = &tmp_wbc; 566 if (err == -ERESTARTSYS) { 567 /* killed by SIGKILL */ 568 dout("writepage interrupted page %p\n", page); 569 redirty_page_for_writepage(wbc, page); 570 end_page_writeback(page); 571 return err; 572 } 573 if (err == -EBLOCKLISTED) 574 fsc->blocklisted = true; 575 dout("writepage setting page/mapping error %d %p\n", 576 err, page); 577 mapping_set_error(&inode->i_data, err); 578 wbc->pages_skipped++; 579 } else { 580 dout("writepage cleaned page %p\n", page); 581 err = 0; /* vfs expects us to return 0 */ 582 } 583 oldest = detach_page_private(page); 584 WARN_ON_ONCE(oldest != snapc); 585 end_page_writeback(page); 586 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 587 ceph_put_snap_context(snapc); /* page's reference */ 588 589 if (atomic_long_dec_return(&fsc->writeback_count) < 590 CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb)) 591 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 592 593 return err; 594 } 595 596 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 597 { 598 int err; 599 struct inode *inode = page->mapping->host; 600 BUG_ON(!inode); 601 ihold(inode); 602 err = writepage_nounlock(page, wbc); 603 if (err == -ERESTARTSYS) { 604 /* direct memory reclaimer was killed by SIGKILL. return 0 605 * to prevent caller from setting mapping/page error */ 606 err = 0; 607 } 608 unlock_page(page); 609 iput(inode); 610 return err; 611 } 612 613 /* 614 * async writeback completion handler. 615 * 616 * If we get an error, set the mapping error bit, but not the individual 617 * page error bits. 618 */ 619 static void writepages_finish(struct ceph_osd_request *req) 620 { 621 struct inode *inode = req->r_inode; 622 struct ceph_inode_info *ci = ceph_inode(inode); 623 struct ceph_osd_data *osd_data; 624 struct page *page; 625 int num_pages, total_pages = 0; 626 int i, j; 627 int rc = req->r_result; 628 struct ceph_snap_context *snapc = req->r_snapc; 629 struct address_space *mapping = inode->i_mapping; 630 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 631 unsigned int len = 0; 632 bool remove_page; 633 634 dout("writepages_finish %p rc %d\n", inode, rc); 635 if (rc < 0) { 636 mapping_set_error(mapping, rc); 637 ceph_set_error_write(ci); 638 if (rc == -EBLOCKLISTED) 639 fsc->blocklisted = true; 640 } else { 641 ceph_clear_error_write(ci); 642 } 643 644 /* 645 * We lost the cache cap, need to truncate the page before 646 * it is unlocked, otherwise we'd truncate it later in the 647 * page truncation thread, possibly losing some data that 648 * raced its way in 649 */ 650 remove_page = !(ceph_caps_issued(ci) & 651 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 652 653 /* clean all pages */ 654 for (i = 0; i < req->r_num_ops; i++) { 655 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) 656 break; 657 658 osd_data = osd_req_op_extent_osd_data(req, i); 659 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 660 len += osd_data->length; 661 num_pages = calc_pages_for((u64)osd_data->alignment, 662 (u64)osd_data->length); 663 total_pages += num_pages; 664 for (j = 0; j < num_pages; j++) { 665 page = osd_data->pages[j]; 666 BUG_ON(!page); 667 WARN_ON(!PageUptodate(page)); 668 669 if (atomic_long_dec_return(&fsc->writeback_count) < 670 CONGESTION_OFF_THRESH( 671 fsc->mount_options->congestion_kb)) 672 clear_bdi_congested(inode_to_bdi(inode), 673 BLK_RW_ASYNC); 674 675 ceph_put_snap_context(detach_page_private(page)); 676 end_page_writeback(page); 677 dout("unlocking %p\n", page); 678 679 if (remove_page) 680 generic_error_remove_page(inode->i_mapping, 681 page); 682 683 unlock_page(page); 684 } 685 dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 686 inode, osd_data->length, rc >= 0 ? num_pages : 0); 687 688 release_pages(osd_data->pages, num_pages); 689 } 690 691 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 692 req->r_end_latency, len, rc); 693 694 ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); 695 696 osd_data = osd_req_op_extent_osd_data(req, 0); 697 if (osd_data->pages_from_pool) 698 mempool_free(osd_data->pages, ceph_wb_pagevec_pool); 699 else 700 kfree(osd_data->pages); 701 ceph_osdc_put_request(req); 702 } 703 704 /* 705 * initiate async writeback 706 */ 707 static int ceph_writepages_start(struct address_space *mapping, 708 struct writeback_control *wbc) 709 { 710 struct inode *inode = mapping->host; 711 struct ceph_inode_info *ci = ceph_inode(inode); 712 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 713 struct ceph_vino vino = ceph_vino(inode); 714 pgoff_t index, start_index, end = -1; 715 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 716 struct pagevec pvec; 717 int rc = 0; 718 unsigned int wsize = i_blocksize(inode); 719 struct ceph_osd_request *req = NULL; 720 struct ceph_writeback_ctl ceph_wbc; 721 bool should_loop, range_whole = false; 722 bool done = false; 723 724 dout("writepages_start %p (mode=%s)\n", inode, 725 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 726 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 727 728 if (ceph_inode_is_shutdown(inode)) { 729 if (ci->i_wrbuffer_ref > 0) { 730 pr_warn_ratelimited( 731 "writepage_start %p %lld forced umount\n", 732 inode, ceph_ino(inode)); 733 } 734 mapping_set_error(mapping, -EIO); 735 return -EIO; /* we're in a forced umount, don't write! */ 736 } 737 if (fsc->mount_options->wsize < wsize) 738 wsize = fsc->mount_options->wsize; 739 740 pagevec_init(&pvec); 741 742 start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 743 index = start_index; 744 745 retry: 746 /* find oldest snap context with dirty data */ 747 snapc = get_oldest_context(inode, &ceph_wbc, NULL); 748 if (!snapc) { 749 /* hmm, why does writepages get called when there 750 is no dirty data? */ 751 dout(" no snap context with dirty data?\n"); 752 goto out; 753 } 754 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 755 snapc, snapc->seq, snapc->num_snaps); 756 757 should_loop = false; 758 if (ceph_wbc.head_snapc && snapc != last_snapc) { 759 /* where to start/end? */ 760 if (wbc->range_cyclic) { 761 index = start_index; 762 end = -1; 763 if (index > 0) 764 should_loop = true; 765 dout(" cyclic, start at %lu\n", index); 766 } else { 767 index = wbc->range_start >> PAGE_SHIFT; 768 end = wbc->range_end >> PAGE_SHIFT; 769 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 770 range_whole = true; 771 dout(" not cyclic, %lu to %lu\n", index, end); 772 } 773 } else if (!ceph_wbc.head_snapc) { 774 /* Do not respect wbc->range_{start,end}. Dirty pages 775 * in that range can be associated with newer snapc. 776 * They are not writeable until we write all dirty pages 777 * associated with 'snapc' get written */ 778 if (index > 0) 779 should_loop = true; 780 dout(" non-head snapc, range whole\n"); 781 } 782 783 ceph_put_snap_context(last_snapc); 784 last_snapc = snapc; 785 786 while (!done && index <= end) { 787 int num_ops = 0, op_idx; 788 unsigned i, pvec_pages, max_pages, locked_pages = 0; 789 struct page **pages = NULL, **data_pages; 790 struct page *page; 791 pgoff_t strip_unit_end = 0; 792 u64 offset = 0, len = 0; 793 bool from_pool = false; 794 795 max_pages = wsize >> PAGE_SHIFT; 796 797 get_more_pages: 798 pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, 799 end, PAGECACHE_TAG_DIRTY); 800 dout("pagevec_lookup_range_tag got %d\n", pvec_pages); 801 if (!pvec_pages && !locked_pages) 802 break; 803 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 804 page = pvec.pages[i]; 805 dout("? %p idx %lu\n", page, page->index); 806 if (locked_pages == 0) 807 lock_page(page); /* first page */ 808 else if (!trylock_page(page)) 809 break; 810 811 /* only dirty pages, or our accounting breaks */ 812 if (unlikely(!PageDirty(page)) || 813 unlikely(page->mapping != mapping)) { 814 dout("!dirty or !mapping %p\n", page); 815 unlock_page(page); 816 continue; 817 } 818 /* only if matching snap context */ 819 pgsnapc = page_snap_context(page); 820 if (pgsnapc != snapc) { 821 dout("page snapc %p %lld != oldest %p %lld\n", 822 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 823 if (!should_loop && 824 !ceph_wbc.head_snapc && 825 wbc->sync_mode != WB_SYNC_NONE) 826 should_loop = true; 827 unlock_page(page); 828 continue; 829 } 830 if (page_offset(page) >= ceph_wbc.i_size) { 831 dout("%p page eof %llu\n", 832 page, ceph_wbc.i_size); 833 if ((ceph_wbc.size_stable || 834 page_offset(page) >= i_size_read(inode)) && 835 clear_page_dirty_for_io(page)) 836 mapping->a_ops->invalidatepage(page, 837 0, thp_size(page)); 838 unlock_page(page); 839 continue; 840 } 841 if (strip_unit_end && (page->index > strip_unit_end)) { 842 dout("end of strip unit %p\n", page); 843 unlock_page(page); 844 break; 845 } 846 if (PageWriteback(page)) { 847 if (wbc->sync_mode == WB_SYNC_NONE) { 848 dout("%p under writeback\n", page); 849 unlock_page(page); 850 continue; 851 } 852 dout("waiting on writeback %p\n", page); 853 wait_on_page_writeback(page); 854 } 855 856 if (!clear_page_dirty_for_io(page)) { 857 dout("%p !clear_page_dirty_for_io\n", page); 858 unlock_page(page); 859 continue; 860 } 861 862 /* 863 * We have something to write. If this is 864 * the first locked page this time through, 865 * calculate max possinle write size and 866 * allocate a page array 867 */ 868 if (locked_pages == 0) { 869 u64 objnum; 870 u64 objoff; 871 u32 xlen; 872 873 /* prepare async write request */ 874 offset = (u64)page_offset(page); 875 ceph_calc_file_object_mapping(&ci->i_layout, 876 offset, wsize, 877 &objnum, &objoff, 878 &xlen); 879 len = xlen; 880 881 num_ops = 1; 882 strip_unit_end = page->index + 883 ((len - 1) >> PAGE_SHIFT); 884 885 BUG_ON(pages); 886 max_pages = calc_pages_for(0, (u64)len); 887 pages = kmalloc_array(max_pages, 888 sizeof(*pages), 889 GFP_NOFS); 890 if (!pages) { 891 from_pool = true; 892 pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); 893 BUG_ON(!pages); 894 } 895 896 len = 0; 897 } else if (page->index != 898 (offset + len) >> PAGE_SHIFT) { 899 if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS : 900 CEPH_OSD_MAX_OPS)) { 901 redirty_page_for_writepage(wbc, page); 902 unlock_page(page); 903 break; 904 } 905 906 num_ops++; 907 offset = (u64)page_offset(page); 908 len = 0; 909 } 910 911 /* note position of first page in pvec */ 912 dout("%p will write page %p idx %lu\n", 913 inode, page, page->index); 914 915 if (atomic_long_inc_return(&fsc->writeback_count) > 916 CONGESTION_ON_THRESH( 917 fsc->mount_options->congestion_kb)) { 918 set_bdi_congested(inode_to_bdi(inode), 919 BLK_RW_ASYNC); 920 } 921 922 923 pages[locked_pages++] = page; 924 pvec.pages[i] = NULL; 925 926 len += thp_size(page); 927 } 928 929 /* did we get anything? */ 930 if (!locked_pages) 931 goto release_pvec_pages; 932 if (i) { 933 unsigned j, n = 0; 934 /* shift unused page to beginning of pvec */ 935 for (j = 0; j < pvec_pages; j++) { 936 if (!pvec.pages[j]) 937 continue; 938 if (n < j) 939 pvec.pages[n] = pvec.pages[j]; 940 n++; 941 } 942 pvec.nr = n; 943 944 if (pvec_pages && i == pvec_pages && 945 locked_pages < max_pages) { 946 dout("reached end pvec, trying for more\n"); 947 pagevec_release(&pvec); 948 goto get_more_pages; 949 } 950 } 951 952 new_request: 953 offset = page_offset(pages[0]); 954 len = wsize; 955 956 req = ceph_osdc_new_request(&fsc->client->osdc, 957 &ci->i_layout, vino, 958 offset, &len, 0, num_ops, 959 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 960 snapc, ceph_wbc.truncate_seq, 961 ceph_wbc.truncate_size, false); 962 if (IS_ERR(req)) { 963 req = ceph_osdc_new_request(&fsc->client->osdc, 964 &ci->i_layout, vino, 965 offset, &len, 0, 966 min(num_ops, 967 CEPH_OSD_SLAB_OPS), 968 CEPH_OSD_OP_WRITE, 969 CEPH_OSD_FLAG_WRITE, 970 snapc, ceph_wbc.truncate_seq, 971 ceph_wbc.truncate_size, true); 972 BUG_ON(IS_ERR(req)); 973 } 974 BUG_ON(len < page_offset(pages[locked_pages - 1]) + 975 thp_size(page) - offset); 976 977 req->r_callback = writepages_finish; 978 req->r_inode = inode; 979 980 /* Format the osd request message and submit the write */ 981 len = 0; 982 data_pages = pages; 983 op_idx = 0; 984 for (i = 0; i < locked_pages; i++) { 985 u64 cur_offset = page_offset(pages[i]); 986 if (offset + len != cur_offset) { 987 if (op_idx + 1 == req->r_num_ops) 988 break; 989 osd_req_op_extent_dup_last(req, op_idx, 990 cur_offset - offset); 991 dout("writepages got pages at %llu~%llu\n", 992 offset, len); 993 osd_req_op_extent_osd_data_pages(req, op_idx, 994 data_pages, len, 0, 995 from_pool, false); 996 osd_req_op_extent_update(req, op_idx, len); 997 998 len = 0; 999 offset = cur_offset; 1000 data_pages = pages + i; 1001 op_idx++; 1002 } 1003 1004 set_page_writeback(pages[i]); 1005 len += thp_size(page); 1006 } 1007 1008 if (ceph_wbc.size_stable) { 1009 len = min(len, ceph_wbc.i_size - offset); 1010 } else if (i == locked_pages) { 1011 /* writepages_finish() clears writeback pages 1012 * according to the data length, so make sure 1013 * data length covers all locked pages */ 1014 u64 min_len = len + 1 - thp_size(page); 1015 len = get_writepages_data_length(inode, pages[i - 1], 1016 offset); 1017 len = max(len, min_len); 1018 } 1019 dout("writepages got pages at %llu~%llu\n", offset, len); 1020 1021 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1022 0, from_pool, false); 1023 osd_req_op_extent_update(req, op_idx, len); 1024 1025 BUG_ON(op_idx + 1 != req->r_num_ops); 1026 1027 from_pool = false; 1028 if (i < locked_pages) { 1029 BUG_ON(num_ops <= req->r_num_ops); 1030 num_ops -= req->r_num_ops; 1031 locked_pages -= i; 1032 1033 /* allocate new pages array for next request */ 1034 data_pages = pages; 1035 pages = kmalloc_array(locked_pages, sizeof(*pages), 1036 GFP_NOFS); 1037 if (!pages) { 1038 from_pool = true; 1039 pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); 1040 BUG_ON(!pages); 1041 } 1042 memcpy(pages, data_pages + i, 1043 locked_pages * sizeof(*pages)); 1044 memset(data_pages + i, 0, 1045 locked_pages * sizeof(*pages)); 1046 } else { 1047 BUG_ON(num_ops != req->r_num_ops); 1048 index = pages[i - 1]->index + 1; 1049 /* request message now owns the pages array */ 1050 pages = NULL; 1051 } 1052 1053 req->r_mtime = inode->i_mtime; 1054 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 1055 BUG_ON(rc); 1056 req = NULL; 1057 1058 wbc->nr_to_write -= i; 1059 if (pages) 1060 goto new_request; 1061 1062 /* 1063 * We stop writing back only if we are not doing 1064 * integrity sync. In case of integrity sync we have to 1065 * keep going until we have written all the pages 1066 * we tagged for writeback prior to entering this loop. 1067 */ 1068 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) 1069 done = true; 1070 1071 release_pvec_pages: 1072 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 1073 pvec.nr ? pvec.pages[0] : NULL); 1074 pagevec_release(&pvec); 1075 } 1076 1077 if (should_loop && !done) { 1078 /* more to do; loop back to beginning of file */ 1079 dout("writepages looping back to beginning of file\n"); 1080 end = start_index - 1; /* OK even when start_index == 0 */ 1081 1082 /* to write dirty pages associated with next snapc, 1083 * we need to wait until current writes complete */ 1084 if (wbc->sync_mode != WB_SYNC_NONE && 1085 start_index == 0 && /* all dirty pages were checked */ 1086 !ceph_wbc.head_snapc) { 1087 struct page *page; 1088 unsigned i, nr; 1089 index = 0; 1090 while ((index <= end) && 1091 (nr = pagevec_lookup_tag(&pvec, mapping, &index, 1092 PAGECACHE_TAG_WRITEBACK))) { 1093 for (i = 0; i < nr; i++) { 1094 page = pvec.pages[i]; 1095 if (page_snap_context(page) != snapc) 1096 continue; 1097 wait_on_page_writeback(page); 1098 } 1099 pagevec_release(&pvec); 1100 cond_resched(); 1101 } 1102 } 1103 1104 start_index = 0; 1105 index = 0; 1106 goto retry; 1107 } 1108 1109 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 1110 mapping->writeback_index = index; 1111 1112 out: 1113 ceph_osdc_put_request(req); 1114 ceph_put_snap_context(last_snapc); 1115 dout("writepages dend - startone, rc = %d\n", rc); 1116 return rc; 1117 } 1118 1119 1120 1121 /* 1122 * See if a given @snapc is either writeable, or already written. 1123 */ 1124 static int context_is_writeable_or_written(struct inode *inode, 1125 struct ceph_snap_context *snapc) 1126 { 1127 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL); 1128 int ret = !oldest || snapc->seq <= oldest->seq; 1129 1130 ceph_put_snap_context(oldest); 1131 return ret; 1132 } 1133 1134 /** 1135 * ceph_find_incompatible - find an incompatible context and return it 1136 * @page: page being dirtied 1137 * 1138 * We are only allowed to write into/dirty a page if the page is 1139 * clean, or already dirty within the same snap context. Returns a 1140 * conflicting context if there is one, NULL if there isn't, or a 1141 * negative error code on other errors. 1142 * 1143 * Must be called with page lock held. 1144 */ 1145 static struct ceph_snap_context * 1146 ceph_find_incompatible(struct page *page) 1147 { 1148 struct inode *inode = page->mapping->host; 1149 struct ceph_inode_info *ci = ceph_inode(inode); 1150 1151 if (ceph_inode_is_shutdown(inode)) { 1152 dout(" page %p %llx:%llx is shutdown\n", page, 1153 ceph_vinop(inode)); 1154 return ERR_PTR(-ESTALE); 1155 } 1156 1157 for (;;) { 1158 struct ceph_snap_context *snapc, *oldest; 1159 1160 wait_on_page_writeback(page); 1161 1162 snapc = page_snap_context(page); 1163 if (!snapc || snapc == ci->i_head_snapc) 1164 break; 1165 1166 /* 1167 * this page is already dirty in another (older) snap 1168 * context! is it writeable now? 1169 */ 1170 oldest = get_oldest_context(inode, NULL, NULL); 1171 if (snapc->seq > oldest->seq) { 1172 /* not writeable -- return it for the caller to deal with */ 1173 ceph_put_snap_context(oldest); 1174 dout(" page %p snapc %p not current or oldest\n", page, snapc); 1175 return ceph_get_snap_context(snapc); 1176 } 1177 ceph_put_snap_context(oldest); 1178 1179 /* yay, writeable, do it now (without dropping page lock) */ 1180 dout(" page %p snapc %p not current, but oldest\n", page, snapc); 1181 if (clear_page_dirty_for_io(page)) { 1182 int r = writepage_nounlock(page, NULL); 1183 if (r < 0) 1184 return ERR_PTR(r); 1185 } 1186 } 1187 return NULL; 1188 } 1189 1190 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, 1191 struct folio *folio, void **_fsdata) 1192 { 1193 struct inode *inode = file_inode(file); 1194 struct ceph_inode_info *ci = ceph_inode(inode); 1195 struct ceph_snap_context *snapc; 1196 1197 snapc = ceph_find_incompatible(folio_page(folio, 0)); 1198 if (snapc) { 1199 int r; 1200 1201 folio_unlock(folio); 1202 folio_put(folio); 1203 if (IS_ERR(snapc)) 1204 return PTR_ERR(snapc); 1205 1206 ceph_queue_writeback(inode); 1207 r = wait_event_killable(ci->i_cap_wq, 1208 context_is_writeable_or_written(inode, snapc)); 1209 ceph_put_snap_context(snapc); 1210 return r == 0 ? -EAGAIN : r; 1211 } 1212 return 0; 1213 } 1214 1215 /* 1216 * We are only allowed to write into/dirty the page if the page is 1217 * clean, or already dirty within the same snap context. 1218 */ 1219 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1220 loff_t pos, unsigned len, unsigned aop_flags, 1221 struct page **pagep, void **fsdata) 1222 { 1223 struct inode *inode = file_inode(file); 1224 struct ceph_inode_info *ci = ceph_inode(inode); 1225 struct folio *folio = NULL; 1226 pgoff_t index = pos >> PAGE_SHIFT; 1227 int r; 1228 1229 /* 1230 * Uninlining should have already been done and everything updated, EXCEPT 1231 * for inline_version sent to the MDS. 1232 */ 1233 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1234 unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; 1235 if (aop_flags & AOP_FLAG_NOFS) 1236 fgp_flags |= FGP_NOFS; 1237 folio = __filemap_get_folio(mapping, index, fgp_flags, 1238 mapping_gfp_mask(mapping)); 1239 if (!folio) 1240 return -ENOMEM; 1241 1242 /* 1243 * The inline_version on a new inode is set to 1. If that's the 1244 * case, then the folio is brand new and isn't yet Uptodate. 1245 */ 1246 r = 0; 1247 if (index == 0 && ci->i_inline_version != 1) { 1248 if (!folio_test_uptodate(folio)) { 1249 WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n", 1250 ci->i_inline_version); 1251 r = -EINVAL; 1252 } 1253 goto out; 1254 } 1255 zero_user_segment(&folio->page, 0, folio_size(folio)); 1256 folio_mark_uptodate(folio); 1257 goto out; 1258 } 1259 1260 r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL, 1261 &ceph_netfs_read_ops, NULL); 1262 out: 1263 if (r == 0) 1264 folio_wait_fscache(folio); 1265 if (r < 0) { 1266 if (folio) 1267 folio_put(folio); 1268 } else { 1269 WARN_ON_ONCE(!folio_test_locked(folio)); 1270 *pagep = &folio->page; 1271 } 1272 return r; 1273 } 1274 1275 /* 1276 * we don't do anything in here that simple_write_end doesn't do 1277 * except adjust dirty page accounting 1278 */ 1279 static int ceph_write_end(struct file *file, struct address_space *mapping, 1280 loff_t pos, unsigned len, unsigned copied, 1281 struct page *subpage, void *fsdata) 1282 { 1283 struct folio *folio = page_folio(subpage); 1284 struct inode *inode = file_inode(file); 1285 bool check_cap = false; 1286 1287 dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file, 1288 inode, folio, (int)pos, (int)copied, (int)len); 1289 1290 if (!folio_test_uptodate(folio)) { 1291 /* just return that nothing was copied on a short copy */ 1292 if (copied < len) { 1293 copied = 0; 1294 goto out; 1295 } 1296 folio_mark_uptodate(folio); 1297 } 1298 1299 /* did file size increase? */ 1300 if (pos+copied > i_size_read(inode)) 1301 check_cap = ceph_inode_set_size(inode, pos+copied); 1302 1303 folio_mark_dirty(folio); 1304 1305 out: 1306 folio_unlock(folio); 1307 folio_put(folio); 1308 1309 if (check_cap) 1310 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1311 1312 return copied; 1313 } 1314 1315 const struct address_space_operations ceph_aops = { 1316 .readpage = ceph_readpage, 1317 .readahead = ceph_readahead, 1318 .writepage = ceph_writepage, 1319 .writepages = ceph_writepages_start, 1320 .write_begin = ceph_write_begin, 1321 .write_end = ceph_write_end, 1322 .set_page_dirty = ceph_set_page_dirty, 1323 .invalidatepage = ceph_invalidatepage, 1324 .releasepage = ceph_releasepage, 1325 .direct_IO = noop_direct_IO, 1326 }; 1327 1328 static void ceph_block_sigs(sigset_t *oldset) 1329 { 1330 sigset_t mask; 1331 siginitsetinv(&mask, sigmask(SIGKILL)); 1332 sigprocmask(SIG_BLOCK, &mask, oldset); 1333 } 1334 1335 static void ceph_restore_sigs(sigset_t *oldset) 1336 { 1337 sigprocmask(SIG_SETMASK, oldset, NULL); 1338 } 1339 1340 /* 1341 * vm ops 1342 */ 1343 static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf) 1344 { 1345 struct vm_area_struct *vma = vmf->vma; 1346 struct inode *inode = file_inode(vma->vm_file); 1347 struct ceph_inode_info *ci = ceph_inode(inode); 1348 struct ceph_file_info *fi = vma->vm_file->private_data; 1349 loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; 1350 int want, got, err; 1351 sigset_t oldset; 1352 vm_fault_t ret = VM_FAULT_SIGBUS; 1353 1354 if (ceph_inode_is_shutdown(inode)) 1355 return ret; 1356 1357 ceph_block_sigs(&oldset); 1358 1359 dout("filemap_fault %p %llx.%llx %llu trying to get caps\n", 1360 inode, ceph_vinop(inode), off); 1361 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1362 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1363 else 1364 want = CEPH_CAP_FILE_CACHE; 1365 1366 got = 0; 1367 err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got); 1368 if (err < 0) 1369 goto out_restore; 1370 1371 dout("filemap_fault %p %llu got cap refs on %s\n", 1372 inode, off, ceph_cap_string(got)); 1373 1374 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1375 ci->i_inline_version == CEPH_INLINE_NONE) { 1376 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 1377 ceph_add_rw_context(fi, &rw_ctx); 1378 ret = filemap_fault(vmf); 1379 ceph_del_rw_context(fi, &rw_ctx); 1380 dout("filemap_fault %p %llu drop cap refs %s ret %x\n", 1381 inode, off, ceph_cap_string(got), ret); 1382 } else 1383 err = -EAGAIN; 1384 1385 ceph_put_cap_refs(ci, got); 1386 1387 if (err != -EAGAIN) 1388 goto out_restore; 1389 1390 /* read inline data */ 1391 if (off >= PAGE_SIZE) { 1392 /* does not support inline data > PAGE_SIZE */ 1393 ret = VM_FAULT_SIGBUS; 1394 } else { 1395 struct address_space *mapping = inode->i_mapping; 1396 struct page *page; 1397 1398 filemap_invalidate_lock_shared(mapping); 1399 page = find_or_create_page(mapping, 0, 1400 mapping_gfp_constraint(mapping, ~__GFP_FS)); 1401 if (!page) { 1402 ret = VM_FAULT_OOM; 1403 goto out_inline; 1404 } 1405 err = __ceph_do_getattr(inode, page, 1406 CEPH_STAT_CAP_INLINE_DATA, true); 1407 if (err < 0 || off >= i_size_read(inode)) { 1408 unlock_page(page); 1409 put_page(page); 1410 ret = vmf_error(err); 1411 goto out_inline; 1412 } 1413 if (err < PAGE_SIZE) 1414 zero_user_segment(page, err, PAGE_SIZE); 1415 else 1416 flush_dcache_page(page); 1417 SetPageUptodate(page); 1418 vmf->page = page; 1419 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 1420 out_inline: 1421 filemap_invalidate_unlock_shared(mapping); 1422 dout("filemap_fault %p %llu read inline data ret %x\n", 1423 inode, off, ret); 1424 } 1425 out_restore: 1426 ceph_restore_sigs(&oldset); 1427 if (err < 0) 1428 ret = vmf_error(err); 1429 1430 return ret; 1431 } 1432 1433 static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf) 1434 { 1435 struct vm_area_struct *vma = vmf->vma; 1436 struct inode *inode = file_inode(vma->vm_file); 1437 struct ceph_inode_info *ci = ceph_inode(inode); 1438 struct ceph_file_info *fi = vma->vm_file->private_data; 1439 struct ceph_cap_flush *prealloc_cf; 1440 struct page *page = vmf->page; 1441 loff_t off = page_offset(page); 1442 loff_t size = i_size_read(inode); 1443 size_t len; 1444 int want, got, err; 1445 sigset_t oldset; 1446 vm_fault_t ret = VM_FAULT_SIGBUS; 1447 1448 if (ceph_inode_is_shutdown(inode)) 1449 return ret; 1450 1451 prealloc_cf = ceph_alloc_cap_flush(); 1452 if (!prealloc_cf) 1453 return VM_FAULT_OOM; 1454 1455 sb_start_pagefault(inode->i_sb); 1456 ceph_block_sigs(&oldset); 1457 1458 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1459 struct page *locked_page = NULL; 1460 if (off == 0) { 1461 lock_page(page); 1462 locked_page = page; 1463 } 1464 err = ceph_uninline_data(vma->vm_file, locked_page); 1465 if (locked_page) 1466 unlock_page(locked_page); 1467 if (err < 0) 1468 goto out_free; 1469 } 1470 1471 if (off + thp_size(page) <= size) 1472 len = thp_size(page); 1473 else 1474 len = offset_in_thp(page, size); 1475 1476 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1477 inode, ceph_vinop(inode), off, len, size); 1478 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1479 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1480 else 1481 want = CEPH_CAP_FILE_BUFFER; 1482 1483 got = 0; 1484 err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got); 1485 if (err < 0) 1486 goto out_free; 1487 1488 dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 1489 inode, off, len, ceph_cap_string(got)); 1490 1491 /* Update time before taking page lock */ 1492 file_update_time(vma->vm_file); 1493 inode_inc_iversion_raw(inode); 1494 1495 do { 1496 struct ceph_snap_context *snapc; 1497 1498 lock_page(page); 1499 1500 if (page_mkwrite_check_truncate(page, inode) < 0) { 1501 unlock_page(page); 1502 ret = VM_FAULT_NOPAGE; 1503 break; 1504 } 1505 1506 snapc = ceph_find_incompatible(page); 1507 if (!snapc) { 1508 /* success. we'll keep the page locked. */ 1509 set_page_dirty(page); 1510 ret = VM_FAULT_LOCKED; 1511 break; 1512 } 1513 1514 unlock_page(page); 1515 1516 if (IS_ERR(snapc)) { 1517 ret = VM_FAULT_SIGBUS; 1518 break; 1519 } 1520 1521 ceph_queue_writeback(inode); 1522 err = wait_event_killable(ci->i_cap_wq, 1523 context_is_writeable_or_written(inode, snapc)); 1524 ceph_put_snap_context(snapc); 1525 } while (err == 0); 1526 1527 if (ret == VM_FAULT_LOCKED || 1528 ci->i_inline_version != CEPH_INLINE_NONE) { 1529 int dirty; 1530 spin_lock(&ci->i_ceph_lock); 1531 ci->i_inline_version = CEPH_INLINE_NONE; 1532 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1533 &prealloc_cf); 1534 spin_unlock(&ci->i_ceph_lock); 1535 if (dirty) 1536 __mark_inode_dirty(inode, dirty); 1537 } 1538 1539 dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n", 1540 inode, off, len, ceph_cap_string(got), ret); 1541 ceph_put_cap_refs_async(ci, got); 1542 out_free: 1543 ceph_restore_sigs(&oldset); 1544 sb_end_pagefault(inode->i_sb); 1545 ceph_free_cap_flush(prealloc_cf); 1546 if (err < 0) 1547 ret = vmf_error(err); 1548 return ret; 1549 } 1550 1551 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 1552 char *data, size_t len) 1553 { 1554 struct address_space *mapping = inode->i_mapping; 1555 struct page *page; 1556 1557 if (locked_page) { 1558 page = locked_page; 1559 } else { 1560 if (i_size_read(inode) == 0) 1561 return; 1562 page = find_or_create_page(mapping, 0, 1563 mapping_gfp_constraint(mapping, 1564 ~__GFP_FS)); 1565 if (!page) 1566 return; 1567 if (PageUptodate(page)) { 1568 unlock_page(page); 1569 put_page(page); 1570 return; 1571 } 1572 } 1573 1574 dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1575 inode, ceph_vinop(inode), len, locked_page); 1576 1577 if (len > 0) { 1578 void *kaddr = kmap_atomic(page); 1579 memcpy(kaddr, data, len); 1580 kunmap_atomic(kaddr); 1581 } 1582 1583 if (page != locked_page) { 1584 if (len < PAGE_SIZE) 1585 zero_user_segment(page, len, PAGE_SIZE); 1586 else 1587 flush_dcache_page(page); 1588 1589 SetPageUptodate(page); 1590 unlock_page(page); 1591 put_page(page); 1592 } 1593 } 1594 1595 int ceph_uninline_data(struct file *filp, struct page *locked_page) 1596 { 1597 struct inode *inode = file_inode(filp); 1598 struct ceph_inode_info *ci = ceph_inode(inode); 1599 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1600 struct ceph_osd_request *req; 1601 struct page *page = NULL; 1602 u64 len, inline_version; 1603 int err = 0; 1604 bool from_pagecache = false; 1605 1606 spin_lock(&ci->i_ceph_lock); 1607 inline_version = ci->i_inline_version; 1608 spin_unlock(&ci->i_ceph_lock); 1609 1610 dout("uninline_data %p %llx.%llx inline_version %llu\n", 1611 inode, ceph_vinop(inode), inline_version); 1612 1613 if (inline_version == 1 || /* initial version, no data */ 1614 inline_version == CEPH_INLINE_NONE) 1615 goto out; 1616 1617 if (locked_page) { 1618 page = locked_page; 1619 WARN_ON(!PageUptodate(page)); 1620 } else if (ceph_caps_issued(ci) & 1621 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 1622 page = find_get_page(inode->i_mapping, 0); 1623 if (page) { 1624 if (PageUptodate(page)) { 1625 from_pagecache = true; 1626 lock_page(page); 1627 } else { 1628 put_page(page); 1629 page = NULL; 1630 } 1631 } 1632 } 1633 1634 if (page) { 1635 len = i_size_read(inode); 1636 if (len > PAGE_SIZE) 1637 len = PAGE_SIZE; 1638 } else { 1639 page = __page_cache_alloc(GFP_NOFS); 1640 if (!page) { 1641 err = -ENOMEM; 1642 goto out; 1643 } 1644 err = __ceph_do_getattr(inode, page, 1645 CEPH_STAT_CAP_INLINE_DATA, true); 1646 if (err < 0) { 1647 /* no inline data */ 1648 if (err == -ENODATA) 1649 err = 0; 1650 goto out; 1651 } 1652 len = err; 1653 } 1654 1655 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1656 ceph_vino(inode), 0, &len, 0, 1, 1657 CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, 1658 NULL, 0, 0, false); 1659 if (IS_ERR(req)) { 1660 err = PTR_ERR(req); 1661 goto out; 1662 } 1663 1664 req->r_mtime = inode->i_mtime; 1665 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1666 if (!err) 1667 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1668 ceph_osdc_put_request(req); 1669 if (err < 0) 1670 goto out; 1671 1672 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1673 ceph_vino(inode), 0, &len, 1, 3, 1674 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 1675 NULL, ci->i_truncate_seq, 1676 ci->i_truncate_size, false); 1677 if (IS_ERR(req)) { 1678 err = PTR_ERR(req); 1679 goto out; 1680 } 1681 1682 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 1683 1684 { 1685 __le64 xattr_buf = cpu_to_le64(inline_version); 1686 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1687 "inline_version", &xattr_buf, 1688 sizeof(xattr_buf), 1689 CEPH_OSD_CMPXATTR_OP_GT, 1690 CEPH_OSD_CMPXATTR_MODE_U64); 1691 if (err) 1692 goto out_put; 1693 } 1694 1695 { 1696 char xattr_buf[32]; 1697 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1698 "%llu", inline_version); 1699 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1700 "inline_version", 1701 xattr_buf, xattr_len, 0, 0); 1702 if (err) 1703 goto out_put; 1704 } 1705 1706 req->r_mtime = inode->i_mtime; 1707 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1708 if (!err) 1709 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1710 1711 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency, 1712 req->r_end_latency, len, err); 1713 1714 out_put: 1715 ceph_osdc_put_request(req); 1716 if (err == -ECANCELED) 1717 err = 0; 1718 out: 1719 if (page && page != locked_page) { 1720 if (from_pagecache) { 1721 unlock_page(page); 1722 put_page(page); 1723 } else 1724 __free_pages(page, 0); 1725 } 1726 1727 dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 1728 inode, ceph_vinop(inode), inline_version, err); 1729 return err; 1730 } 1731 1732 static const struct vm_operations_struct ceph_vmops = { 1733 .fault = ceph_filemap_fault, 1734 .page_mkwrite = ceph_page_mkwrite, 1735 }; 1736 1737 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1738 { 1739 struct address_space *mapping = file->f_mapping; 1740 1741 if (!mapping->a_ops->readpage) 1742 return -ENOEXEC; 1743 file_accessed(file); 1744 vma->vm_ops = &ceph_vmops; 1745 return 0; 1746 } 1747 1748 enum { 1749 POOL_READ = 1, 1750 POOL_WRITE = 2, 1751 }; 1752 1753 static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 1754 s64 pool, struct ceph_string *pool_ns) 1755 { 1756 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 1757 struct ceph_mds_client *mdsc = fsc->mdsc; 1758 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 1759 struct rb_node **p, *parent; 1760 struct ceph_pool_perm *perm; 1761 struct page **pages; 1762 size_t pool_ns_len; 1763 int err = 0, err2 = 0, have = 0; 1764 1765 down_read(&mdsc->pool_perm_rwsem); 1766 p = &mdsc->pool_perm_tree.rb_node; 1767 while (*p) { 1768 perm = rb_entry(*p, struct ceph_pool_perm, node); 1769 if (pool < perm->pool) 1770 p = &(*p)->rb_left; 1771 else if (pool > perm->pool) 1772 p = &(*p)->rb_right; 1773 else { 1774 int ret = ceph_compare_string(pool_ns, 1775 perm->pool_ns, 1776 perm->pool_ns_len); 1777 if (ret < 0) 1778 p = &(*p)->rb_left; 1779 else if (ret > 0) 1780 p = &(*p)->rb_right; 1781 else { 1782 have = perm->perm; 1783 break; 1784 } 1785 } 1786 } 1787 up_read(&mdsc->pool_perm_rwsem); 1788 if (*p) 1789 goto out; 1790 1791 if (pool_ns) 1792 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n", 1793 pool, (int)pool_ns->len, pool_ns->str); 1794 else 1795 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 1796 1797 down_write(&mdsc->pool_perm_rwsem); 1798 p = &mdsc->pool_perm_tree.rb_node; 1799 parent = NULL; 1800 while (*p) { 1801 parent = *p; 1802 perm = rb_entry(parent, struct ceph_pool_perm, node); 1803 if (pool < perm->pool) 1804 p = &(*p)->rb_left; 1805 else if (pool > perm->pool) 1806 p = &(*p)->rb_right; 1807 else { 1808 int ret = ceph_compare_string(pool_ns, 1809 perm->pool_ns, 1810 perm->pool_ns_len); 1811 if (ret < 0) 1812 p = &(*p)->rb_left; 1813 else if (ret > 0) 1814 p = &(*p)->rb_right; 1815 else { 1816 have = perm->perm; 1817 break; 1818 } 1819 } 1820 } 1821 if (*p) { 1822 up_write(&mdsc->pool_perm_rwsem); 1823 goto out; 1824 } 1825 1826 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1827 1, false, GFP_NOFS); 1828 if (!rd_req) { 1829 err = -ENOMEM; 1830 goto out_unlock; 1831 } 1832 1833 rd_req->r_flags = CEPH_OSD_FLAG_READ; 1834 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 1835 rd_req->r_base_oloc.pool = pool; 1836 if (pool_ns) 1837 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns); 1838 ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino); 1839 1840 err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS); 1841 if (err) 1842 goto out_unlock; 1843 1844 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1845 1, false, GFP_NOFS); 1846 if (!wr_req) { 1847 err = -ENOMEM; 1848 goto out_unlock; 1849 } 1850 1851 wr_req->r_flags = CEPH_OSD_FLAG_WRITE; 1852 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 1853 ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); 1854 ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); 1855 1856 err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS); 1857 if (err) 1858 goto out_unlock; 1859 1860 /* one page should be large enough for STAT data */ 1861 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 1862 if (IS_ERR(pages)) { 1863 err = PTR_ERR(pages); 1864 goto out_unlock; 1865 } 1866 1867 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, 1868 0, false, true); 1869 err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 1870 1871 wr_req->r_mtime = ci->vfs_inode.i_mtime; 1872 err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 1873 1874 if (!err) 1875 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 1876 if (!err2) 1877 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 1878 1879 if (err >= 0 || err == -ENOENT) 1880 have |= POOL_READ; 1881 else if (err != -EPERM) { 1882 if (err == -EBLOCKLISTED) 1883 fsc->blocklisted = true; 1884 goto out_unlock; 1885 } 1886 1887 if (err2 == 0 || err2 == -EEXIST) 1888 have |= POOL_WRITE; 1889 else if (err2 != -EPERM) { 1890 if (err2 == -EBLOCKLISTED) 1891 fsc->blocklisted = true; 1892 err = err2; 1893 goto out_unlock; 1894 } 1895 1896 pool_ns_len = pool_ns ? pool_ns->len : 0; 1897 perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS); 1898 if (!perm) { 1899 err = -ENOMEM; 1900 goto out_unlock; 1901 } 1902 1903 perm->pool = pool; 1904 perm->perm = have; 1905 perm->pool_ns_len = pool_ns_len; 1906 if (pool_ns_len > 0) 1907 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len); 1908 perm->pool_ns[pool_ns_len] = 0; 1909 1910 rb_link_node(&perm->node, parent, p); 1911 rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 1912 err = 0; 1913 out_unlock: 1914 up_write(&mdsc->pool_perm_rwsem); 1915 1916 ceph_osdc_put_request(rd_req); 1917 ceph_osdc_put_request(wr_req); 1918 out: 1919 if (!err) 1920 err = have; 1921 if (pool_ns) 1922 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 1923 pool, (int)pool_ns->len, pool_ns->str, err); 1924 else 1925 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 1926 return err; 1927 } 1928 1929 int ceph_pool_perm_check(struct inode *inode, int need) 1930 { 1931 struct ceph_inode_info *ci = ceph_inode(inode); 1932 struct ceph_string *pool_ns; 1933 s64 pool; 1934 int ret, flags; 1935 1936 /* Only need to do this for regular files */ 1937 if (!S_ISREG(inode->i_mode)) 1938 return 0; 1939 1940 if (ci->i_vino.snap != CEPH_NOSNAP) { 1941 /* 1942 * Pool permission check needs to write to the first object. 1943 * But for snapshot, head of the first object may have alread 1944 * been deleted. Skip check to avoid creating orphan object. 1945 */ 1946 return 0; 1947 } 1948 1949 if (ceph_test_mount_opt(ceph_inode_to_client(inode), 1950 NOPOOLPERM)) 1951 return 0; 1952 1953 spin_lock(&ci->i_ceph_lock); 1954 flags = ci->i_ceph_flags; 1955 pool = ci->i_layout.pool_id; 1956 spin_unlock(&ci->i_ceph_lock); 1957 check: 1958 if (flags & CEPH_I_POOL_PERM) { 1959 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 1960 dout("ceph_pool_perm_check pool %lld no read perm\n", 1961 pool); 1962 return -EPERM; 1963 } 1964 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 1965 dout("ceph_pool_perm_check pool %lld no write perm\n", 1966 pool); 1967 return -EPERM; 1968 } 1969 return 0; 1970 } 1971 1972 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 1973 ret = __ceph_pool_perm_get(ci, pool, pool_ns); 1974 ceph_put_string(pool_ns); 1975 if (ret < 0) 1976 return ret; 1977 1978 flags = CEPH_I_POOL_PERM; 1979 if (ret & POOL_READ) 1980 flags |= CEPH_I_POOL_RD; 1981 if (ret & POOL_WRITE) 1982 flags |= CEPH_I_POOL_WR; 1983 1984 spin_lock(&ci->i_ceph_lock); 1985 if (pool == ci->i_layout.pool_id && 1986 pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) { 1987 ci->i_ceph_flags |= flags; 1988 } else { 1989 pool = ci->i_layout.pool_id; 1990 flags = ci->i_ceph_flags; 1991 } 1992 spin_unlock(&ci->i_ceph_lock); 1993 goto check; 1994 } 1995 1996 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 1997 { 1998 struct ceph_pool_perm *perm; 1999 struct rb_node *n; 2000 2001 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 2002 n = rb_first(&mdsc->pool_perm_tree); 2003 perm = rb_entry(n, struct ceph_pool_perm, node); 2004 rb_erase(n, &mdsc->pool_perm_tree); 2005 kfree(perm); 2006 } 2007 } 2008