// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
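 *
 * (If we do race, ceph_invalidatepage() below is where the wrbuffer
 * ref and the snap context reference taken here get dropped again.)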
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	WARN_ON(!PageLocked(page));
	if (!PagePrivate(page))
		return;

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);

	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}

/*
 * read a single page, without unlocking it.
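 *
 * (Returns 0 normally; the helper may also return -EINPROGRESS when
 * fscache is reading the page asynchronously, in which case the page
 * is unlocked by the fscache completion rather than by our caller.)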
 */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}

static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}
	kfree(osd_data->pages);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
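 *
 * (Pages we do not submit are left on page_list; the caller is
 * responsible for cleaning those up.)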
 */
static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
		      struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;

	if (!rw_ctx) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = list_entry(page_list->prev,
						  struct page, lru);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}


/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_file_info *ci = file->private_data;
	struct ceph_rw_context *rw_ctx;
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);

	if (rc == 0)
		goto out;

	rw_ctx = ceph_find_rw_context(ci);
	max = fsc->mount_options->rsize >> PAGE_SHIFT;
	dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
	     inode, file, rw_ctx, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, rw_ctx, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

struct ceph_writeback_ctl
{
	loff_t i_size;
	u64 truncate_size;
	u32 truncate_seq;
	bool size_stable;
	bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
		   struct ceph_snap_context *page_snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (!capsnap->dirty_pages)
			continue;

		/* get i_size, truncate_{seq,size} for page_snapc? */
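		/*
		 * (Once snapc is set below, we keep walking only to find
		 * the capsnap matching page_snapc, so that ctl describes
		 * that particular snap context.)
		 */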
		if (snapc && capsnap->context != page_snapc)
			continue;

		if (ctl) {
			if (capsnap->writing) {
				ctl->i_size = i_size_read(inode);
				ctl->size_stable = false;
			} else {
				ctl->i_size = capsnap->size;
				ctl->size_stable = true;
			}
			ctl->truncate_size = capsnap->truncate_size;
			ctl->truncate_seq = capsnap->truncate_seq;
			ctl->head_snapc = false;
		}

		if (snapc)
			break;

		snapc = ceph_get_snap_context(capsnap->context);
		if (!page_snapc ||
		    page_snapc == snapc ||
		    page_snapc->seq > snapc->seq)
			break;
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
		if (ctl) {
			ctl->i_size = i_size_read(inode);
			ctl->truncate_size = ci->i_truncate_size;
			ctl->truncate_seq = ci->i_truncate_seq;
			ctl->size_stable = false;
			ctl->head_snapc = true;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

static u64 get_writepages_data_length(struct inode *inode,
				      struct page *page, u64 start)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = page_snap_context(page);
	struct ceph_cap_snap *capsnap = NULL;
	u64 end = i_size_read(inode);

	if (snapc != ci->i_head_snapc) {
		bool found = false;
		spin_lock(&ci->i_ceph_lock);
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				if (!capsnap->writing)
					end = capsnap->size;
				found = true;
				break;
			}
		}
		spin_unlock(&ci->i_ceph_lock);
		WARN_ON(!found);
	}
	if (end > page_offset(page) + PAGE_SIZE)
		end = page_offset(page) + PAGE_SIZE;
	return end > start ? end - start : 0;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	int err, len = PAGE_SIZE;
	struct ceph_writeback_ctl ceph_wbc;

	dout("writepage %p idx %lu\n", page, page->index);

	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (!snapc) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		return 0;
	}
	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON(!(current->flags & PF_MEMALLOC));
		ceph_put_snap_context(oldest);
		redirty_page_for_writepage(wbc, page);
		return 0;
	}
	ceph_put_snap_context(oldest);

	/* is this a partial page at end of file? */
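	/*
	 * (Pages entirely past i_size are simply invalidated; a partial
	 * tail page just gets its write length trimmed to i_size below.)
	 */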
	if (page_off >= ceph_wbc.i_size) {
		dout("%p page eof %llu\n", page, ceph_wbc.i_size);
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		return 0;
	}

	if (ceph_wbc.i_size < page_off + len)
		len = ceph_wbc.i_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n",
	     inode, page, page->index, page_off, len, snapc, snapc->seq);

	if (atomic_long_inc_return(&fsc->writeback_count) >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode),
				   &ci->i_layout, snapc, page_off, len,
				   ceph_wbc.truncate_seq,
				   ceph_wbc.truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			return err;
		}
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */

	if (atomic_long_dec_return(&fsc->writeback_count) <
	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);

	return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
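 *
 * (A request may carry several extent write ops; we walk each op's run
 * of pages and clean every page before freeing the shared page array.)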
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0) {
		mapping_set_error(mapping, rc);
		ceph_set_error_write(ci);
	} else {
		ceph_clear_error_write(ci);
	}

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(inode_to_bdi(inode),
						    BLK_RW_ASYNC);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		ceph_release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start_index, end = -1;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	struct ceph_writeback_ctl ceph_wbc;
	bool should_loop, range_whole = false;
	bool stop, done = false;

	dout("writepages_start %p (mode=%s)\n", inode,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
"ALL" : "HOLD")); 808 809 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 810 if (ci->i_wrbuffer_ref > 0) { 811 pr_warn_ratelimited( 812 "writepage_start %p %lld forced umount\n", 813 inode, ceph_ino(inode)); 814 } 815 mapping_set_error(mapping, -EIO); 816 return -EIO; /* we're in a forced umount, don't write! */ 817 } 818 if (fsc->mount_options->wsize < wsize) 819 wsize = fsc->mount_options->wsize; 820 821 pagevec_init(&pvec); 822 823 start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 824 index = start_index; 825 826 retry: 827 /* find oldest snap context with dirty data */ 828 snapc = get_oldest_context(inode, &ceph_wbc, NULL); 829 if (!snapc) { 830 /* hmm, why does writepages get called when there 831 is no dirty data? */ 832 dout(" no snap context with dirty data?\n"); 833 goto out; 834 } 835 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 836 snapc, snapc->seq, snapc->num_snaps); 837 838 should_loop = false; 839 if (ceph_wbc.head_snapc && snapc != last_snapc) { 840 /* where to start/end? */ 841 if (wbc->range_cyclic) { 842 index = start_index; 843 end = -1; 844 if (index > 0) 845 should_loop = true; 846 dout(" cyclic, start at %lu\n", index); 847 } else { 848 index = wbc->range_start >> PAGE_SHIFT; 849 end = wbc->range_end >> PAGE_SHIFT; 850 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 851 range_whole = true; 852 dout(" not cyclic, %lu to %lu\n", index, end); 853 } 854 } else if (!ceph_wbc.head_snapc) { 855 /* Do not respect wbc->range_{start,end}. Dirty pages 856 * in that range can be associated with newer snapc. 857 * They are not writeable until we write all dirty pages 858 * associated with 'snapc' get written */ 859 if (index > 0 || wbc->sync_mode != WB_SYNC_NONE) 860 should_loop = true; 861 dout(" non-head snapc, range whole\n"); 862 } 863 864 ceph_put_snap_context(last_snapc); 865 last_snapc = snapc; 866 867 stop = false; 868 while (!stop && index <= end) { 869 int num_ops = 0, op_idx; 870 unsigned i, pvec_pages, max_pages, locked_pages = 0; 871 struct page **pages = NULL, **data_pages; 872 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ 873 struct page *page; 874 pgoff_t strip_unit_end = 0; 875 u64 offset = 0, len = 0; 876 877 max_pages = wsize >> PAGE_SHIFT; 878 879 get_more_pages: 880 pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index, 881 end, PAGECACHE_TAG_DIRTY, 882 max_pages - locked_pages); 883 dout("pagevec_lookup_range_tag got %d\n", pvec_pages); 884 if (!pvec_pages && !locked_pages) 885 break; 886 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 887 page = pvec.pages[i]; 888 dout("? 
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				continue;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (page_offset(page) >= ceph_wbc.i_size) {
				dout("%p page eof %llu\n",
				     page, ceph_wbc.i_size);
				/* not done if range_cyclic */
				stop = true;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				if (wbc->sync_mode == WB_SYNC_NONE) {
					dout("%p under writeback\n", page);
					unlock_page(page);
					continue;
				}
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc != snapc) {
				dout("page snapc %p %lld != oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				continue;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				continue;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;

				rc = ceph_calc_file_object_mapping(&ci->i_layout,
								offset, len,
								&objnum, &objoff,
								&len);
				if (rc < 0) {
					unlock_page(page);
					break;
				}

				num_ops = 1;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc(max_pages * sizeof (*pages),
						GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
							CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(inode_to_bdi(inode),
						  BLK_RW_ASYNC);
			}


			pages[locked_pages++] = page;
			pvec.pages[i] = NULL;

			len += PAGE_SIZE;
		}

		/* did we get anything? */
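		/* (if not, release the pvec below and look for more pages) */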
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			unsigned j, n = 0;
			/* shift unused page to beginning of pvec */
			for (j = 0; j < pvec_pages; j++) {
				if (!pvec.pages[j])
					continue;
				if (n < j)
					pvec.pages[n] = pvec.pages[j];
				n++;
			}
			pvec.nr = n;

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_release(&pvec);
				goto get_more_pages;
			}
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					&ci->i_layout, vino,
					offset, &len, 0, num_ops,
					CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
					snapc, ceph_wbc.truncate_seq,
					ceph_wbc.truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						&ci->i_layout, vino,
						offset, &len, 0,
						min(num_ops,
						    CEPH_OSD_SLAB_OPS),
						CEPH_OSD_OP_WRITE,
						CEPH_OSD_FLAG_WRITE,
						snapc, ceph_wbc.truncate_seq,
						ceph_wbc.truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							!!pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (ceph_wbc.size_stable) {
			len = min(len, ceph_wbc.i_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = get_writepages_data_length(inode, pages[i - 1],
							 offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, !!pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		BUG_ON(op_idx + 1 != req->r_num_ops);

		pool = NULL;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc(locked_pages * sizeof (*pages),
					GFP_NOFS);
			if (!pages) {
				pool = fsc->wb_pagevec_pool;
				pages = mempool_alloc(pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
			done = stop = true;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		end = start_index - 1; /* OK even when start_index == 0 */

		/* to write dirty pages associated with next snapc,
		 * we need to wait until current writes complete */
		if (wbc->sync_mode != WB_SYNC_NONE &&
		    start_index == 0 && /* all dirty pages were checked */
		    !ceph_wbc.head_snapc) {
			struct page *page;
			unsigned i, nr;
			index = 0;
			while ((index <= end) &&
			       (nr = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_WRITEBACK))) {
				for (i = 0; i < nr; i++) {
					page = pvec.pages[i];
					if (page_snap_context(page) != snapc)
						continue;
					wait_on_page_writeback(page);
				}
				pagevec_release(&pvec);
				cond_resched();
			}
		}

		start_index = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(last_snapc);
	dout("writepages dend - startone, rc = %d\n", rc);
	return rc;
}



/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_MASK;
	int pos_in_page = pos & ~PAGE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

	if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		unlock_page(page);
		return -EIO;
	}

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL, NULL);
		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_unlock;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_SIZE)
		return 0;

	/* past end of file? */
	i_size = i_size_read(inode);

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = ceph_do_readpage(file, page);
	if (r < 0) {
		if (r == -EINPROGRESS)
			return -EAGAIN;
		goto fail_unlock;
	}
	goto retry_locked;
fail_unlock:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			put_page(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	bool check_cap = false;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}
		SetPageUptodate(page);
	}

	/* did file size increase? */
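	/* (if so, we may need to tell the MDS via ceph_check_caps() below) */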
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	set_page_dirty(page);

out:
	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};

static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
static int ceph_filemap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_SHIFT;
	int want, got, ret;
	sigset_t oldset;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		ceph_add_rw_context(fi, &rw_ctx);
		ret = filemap_fault(vmf);
		ceph_del_rw_context(fi, &rw_ctx);
	} else
		ret = -EAGAIN;

	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (ret != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		int ret1;
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		ret1 = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (ret1 < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			if (ret1 < 0)
				ret = ret1;
			else
				ret = VM_FAULT_SIGBUS;
			goto out_inline;
		}
		if (ret1 < PAGE_SIZE)
			zero_user_segment(page, ret1, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

	return ret;
}

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, ret;
	sigset_t oldset;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		ret = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (ret < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (ret < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	do {
		lock_page(page);

		if ((off > size) || (page->mapping != inode->i_mapping)) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
		if (ret >= 0) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
		}
	} while (ret == -EAGAIN);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	ceph_free_cap_flush(prealloc_cf);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
	return ret;
}

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}

int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;

	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

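	/*
	 * (This request carries three ops: op 0 is a cmpxattr guard on the
	 * object's recorded "inline_version", op 1 is the data write set up
	 * above, and op 2 records the new inline_version via setxattr.)
	 */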
	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault = ceph_filemap_fault,
	.page_mkwrite = ceph_page_mkwrite,
};

int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}

enum {
	POOL_READ = 1,
	POOL_WRITE = 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);

	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						perm->pool_ns,
						perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}

	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	wr_req->r_abort_on_full = true;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM)
		goto out_unlock;

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}

int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
	s64 pool;
	struct ceph_string *pool_ns;
	int ret, flags;

	if (ci->i_vino.snap != CEPH_NOSNAP) {
		/*
		 * Pool permission check needs to write to the first object.
		 * But for snapshot, head of the first object may have already
		 * been deleted. Skip check to avoid creating orphan object.
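		 * (So for snapshot inodes we simply allow the I/O here.)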
		 */
		return 0;
	}

	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}

void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}