1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/backing-dev.h> 4 #include <linux/fs.h> 5 #include <linux/mm.h> 6 #include <linux/pagemap.h> 7 #include <linux/writeback.h> /* generic_writepages */ 8 #include <linux/slab.h> 9 #include <linux/pagevec.h> 10 #include <linux/task_io_accounting_ops.h> 11 #include <linux/signal.h> 12 13 #include "super.h" 14 #include "mds_client.h" 15 #include "cache.h" 16 #include <linux/ceph/osd_client.h> 17 18 /* 19 * Ceph address space ops. 20 * 21 * There are a few funny things going on here. 22 * 23 * The page->private field is used to reference a struct 24 * ceph_snap_context for _every_ dirty page. This indicates which 25 * snapshot the page was logically dirtied in, and thus which snap 26 * context needs to be associated with the osd write during writeback. 27 * 28 * Similarly, struct ceph_inode_info maintains a set of counters to 29 * count dirty pages on the inode. In the absence of snapshots, 30 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. 31 * 32 * When a snapshot is taken (that is, when the client receives 33 * notification that a snapshot was taken), each inode with caps and 34 * with dirty pages (dirty pages implies there is a cap) gets a new 35 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending 36 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is 37 * moved to capsnap->dirty. (Unless a sync write is currently in 38 * progress. In that case, the capsnap is said to be "pending", new 39 * writes cannot start, and the capsnap isn't "finalized" until the 40 * write completes (or fails) and a final size/mtime for the inode for 41 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. 42 * 43 * On writeback, we must submit writes to the osd IN SNAP ORDER. So, 44 * we look for the first capsnap in i_cap_snaps and write out pages in 45 * that snap context _only_. Then we move on to the next capsnap, 46 * eventually reaching the "live" or "head" context (i.e., pages that 47 * are not yet snapped) and are writing the most recently dirtied 48 * pages. 49 * 50 * Invalidate and so forth must take care to ensure the dirty page 51 * accounting is preserved. 52 */ 53 54 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) 55 #define CONGESTION_OFF_THRESH(congestion_kb) \ 56 (CONGESTION_ON_THRESH(congestion_kb) - \ 57 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 58 59 static inline struct ceph_snap_context *page_snap_context(struct page *page) 60 { 61 if (PagePrivate(page)) 62 return (void *)page->private; 63 return NULL; 64 } 65 66 /* 67 * Dirty a page. Optimistically adjust accounting, on the assumption 68 * that we won't race with invalidate. If we do, readjust. 69 */ 70 static int ceph_set_page_dirty(struct page *page) 71 { 72 struct address_space *mapping = page->mapping; 73 struct inode *inode; 74 struct ceph_inode_info *ci; 75 struct ceph_snap_context *snapc; 76 int ret; 77 78 if (unlikely(!mapping)) 79 return !TestSetPageDirty(page); 80 81 if (PageDirty(page)) { 82 dout("%p set_page_dirty %p idx %lu -- already dirty\n", 83 mapping->host, page, page->index); 84 BUG_ON(!PagePrivate(page)); 85 return 0; 86 } 87 88 inode = mapping->host; 89 ci = ceph_inode(inode); 90 91 /* dirty the head */ 92 spin_lock(&ci->i_ceph_lock); 93 BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference 94 if (__ceph_have_pending_cap_snap(ci)) { 95 struct ceph_cap_snap *capsnap = 96 list_last_entry(&ci->i_cap_snaps, 97 struct ceph_cap_snap, 98 ci_item); 99 snapc = ceph_get_snap_context(capsnap->context); 100 capsnap->dirty_pages++; 101 } else { 102 BUG_ON(!ci->i_head_snapc); 103 snapc = ceph_get_snap_context(ci->i_head_snapc); 104 ++ci->i_wrbuffer_ref_head; 105 } 106 if (ci->i_wrbuffer_ref == 0) 107 ihold(inode); 108 ++ci->i_wrbuffer_ref; 109 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " 110 "snapc %p seq %lld (%d snaps)\n", 111 mapping->host, page, page->index, 112 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 113 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 114 snapc, snapc->seq, snapc->num_snaps); 115 spin_unlock(&ci->i_ceph_lock); 116 117 /* 118 * Reference snap context in page->private. Also set 119 * PagePrivate so that we get invalidatepage callback. 120 */ 121 BUG_ON(PagePrivate(page)); 122 page->private = (unsigned long)snapc; 123 SetPagePrivate(page); 124 125 ret = __set_page_dirty_nobuffers(page); 126 WARN_ON(!PageLocked(page)); 127 WARN_ON(!page->mapping); 128 129 return ret; 130 } 131 132 /* 133 * If we are truncating the full page (i.e. offset == 0), adjust the 134 * dirty page counters appropriately. Only called if there is private 135 * data on the page. 136 */ 137 static void ceph_invalidatepage(struct page *page, unsigned int offset, 138 unsigned int length) 139 { 140 struct inode *inode; 141 struct ceph_inode_info *ci; 142 struct ceph_snap_context *snapc = page_snap_context(page); 143 144 inode = page->mapping->host; 145 ci = ceph_inode(inode); 146 147 if (offset != 0 || length != PAGE_SIZE) { 148 dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", 149 inode, page, page->index, offset, length); 150 return; 151 } 152 153 ceph_invalidate_fscache_page(inode, page); 154 155 WARN_ON(!PageLocked(page)); 156 if (!PagePrivate(page)) 157 return; 158 159 ClearPageChecked(page); 160 161 dout("%p invalidatepage %p idx %lu full dirty page\n", 162 inode, page, page->index); 163 164 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 165 ceph_put_snap_context(snapc); 166 page->private = 0; 167 ClearPagePrivate(page); 168 } 169 170 static int ceph_releasepage(struct page *page, gfp_t g) 171 { 172 dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, 173 page, page->index, PageDirty(page) ? "" : "not "); 174 175 /* Can we release the page from the cache? */ 176 if (!ceph_release_fscache_page(page, g)) 177 return 0; 178 179 return !PagePrivate(page); 180 } 181 182 /* 183 * read a single page, without unlocking it. 184 */ 185 static int ceph_do_readpage(struct file *filp, struct page *page) 186 { 187 struct inode *inode = file_inode(filp); 188 struct ceph_inode_info *ci = ceph_inode(inode); 189 struct ceph_osd_client *osdc = 190 &ceph_inode_to_client(inode)->client->osdc; 191 int err = 0; 192 u64 off = page_offset(page); 193 u64 len = PAGE_SIZE; 194 195 if (off >= i_size_read(inode)) { 196 zero_user_segment(page, 0, PAGE_SIZE); 197 SetPageUptodate(page); 198 return 0; 199 } 200 201 if (ci->i_inline_version != CEPH_INLINE_NONE) { 202 /* 203 * Uptodate inline data should have been added 204 * into page cache while getting Fcr caps. 205 */ 206 if (off == 0) 207 return -EINVAL; 208 zero_user_segment(page, 0, PAGE_SIZE); 209 SetPageUptodate(page); 210 return 0; 211 } 212 213 err = ceph_readpage_from_fscache(inode, page); 214 if (err == 0) 215 return -EINPROGRESS; 216 217 dout("readpage inode %p file %p page %p index %lu\n", 218 inode, filp, page, page->index); 219 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 220 off, &len, 221 ci->i_truncate_seq, ci->i_truncate_size, 222 &page, 1, 0); 223 if (err == -ENOENT) 224 err = 0; 225 if (err < 0) { 226 SetPageError(page); 227 ceph_fscache_readpage_cancel(inode, page); 228 goto out; 229 } 230 if (err < PAGE_SIZE) 231 /* zero fill remainder of page */ 232 zero_user_segment(page, err, PAGE_SIZE); 233 else 234 flush_dcache_page(page); 235 236 SetPageUptodate(page); 237 ceph_readpage_to_fscache(inode, page); 238 239 out: 240 return err < 0 ? err : 0; 241 } 242 243 static int ceph_readpage(struct file *filp, struct page *page) 244 { 245 int r = ceph_do_readpage(filp, page); 246 if (r != -EINPROGRESS) 247 unlock_page(page); 248 else 249 r = 0; 250 return r; 251 } 252 253 /* 254 * Finish an async read(ahead) op. 255 */ 256 static void finish_read(struct ceph_osd_request *req) 257 { 258 struct inode *inode = req->r_inode; 259 struct ceph_osd_data *osd_data; 260 int rc = req->r_result <= 0 ? req->r_result : 0; 261 int bytes = req->r_result >= 0 ? req->r_result : 0; 262 int num_pages; 263 int i; 264 265 dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); 266 267 /* unlock all pages, zeroing any data we didn't read */ 268 osd_data = osd_req_op_extent_osd_data(req, 0); 269 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 270 num_pages = calc_pages_for((u64)osd_data->alignment, 271 (u64)osd_data->length); 272 for (i = 0; i < num_pages; i++) { 273 struct page *page = osd_data->pages[i]; 274 275 if (rc < 0 && rc != -ENOENT) { 276 ceph_fscache_readpage_cancel(inode, page); 277 goto unlock; 278 } 279 if (bytes < (int)PAGE_SIZE) { 280 /* zero (remainder of) page */ 281 int s = bytes < 0 ? 0 : bytes; 282 zero_user_segment(page, s, PAGE_SIZE); 283 } 284 dout("finish_read %p uptodate %p idx %lu\n", inode, page, 285 page->index); 286 flush_dcache_page(page); 287 SetPageUptodate(page); 288 ceph_readpage_to_fscache(inode, page); 289 unlock: 290 unlock_page(page); 291 put_page(page); 292 bytes -= PAGE_SIZE; 293 } 294 kfree(osd_data->pages); 295 } 296 297 /* 298 * start an async read(ahead) operation. return nr_pages we submitted 299 * a read for on success, or negative error code. 300 */ 301 static int start_read(struct inode *inode, struct list_head *page_list, int max) 302 { 303 struct ceph_osd_client *osdc = 304 &ceph_inode_to_client(inode)->client->osdc; 305 struct ceph_inode_info *ci = ceph_inode(inode); 306 struct page *page = list_entry(page_list->prev, struct page, lru); 307 struct ceph_vino vino; 308 struct ceph_osd_request *req; 309 u64 off; 310 u64 len; 311 int i; 312 struct page **pages; 313 pgoff_t next_index; 314 int nr_pages = 0; 315 int got = 0; 316 int ret = 0; 317 318 if (!current->journal_info) { 319 /* caller of readpages does not hold buffer and read caps 320 * (fadvise, madvise and readahead cases) */ 321 int want = CEPH_CAP_FILE_CACHE; 322 ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got); 323 if (ret < 0) { 324 dout("start_read %p, error getting cap\n", inode); 325 } else if (!(got & want)) { 326 dout("start_read %p, no cache cap\n", inode); 327 ret = 0; 328 } 329 if (ret <= 0) { 330 if (got) 331 ceph_put_cap_refs(ci, got); 332 while (!list_empty(page_list)) { 333 page = list_entry(page_list->prev, 334 struct page, lru); 335 list_del(&page->lru); 336 put_page(page); 337 } 338 return ret; 339 } 340 } 341 342 off = (u64) page_offset(page); 343 344 /* count pages */ 345 next_index = page->index; 346 list_for_each_entry_reverse(page, page_list, lru) { 347 if (page->index != next_index) 348 break; 349 nr_pages++; 350 next_index++; 351 if (max && nr_pages == max) 352 break; 353 } 354 len = nr_pages << PAGE_SHIFT; 355 dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, 356 off, len); 357 vino = ceph_vino(inode); 358 req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 359 0, 1, CEPH_OSD_OP_READ, 360 CEPH_OSD_FLAG_READ, NULL, 361 ci->i_truncate_seq, ci->i_truncate_size, 362 false); 363 if (IS_ERR(req)) { 364 ret = PTR_ERR(req); 365 goto out; 366 } 367 368 /* build page vector */ 369 nr_pages = calc_pages_for(0, len); 370 pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL); 371 if (!pages) { 372 ret = -ENOMEM; 373 goto out_put; 374 } 375 for (i = 0; i < nr_pages; ++i) { 376 page = list_entry(page_list->prev, struct page, lru); 377 BUG_ON(PageLocked(page)); 378 list_del(&page->lru); 379 380 dout("start_read %p adding %p idx %lu\n", inode, page, 381 page->index); 382 if (add_to_page_cache_lru(page, &inode->i_data, page->index, 383 GFP_KERNEL)) { 384 ceph_fscache_uncache_page(inode, page); 385 put_page(page); 386 dout("start_read %p add_to_page_cache failed %p\n", 387 inode, page); 388 nr_pages = i; 389 if (nr_pages > 0) { 390 len = nr_pages << PAGE_SHIFT; 391 osd_req_op_extent_update(req, 0, len); 392 break; 393 } 394 goto out_pages; 395 } 396 pages[i] = page; 397 } 398 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); 399 req->r_callback = finish_read; 400 req->r_inode = inode; 401 402 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 403 ret = ceph_osdc_start_request(osdc, req, false); 404 if (ret < 0) 405 goto out_pages; 406 ceph_osdc_put_request(req); 407 408 /* After adding locked pages to page cache, the inode holds cache cap. 409 * So we can drop our cap refs. */ 410 if (got) 411 ceph_put_cap_refs(ci, got); 412 413 return nr_pages; 414 415 out_pages: 416 for (i = 0; i < nr_pages; ++i) { 417 ceph_fscache_readpage_cancel(inode, pages[i]); 418 unlock_page(pages[i]); 419 } 420 ceph_put_page_vector(pages, nr_pages, false); 421 out_put: 422 ceph_osdc_put_request(req); 423 out: 424 if (got) 425 ceph_put_cap_refs(ci, got); 426 return ret; 427 } 428 429 430 /* 431 * Read multiple pages. Leave pages we don't read + unlock in page_list; 432 * the caller (VM) cleans them up. 433 */ 434 static int ceph_readpages(struct file *file, struct address_space *mapping, 435 struct list_head *page_list, unsigned nr_pages) 436 { 437 struct inode *inode = file_inode(file); 438 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 439 int rc = 0; 440 int max = 0; 441 442 if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE) 443 return -EINVAL; 444 445 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, 446 &nr_pages); 447 448 if (rc == 0) 449 goto out; 450 451 max = fsc->mount_options->rsize >> PAGE_SHIFT; 452 dout("readpages %p file %p nr_pages %d max %d\n", 453 inode, file, nr_pages, max); 454 while (!list_empty(page_list)) { 455 rc = start_read(inode, page_list, max); 456 if (rc < 0) 457 goto out; 458 } 459 out: 460 ceph_fscache_readpages_cancel(inode, page_list); 461 462 dout("readpages %p file %p ret %d\n", inode, file, rc); 463 return rc; 464 } 465 466 struct ceph_writeback_ctl 467 { 468 loff_t i_size; 469 u64 truncate_size; 470 u32 truncate_seq; 471 bool size_stable; 472 bool head_snapc; 473 }; 474 475 /* 476 * Get ref for the oldest snapc for an inode with dirty data... that is, the 477 * only snap context we are allowed to write back. 478 */ 479 static struct ceph_snap_context * 480 get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl, 481 struct ceph_snap_context *page_snapc) 482 { 483 struct ceph_inode_info *ci = ceph_inode(inode); 484 struct ceph_snap_context *snapc = NULL; 485 struct ceph_cap_snap *capsnap = NULL; 486 487 spin_lock(&ci->i_ceph_lock); 488 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 489 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 490 capsnap->context, capsnap->dirty_pages); 491 if (!capsnap->dirty_pages) 492 continue; 493 494 /* get i_size, truncate_{seq,size} for page_snapc? */ 495 if (snapc && capsnap->context != page_snapc) 496 continue; 497 498 if (ctl) { 499 if (capsnap->writing) { 500 ctl->i_size = i_size_read(inode); 501 ctl->size_stable = false; 502 } else { 503 ctl->i_size = capsnap->size; 504 ctl->size_stable = true; 505 } 506 ctl->truncate_size = capsnap->truncate_size; 507 ctl->truncate_seq = capsnap->truncate_seq; 508 ctl->head_snapc = false; 509 } 510 511 if (snapc) 512 break; 513 514 snapc = ceph_get_snap_context(capsnap->context); 515 if (!page_snapc || 516 page_snapc == snapc || 517 page_snapc->seq > snapc->seq) 518 break; 519 } 520 if (!snapc && ci->i_wrbuffer_ref_head) { 521 snapc = ceph_get_snap_context(ci->i_head_snapc); 522 dout(" head snapc %p has %d dirty pages\n", 523 snapc, ci->i_wrbuffer_ref_head); 524 if (ctl) { 525 ctl->i_size = i_size_read(inode); 526 ctl->truncate_size = ci->i_truncate_size; 527 ctl->truncate_seq = ci->i_truncate_seq; 528 ctl->size_stable = false; 529 ctl->head_snapc = true; 530 } 531 } 532 spin_unlock(&ci->i_ceph_lock); 533 return snapc; 534 } 535 536 static u64 get_writepages_data_length(struct inode *inode, 537 struct page *page, u64 start) 538 { 539 struct ceph_inode_info *ci = ceph_inode(inode); 540 struct ceph_snap_context *snapc = page_snap_context(page); 541 struct ceph_cap_snap *capsnap = NULL; 542 u64 end = i_size_read(inode); 543 544 if (snapc != ci->i_head_snapc) { 545 bool found = false; 546 spin_lock(&ci->i_ceph_lock); 547 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 548 if (capsnap->context == snapc) { 549 if (!capsnap->writing) 550 end = capsnap->size; 551 found = true; 552 break; 553 } 554 } 555 spin_unlock(&ci->i_ceph_lock); 556 WARN_ON(!found); 557 } 558 if (end > page_offset(page) + PAGE_SIZE) 559 end = page_offset(page) + PAGE_SIZE; 560 return end > start ? end - start : 0; 561 } 562 563 /* 564 * Write a single page, but leave the page locked. 565 * 566 * If we get a write error, set the page error bit, but still adjust the 567 * dirty page accounting (i.e., page is no longer dirty). 568 */ 569 static int writepage_nounlock(struct page *page, struct writeback_control *wbc) 570 { 571 struct inode *inode; 572 struct ceph_inode_info *ci; 573 struct ceph_fs_client *fsc; 574 struct ceph_snap_context *snapc, *oldest; 575 loff_t page_off = page_offset(page); 576 long writeback_stat; 577 int err, len = PAGE_SIZE; 578 struct ceph_writeback_ctl ceph_wbc; 579 580 dout("writepage %p idx %lu\n", page, page->index); 581 582 inode = page->mapping->host; 583 ci = ceph_inode(inode); 584 fsc = ceph_inode_to_client(inode); 585 586 /* verify this is a writeable snap context */ 587 snapc = page_snap_context(page); 588 if (!snapc) { 589 dout("writepage %p page %p not dirty?\n", inode, page); 590 return 0; 591 } 592 oldest = get_oldest_context(inode, &ceph_wbc, snapc); 593 if (snapc->seq > oldest->seq) { 594 dout("writepage %p page %p snapc %p not writeable - noop\n", 595 inode, page, snapc); 596 /* we should only noop if called by kswapd */ 597 WARN_ON(!(current->flags & PF_MEMALLOC)); 598 ceph_put_snap_context(oldest); 599 redirty_page_for_writepage(wbc, page); 600 return 0; 601 } 602 ceph_put_snap_context(oldest); 603 604 /* is this a partial page at end of file? */ 605 if (page_off >= ceph_wbc.i_size) { 606 dout("%p page eof %llu\n", page, ceph_wbc.i_size); 607 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); 608 return 0; 609 } 610 611 if (ceph_wbc.i_size < page_off + len) 612 len = ceph_wbc.i_size - page_off; 613 614 dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n", 615 inode, page, page->index, page_off, len, snapc, snapc->seq); 616 617 writeback_stat = atomic_long_inc_return(&fsc->writeback_count); 618 if (writeback_stat > 619 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) 620 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC); 621 622 set_page_writeback(page); 623 err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode), 624 &ci->i_layout, snapc, page_off, len, 625 ceph_wbc.truncate_seq, 626 ceph_wbc.truncate_size, 627 &inode->i_mtime, &page, 1); 628 if (err < 0) { 629 struct writeback_control tmp_wbc; 630 if (!wbc) 631 wbc = &tmp_wbc; 632 if (err == -ERESTARTSYS) { 633 /* killed by SIGKILL */ 634 dout("writepage interrupted page %p\n", page); 635 redirty_page_for_writepage(wbc, page); 636 end_page_writeback(page); 637 return err; 638 } 639 dout("writepage setting page/mapping error %d %p\n", 640 err, page); 641 SetPageError(page); 642 mapping_set_error(&inode->i_data, err); 643 wbc->pages_skipped++; 644 } else { 645 dout("writepage cleaned page %p\n", page); 646 err = 0; /* vfs expects us to return 0 */ 647 } 648 page->private = 0; 649 ClearPagePrivate(page); 650 end_page_writeback(page); 651 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 652 ceph_put_snap_context(snapc); /* page's reference */ 653 return err; 654 } 655 656 static int ceph_writepage(struct page *page, struct writeback_control *wbc) 657 { 658 int err; 659 struct inode *inode = page->mapping->host; 660 BUG_ON(!inode); 661 ihold(inode); 662 err = writepage_nounlock(page, wbc); 663 if (err == -ERESTARTSYS) { 664 /* direct memory reclaimer was killed by SIGKILL. return 0 665 * to prevent caller from setting mapping/page error */ 666 err = 0; 667 } 668 unlock_page(page); 669 iput(inode); 670 return err; 671 } 672 673 /* 674 * lame release_pages helper. release_pages() isn't exported to 675 * modules. 676 */ 677 static void ceph_release_pages(struct page **pages, int num) 678 { 679 struct pagevec pvec; 680 int i; 681 682 pagevec_init(&pvec, 0); 683 for (i = 0; i < num; i++) { 684 if (pagevec_add(&pvec, pages[i]) == 0) 685 pagevec_release(&pvec); 686 } 687 pagevec_release(&pvec); 688 } 689 690 /* 691 * async writeback completion handler. 692 * 693 * If we get an error, set the mapping error bit, but not the individual 694 * page error bits. 695 */ 696 static void writepages_finish(struct ceph_osd_request *req) 697 { 698 struct inode *inode = req->r_inode; 699 struct ceph_inode_info *ci = ceph_inode(inode); 700 struct ceph_osd_data *osd_data; 701 struct page *page; 702 int num_pages, total_pages = 0; 703 int i, j; 704 int rc = req->r_result; 705 struct ceph_snap_context *snapc = req->r_snapc; 706 struct address_space *mapping = inode->i_mapping; 707 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 708 bool remove_page; 709 710 dout("writepages_finish %p rc %d\n", inode, rc); 711 if (rc < 0) { 712 mapping_set_error(mapping, rc); 713 ceph_set_error_write(ci); 714 } else { 715 ceph_clear_error_write(ci); 716 } 717 718 /* 719 * We lost the cache cap, need to truncate the page before 720 * it is unlocked, otherwise we'd truncate it later in the 721 * page truncation thread, possibly losing some data that 722 * raced its way in 723 */ 724 remove_page = !(ceph_caps_issued(ci) & 725 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 726 727 /* clean all pages */ 728 for (i = 0; i < req->r_num_ops; i++) { 729 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) 730 break; 731 732 osd_data = osd_req_op_extent_osd_data(req, i); 733 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); 734 num_pages = calc_pages_for((u64)osd_data->alignment, 735 (u64)osd_data->length); 736 total_pages += num_pages; 737 for (j = 0; j < num_pages; j++) { 738 page = osd_data->pages[j]; 739 BUG_ON(!page); 740 WARN_ON(!PageUptodate(page)); 741 742 if (atomic_long_dec_return(&fsc->writeback_count) < 743 CONGESTION_OFF_THRESH( 744 fsc->mount_options->congestion_kb)) 745 clear_bdi_congested(inode_to_bdi(inode), 746 BLK_RW_ASYNC); 747 748 ceph_put_snap_context(page_snap_context(page)); 749 page->private = 0; 750 ClearPagePrivate(page); 751 dout("unlocking %p\n", page); 752 end_page_writeback(page); 753 754 if (remove_page) 755 generic_error_remove_page(inode->i_mapping, 756 page); 757 758 unlock_page(page); 759 } 760 dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 761 inode, osd_data->length, rc >= 0 ? num_pages : 0); 762 763 ceph_release_pages(osd_data->pages, num_pages); 764 } 765 766 ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc); 767 768 osd_data = osd_req_op_extent_osd_data(req, 0); 769 if (osd_data->pages_from_pool) 770 mempool_free(osd_data->pages, 771 ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); 772 else 773 kfree(osd_data->pages); 774 ceph_osdc_put_request(req); 775 } 776 777 /* 778 * initiate async writeback 779 */ 780 static int ceph_writepages_start(struct address_space *mapping, 781 struct writeback_control *wbc) 782 { 783 struct inode *inode = mapping->host; 784 struct ceph_inode_info *ci = ceph_inode(inode); 785 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 786 struct ceph_vino vino = ceph_vino(inode); 787 pgoff_t index, start_index, end = -1; 788 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; 789 struct pagevec pvec; 790 int rc = 0; 791 unsigned int wsize = i_blocksize(inode); 792 struct ceph_osd_request *req = NULL; 793 struct ceph_writeback_ctl ceph_wbc; 794 bool should_loop, range_whole = false; 795 bool stop, done = false; 796 797 dout("writepages_start %p (mode=%s)\n", inode, 798 wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 799 (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 800 801 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 802 if (ci->i_wrbuffer_ref > 0) { 803 pr_warn_ratelimited( 804 "writepage_start %p %lld forced umount\n", 805 inode, ceph_ino(inode)); 806 } 807 mapping_set_error(mapping, -EIO); 808 return -EIO; /* we're in a forced umount, don't write! */ 809 } 810 if (fsc->mount_options->wsize < wsize) 811 wsize = fsc->mount_options->wsize; 812 813 pagevec_init(&pvec, 0); 814 815 start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 816 index = start_index; 817 818 retry: 819 /* find oldest snap context with dirty data */ 820 snapc = get_oldest_context(inode, &ceph_wbc, NULL); 821 if (!snapc) { 822 /* hmm, why does writepages get called when there 823 is no dirty data? */ 824 dout(" no snap context with dirty data?\n"); 825 goto out; 826 } 827 dout(" oldest snapc is %p seq %lld (%d snaps)\n", 828 snapc, snapc->seq, snapc->num_snaps); 829 830 should_loop = false; 831 if (ceph_wbc.head_snapc && snapc != last_snapc) { 832 /* where to start/end? */ 833 if (wbc->range_cyclic) { 834 index = start_index; 835 end = -1; 836 if (index > 0) 837 should_loop = true; 838 dout(" cyclic, start at %lu\n", index); 839 } else { 840 index = wbc->range_start >> PAGE_SHIFT; 841 end = wbc->range_end >> PAGE_SHIFT; 842 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 843 range_whole = true; 844 dout(" not cyclic, %lu to %lu\n", index, end); 845 } 846 } else if (!ceph_wbc.head_snapc) { 847 /* Do not respect wbc->range_{start,end}. Dirty pages 848 * in that range can be associated with newer snapc. 849 * They are not writeable until we write all dirty pages 850 * associated with 'snapc' get written */ 851 if (index > 0 || wbc->sync_mode != WB_SYNC_NONE) 852 should_loop = true; 853 dout(" non-head snapc, range whole\n"); 854 } 855 856 ceph_put_snap_context(last_snapc); 857 last_snapc = snapc; 858 859 stop = false; 860 while (!stop && index <= end) { 861 int num_ops = 0, op_idx; 862 unsigned i, pvec_pages, max_pages, locked_pages = 0; 863 struct page **pages = NULL, **data_pages; 864 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ 865 struct page *page; 866 pgoff_t strip_unit_end = 0; 867 u64 offset = 0, len = 0; 868 869 max_pages = wsize >> PAGE_SHIFT; 870 871 get_more_pages: 872 pvec_pages = min_t(unsigned, PAGEVEC_SIZE, 873 max_pages - locked_pages); 874 if (end - index < (u64)(pvec_pages - 1)) 875 pvec_pages = (unsigned)(end - index) + 1; 876 877 pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, 878 PAGECACHE_TAG_DIRTY, 879 pvec_pages); 880 dout("pagevec_lookup_tag got %d\n", pvec_pages); 881 if (!pvec_pages && !locked_pages) 882 break; 883 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { 884 page = pvec.pages[i]; 885 dout("? %p idx %lu\n", page, page->index); 886 if (locked_pages == 0) 887 lock_page(page); /* first page */ 888 else if (!trylock_page(page)) 889 break; 890 891 /* only dirty pages, or our accounting breaks */ 892 if (unlikely(!PageDirty(page)) || 893 unlikely(page->mapping != mapping)) { 894 dout("!dirty or !mapping %p\n", page); 895 unlock_page(page); 896 continue; 897 } 898 if (page->index > end) { 899 dout("end of range %p\n", page); 900 /* can't be range_cyclic (1st pass) because 901 * end == -1 in that case. */ 902 stop = true; 903 if (ceph_wbc.head_snapc) 904 done = true; 905 unlock_page(page); 906 break; 907 } 908 if (strip_unit_end && (page->index > strip_unit_end)) { 909 dout("end of strip unit %p\n", page); 910 unlock_page(page); 911 break; 912 } 913 if (page_offset(page) >= ceph_wbc.i_size) { 914 dout("%p page eof %llu\n", 915 page, ceph_wbc.i_size); 916 /* not done if range_cyclic */ 917 stop = true; 918 unlock_page(page); 919 break; 920 } 921 if (PageWriteback(page)) { 922 if (wbc->sync_mode == WB_SYNC_NONE) { 923 dout("%p under writeback\n", page); 924 unlock_page(page); 925 continue; 926 } 927 dout("waiting on writeback %p\n", page); 928 wait_on_page_writeback(page); 929 } 930 931 /* only if matching snap context */ 932 pgsnapc = page_snap_context(page); 933 if (pgsnapc != snapc) { 934 dout("page snapc %p %lld != oldest %p %lld\n", 935 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 936 unlock_page(page); 937 continue; 938 } 939 940 if (!clear_page_dirty_for_io(page)) { 941 dout("%p !clear_page_dirty_for_io\n", page); 942 unlock_page(page); 943 continue; 944 } 945 946 /* 947 * We have something to write. If this is 948 * the first locked page this time through, 949 * calculate max possinle write size and 950 * allocate a page array 951 */ 952 if (locked_pages == 0) { 953 u64 objnum; 954 u64 objoff; 955 956 /* prepare async write request */ 957 offset = (u64)page_offset(page); 958 len = wsize; 959 960 rc = ceph_calc_file_object_mapping(&ci->i_layout, 961 offset, len, 962 &objnum, &objoff, 963 &len); 964 if (rc < 0) { 965 unlock_page(page); 966 break; 967 } 968 969 num_ops = 1; 970 strip_unit_end = page->index + 971 ((len - 1) >> PAGE_SHIFT); 972 973 BUG_ON(pages); 974 max_pages = calc_pages_for(0, (u64)len); 975 pages = kmalloc(max_pages * sizeof (*pages), 976 GFP_NOFS); 977 if (!pages) { 978 pool = fsc->wb_pagevec_pool; 979 pages = mempool_alloc(pool, GFP_NOFS); 980 BUG_ON(!pages); 981 } 982 983 len = 0; 984 } else if (page->index != 985 (offset + len) >> PAGE_SHIFT) { 986 if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : 987 CEPH_OSD_MAX_OPS)) { 988 redirty_page_for_writepage(wbc, page); 989 unlock_page(page); 990 break; 991 } 992 993 num_ops++; 994 offset = (u64)page_offset(page); 995 len = 0; 996 } 997 998 /* note position of first page in pvec */ 999 dout("%p will write page %p idx %lu\n", 1000 inode, page, page->index); 1001 1002 if (atomic_long_inc_return(&fsc->writeback_count) > 1003 CONGESTION_ON_THRESH( 1004 fsc->mount_options->congestion_kb)) { 1005 set_bdi_congested(inode_to_bdi(inode), 1006 BLK_RW_ASYNC); 1007 } 1008 1009 1010 pages[locked_pages++] = page; 1011 pvec.pages[i] = NULL; 1012 1013 len += PAGE_SIZE; 1014 } 1015 1016 /* did we get anything? */ 1017 if (!locked_pages) 1018 goto release_pvec_pages; 1019 if (i) { 1020 unsigned j, n = 0; 1021 /* shift unused page to beginning of pvec */ 1022 for (j = 0; j < pvec_pages; j++) { 1023 if (!pvec.pages[j]) 1024 continue; 1025 if (n < j) 1026 pvec.pages[n] = pvec.pages[j]; 1027 n++; 1028 } 1029 pvec.nr = n; 1030 1031 if (pvec_pages && i == pvec_pages && 1032 locked_pages < max_pages) { 1033 dout("reached end pvec, trying for more\n"); 1034 pagevec_release(&pvec); 1035 goto get_more_pages; 1036 } 1037 } 1038 1039 new_request: 1040 offset = page_offset(pages[0]); 1041 len = wsize; 1042 1043 req = ceph_osdc_new_request(&fsc->client->osdc, 1044 &ci->i_layout, vino, 1045 offset, &len, 0, num_ops, 1046 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 1047 snapc, ceph_wbc.truncate_seq, 1048 ceph_wbc.truncate_size, false); 1049 if (IS_ERR(req)) { 1050 req = ceph_osdc_new_request(&fsc->client->osdc, 1051 &ci->i_layout, vino, 1052 offset, &len, 0, 1053 min(num_ops, 1054 CEPH_OSD_SLAB_OPS), 1055 CEPH_OSD_OP_WRITE, 1056 CEPH_OSD_FLAG_WRITE, 1057 snapc, ceph_wbc.truncate_seq, 1058 ceph_wbc.truncate_size, true); 1059 BUG_ON(IS_ERR(req)); 1060 } 1061 BUG_ON(len < page_offset(pages[locked_pages - 1]) + 1062 PAGE_SIZE - offset); 1063 1064 req->r_callback = writepages_finish; 1065 req->r_inode = inode; 1066 1067 /* Format the osd request message and submit the write */ 1068 len = 0; 1069 data_pages = pages; 1070 op_idx = 0; 1071 for (i = 0; i < locked_pages; i++) { 1072 u64 cur_offset = page_offset(pages[i]); 1073 if (offset + len != cur_offset) { 1074 if (op_idx + 1 == req->r_num_ops) 1075 break; 1076 osd_req_op_extent_dup_last(req, op_idx, 1077 cur_offset - offset); 1078 dout("writepages got pages at %llu~%llu\n", 1079 offset, len); 1080 osd_req_op_extent_osd_data_pages(req, op_idx, 1081 data_pages, len, 0, 1082 !!pool, false); 1083 osd_req_op_extent_update(req, op_idx, len); 1084 1085 len = 0; 1086 offset = cur_offset; 1087 data_pages = pages + i; 1088 op_idx++; 1089 } 1090 1091 set_page_writeback(pages[i]); 1092 len += PAGE_SIZE; 1093 } 1094 1095 if (ceph_wbc.size_stable) { 1096 len = min(len, ceph_wbc.i_size - offset); 1097 } else if (i == locked_pages) { 1098 /* writepages_finish() clears writeback pages 1099 * according to the data length, so make sure 1100 * data length covers all locked pages */ 1101 u64 min_len = len + 1 - PAGE_SIZE; 1102 len = get_writepages_data_length(inode, pages[i - 1], 1103 offset); 1104 len = max(len, min_len); 1105 } 1106 dout("writepages got pages at %llu~%llu\n", offset, len); 1107 1108 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1109 0, !!pool, false); 1110 osd_req_op_extent_update(req, op_idx, len); 1111 1112 BUG_ON(op_idx + 1 != req->r_num_ops); 1113 1114 pool = NULL; 1115 if (i < locked_pages) { 1116 BUG_ON(num_ops <= req->r_num_ops); 1117 num_ops -= req->r_num_ops; 1118 locked_pages -= i; 1119 1120 /* allocate new pages array for next request */ 1121 data_pages = pages; 1122 pages = kmalloc(locked_pages * sizeof (*pages), 1123 GFP_NOFS); 1124 if (!pages) { 1125 pool = fsc->wb_pagevec_pool; 1126 pages = mempool_alloc(pool, GFP_NOFS); 1127 BUG_ON(!pages); 1128 } 1129 memcpy(pages, data_pages + i, 1130 locked_pages * sizeof(*pages)); 1131 memset(data_pages + i, 0, 1132 locked_pages * sizeof(*pages)); 1133 } else { 1134 BUG_ON(num_ops != req->r_num_ops); 1135 index = pages[i - 1]->index + 1; 1136 /* request message now owns the pages array */ 1137 pages = NULL; 1138 } 1139 1140 req->r_mtime = inode->i_mtime; 1141 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 1142 BUG_ON(rc); 1143 req = NULL; 1144 1145 wbc->nr_to_write -= i; 1146 if (pages) 1147 goto new_request; 1148 1149 /* 1150 * We stop writing back only if we are not doing 1151 * integrity sync. In case of integrity sync we have to 1152 * keep going until we have written all the pages 1153 * we tagged for writeback prior to entering this loop. 1154 */ 1155 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) 1156 done = stop = true; 1157 1158 release_pvec_pages: 1159 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, 1160 pvec.nr ? pvec.pages[0] : NULL); 1161 pagevec_release(&pvec); 1162 } 1163 1164 if (should_loop && !done) { 1165 /* more to do; loop back to beginning of file */ 1166 dout("writepages looping back to beginning of file\n"); 1167 end = start_index - 1; /* OK even when start_index == 0 */ 1168 1169 /* to write dirty pages associated with next snapc, 1170 * we need to wait until current writes complete */ 1171 if (wbc->sync_mode != WB_SYNC_NONE && 1172 start_index == 0 && /* all dirty pages were checked */ 1173 !ceph_wbc.head_snapc) { 1174 struct page *page; 1175 unsigned i, nr; 1176 index = 0; 1177 while ((index <= end) && 1178 (nr = pagevec_lookup_tag(&pvec, mapping, &index, 1179 PAGECACHE_TAG_WRITEBACK, 1180 PAGEVEC_SIZE))) { 1181 for (i = 0; i < nr; i++) { 1182 page = pvec.pages[i]; 1183 if (page_snap_context(page) != snapc) 1184 continue; 1185 wait_on_page_writeback(page); 1186 } 1187 pagevec_release(&pvec); 1188 cond_resched(); 1189 } 1190 } 1191 1192 start_index = 0; 1193 index = 0; 1194 goto retry; 1195 } 1196 1197 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 1198 mapping->writeback_index = index; 1199 1200 out: 1201 ceph_osdc_put_request(req); 1202 ceph_put_snap_context(last_snapc); 1203 dout("writepages dend - startone, rc = %d\n", rc); 1204 return rc; 1205 } 1206 1207 1208 1209 /* 1210 * See if a given @snapc is either writeable, or already written. 1211 */ 1212 static int context_is_writeable_or_written(struct inode *inode, 1213 struct ceph_snap_context *snapc) 1214 { 1215 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL); 1216 int ret = !oldest || snapc->seq <= oldest->seq; 1217 1218 ceph_put_snap_context(oldest); 1219 return ret; 1220 } 1221 1222 /* 1223 * We are only allowed to write into/dirty the page if the page is 1224 * clean, or already dirty within the same snap context. 1225 * 1226 * called with page locked. 1227 * return success with page locked, 1228 * or any failure (incl -EAGAIN) with page unlocked. 1229 */ 1230 static int ceph_update_writeable_page(struct file *file, 1231 loff_t pos, unsigned len, 1232 struct page *page) 1233 { 1234 struct inode *inode = file_inode(file); 1235 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1236 struct ceph_inode_info *ci = ceph_inode(inode); 1237 loff_t page_off = pos & PAGE_MASK; 1238 int pos_in_page = pos & ~PAGE_MASK; 1239 int end_in_page = pos_in_page + len; 1240 loff_t i_size; 1241 int r; 1242 struct ceph_snap_context *snapc, *oldest; 1243 1244 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 1245 dout(" page %p forced umount\n", page); 1246 unlock_page(page); 1247 return -EIO; 1248 } 1249 1250 retry_locked: 1251 /* writepages currently holds page lock, but if we change that later, */ 1252 wait_on_page_writeback(page); 1253 1254 snapc = page_snap_context(page); 1255 if (snapc && snapc != ci->i_head_snapc) { 1256 /* 1257 * this page is already dirty in another (older) snap 1258 * context! is it writeable now? 1259 */ 1260 oldest = get_oldest_context(inode, NULL, NULL); 1261 if (snapc->seq > oldest->seq) { 1262 ceph_put_snap_context(oldest); 1263 dout(" page %p snapc %p not current or oldest\n", 1264 page, snapc); 1265 /* 1266 * queue for writeback, and wait for snapc to 1267 * be writeable or written 1268 */ 1269 snapc = ceph_get_snap_context(snapc); 1270 unlock_page(page); 1271 ceph_queue_writeback(inode); 1272 r = wait_event_killable(ci->i_cap_wq, 1273 context_is_writeable_or_written(inode, snapc)); 1274 ceph_put_snap_context(snapc); 1275 if (r == -ERESTARTSYS) 1276 return r; 1277 return -EAGAIN; 1278 } 1279 ceph_put_snap_context(oldest); 1280 1281 /* yay, writeable, do it now (without dropping page lock) */ 1282 dout(" page %p snapc %p not current, but oldest\n", 1283 page, snapc); 1284 if (!clear_page_dirty_for_io(page)) 1285 goto retry_locked; 1286 r = writepage_nounlock(page, NULL); 1287 if (r < 0) 1288 goto fail_unlock; 1289 goto retry_locked; 1290 } 1291 1292 if (PageUptodate(page)) { 1293 dout(" page %p already uptodate\n", page); 1294 return 0; 1295 } 1296 1297 /* full page? */ 1298 if (pos_in_page == 0 && len == PAGE_SIZE) 1299 return 0; 1300 1301 /* past end of file? */ 1302 i_size = i_size_read(inode); 1303 1304 if (page_off >= i_size || 1305 (pos_in_page == 0 && (pos+len) >= i_size && 1306 end_in_page - pos_in_page != PAGE_SIZE)) { 1307 dout(" zeroing %p 0 - %d and %d - %d\n", 1308 page, pos_in_page, end_in_page, (int)PAGE_SIZE); 1309 zero_user_segments(page, 1310 0, pos_in_page, 1311 end_in_page, PAGE_SIZE); 1312 return 0; 1313 } 1314 1315 /* we need to read it. */ 1316 r = ceph_do_readpage(file, page); 1317 if (r < 0) { 1318 if (r == -EINPROGRESS) 1319 return -EAGAIN; 1320 goto fail_unlock; 1321 } 1322 goto retry_locked; 1323 fail_unlock: 1324 unlock_page(page); 1325 return r; 1326 } 1327 1328 /* 1329 * We are only allowed to write into/dirty the page if the page is 1330 * clean, or already dirty within the same snap context. 1331 */ 1332 static int ceph_write_begin(struct file *file, struct address_space *mapping, 1333 loff_t pos, unsigned len, unsigned flags, 1334 struct page **pagep, void **fsdata) 1335 { 1336 struct inode *inode = file_inode(file); 1337 struct page *page; 1338 pgoff_t index = pos >> PAGE_SHIFT; 1339 int r; 1340 1341 do { 1342 /* get a page */ 1343 page = grab_cache_page_write_begin(mapping, index, 0); 1344 if (!page) 1345 return -ENOMEM; 1346 1347 dout("write_begin file %p inode %p page %p %d~%d\n", file, 1348 inode, page, (int)pos, (int)len); 1349 1350 r = ceph_update_writeable_page(file, pos, len, page); 1351 if (r < 0) 1352 put_page(page); 1353 else 1354 *pagep = page; 1355 } while (r == -EAGAIN); 1356 1357 return r; 1358 } 1359 1360 /* 1361 * we don't do anything in here that simple_write_end doesn't do 1362 * except adjust dirty page accounting 1363 */ 1364 static int ceph_write_end(struct file *file, struct address_space *mapping, 1365 loff_t pos, unsigned len, unsigned copied, 1366 struct page *page, void *fsdata) 1367 { 1368 struct inode *inode = file_inode(file); 1369 bool check_cap = false; 1370 1371 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, 1372 inode, page, (int)pos, (int)copied, (int)len); 1373 1374 /* zero the stale part of the page if we did a short copy */ 1375 if (!PageUptodate(page)) { 1376 if (copied < len) { 1377 copied = 0; 1378 goto out; 1379 } 1380 SetPageUptodate(page); 1381 } 1382 1383 /* did file size increase? */ 1384 if (pos+copied > i_size_read(inode)) 1385 check_cap = ceph_inode_set_size(inode, pos+copied); 1386 1387 set_page_dirty(page); 1388 1389 out: 1390 unlock_page(page); 1391 put_page(page); 1392 1393 if (check_cap) 1394 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); 1395 1396 return copied; 1397 } 1398 1399 /* 1400 * we set .direct_IO to indicate direct io is supported, but since we 1401 * intercept O_DIRECT reads and writes early, this function should 1402 * never get called. 1403 */ 1404 static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter) 1405 { 1406 WARN_ON(1); 1407 return -EINVAL; 1408 } 1409 1410 const struct address_space_operations ceph_aops = { 1411 .readpage = ceph_readpage, 1412 .readpages = ceph_readpages, 1413 .writepage = ceph_writepage, 1414 .writepages = ceph_writepages_start, 1415 .write_begin = ceph_write_begin, 1416 .write_end = ceph_write_end, 1417 .set_page_dirty = ceph_set_page_dirty, 1418 .invalidatepage = ceph_invalidatepage, 1419 .releasepage = ceph_releasepage, 1420 .direct_IO = ceph_direct_io, 1421 }; 1422 1423 static void ceph_block_sigs(sigset_t *oldset) 1424 { 1425 sigset_t mask; 1426 siginitsetinv(&mask, sigmask(SIGKILL)); 1427 sigprocmask(SIG_BLOCK, &mask, oldset); 1428 } 1429 1430 static void ceph_restore_sigs(sigset_t *oldset) 1431 { 1432 sigprocmask(SIG_SETMASK, oldset, NULL); 1433 } 1434 1435 /* 1436 * vm ops 1437 */ 1438 static int ceph_filemap_fault(struct vm_fault *vmf) 1439 { 1440 struct vm_area_struct *vma = vmf->vma; 1441 struct inode *inode = file_inode(vma->vm_file); 1442 struct ceph_inode_info *ci = ceph_inode(inode); 1443 struct ceph_file_info *fi = vma->vm_file->private_data; 1444 struct page *pinned_page = NULL; 1445 loff_t off = vmf->pgoff << PAGE_SHIFT; 1446 int want, got, ret; 1447 sigset_t oldset; 1448 1449 ceph_block_sigs(&oldset); 1450 1451 dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", 1452 inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); 1453 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1454 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1455 else 1456 want = CEPH_CAP_FILE_CACHE; 1457 1458 got = 0; 1459 ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page); 1460 if (ret < 0) 1461 goto out_restore; 1462 1463 dout("filemap_fault %p %llu~%zd got cap refs on %s\n", 1464 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); 1465 1466 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1467 ci->i_inline_version == CEPH_INLINE_NONE) { 1468 current->journal_info = vma->vm_file; 1469 ret = filemap_fault(vmf); 1470 current->journal_info = NULL; 1471 } else 1472 ret = -EAGAIN; 1473 1474 dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", 1475 inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret); 1476 if (pinned_page) 1477 put_page(pinned_page); 1478 ceph_put_cap_refs(ci, got); 1479 1480 if (ret != -EAGAIN) 1481 goto out_restore; 1482 1483 /* read inline data */ 1484 if (off >= PAGE_SIZE) { 1485 /* does not support inline data > PAGE_SIZE */ 1486 ret = VM_FAULT_SIGBUS; 1487 } else { 1488 int ret1; 1489 struct address_space *mapping = inode->i_mapping; 1490 struct page *page = find_or_create_page(mapping, 0, 1491 mapping_gfp_constraint(mapping, 1492 ~__GFP_FS)); 1493 if (!page) { 1494 ret = VM_FAULT_OOM; 1495 goto out_inline; 1496 } 1497 ret1 = __ceph_do_getattr(inode, page, 1498 CEPH_STAT_CAP_INLINE_DATA, true); 1499 if (ret1 < 0 || off >= i_size_read(inode)) { 1500 unlock_page(page); 1501 put_page(page); 1502 if (ret1 < 0) 1503 ret = ret1; 1504 else 1505 ret = VM_FAULT_SIGBUS; 1506 goto out_inline; 1507 } 1508 if (ret1 < PAGE_SIZE) 1509 zero_user_segment(page, ret1, PAGE_SIZE); 1510 else 1511 flush_dcache_page(page); 1512 SetPageUptodate(page); 1513 vmf->page = page; 1514 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 1515 out_inline: 1516 dout("filemap_fault %p %llu~%zd read inline data ret %d\n", 1517 inode, off, (size_t)PAGE_SIZE, ret); 1518 } 1519 out_restore: 1520 ceph_restore_sigs(&oldset); 1521 if (ret < 0) 1522 ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 1523 1524 return ret; 1525 } 1526 1527 /* 1528 * Reuse write_begin here for simplicity. 1529 */ 1530 static int ceph_page_mkwrite(struct vm_fault *vmf) 1531 { 1532 struct vm_area_struct *vma = vmf->vma; 1533 struct inode *inode = file_inode(vma->vm_file); 1534 struct ceph_inode_info *ci = ceph_inode(inode); 1535 struct ceph_file_info *fi = vma->vm_file->private_data; 1536 struct ceph_cap_flush *prealloc_cf; 1537 struct page *page = vmf->page; 1538 loff_t off = page_offset(page); 1539 loff_t size = i_size_read(inode); 1540 size_t len; 1541 int want, got, ret; 1542 sigset_t oldset; 1543 1544 prealloc_cf = ceph_alloc_cap_flush(); 1545 if (!prealloc_cf) 1546 return VM_FAULT_OOM; 1547 1548 ceph_block_sigs(&oldset); 1549 1550 if (ci->i_inline_version != CEPH_INLINE_NONE) { 1551 struct page *locked_page = NULL; 1552 if (off == 0) { 1553 lock_page(page); 1554 locked_page = page; 1555 } 1556 ret = ceph_uninline_data(vma->vm_file, locked_page); 1557 if (locked_page) 1558 unlock_page(locked_page); 1559 if (ret < 0) 1560 goto out_free; 1561 } 1562 1563 if (off + PAGE_SIZE <= size) 1564 len = PAGE_SIZE; 1565 else 1566 len = size & ~PAGE_MASK; 1567 1568 dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1569 inode, ceph_vinop(inode), off, len, size); 1570 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1571 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1572 else 1573 want = CEPH_CAP_FILE_BUFFER; 1574 1575 got = 0; 1576 ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len, 1577 &got, NULL); 1578 if (ret < 0) 1579 goto out_free; 1580 1581 dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 1582 inode, off, len, ceph_cap_string(got)); 1583 1584 /* Update time before taking page lock */ 1585 file_update_time(vma->vm_file); 1586 1587 do { 1588 lock_page(page); 1589 1590 if ((off > size) || (page->mapping != inode->i_mapping)) { 1591 unlock_page(page); 1592 ret = VM_FAULT_NOPAGE; 1593 break; 1594 } 1595 1596 ret = ceph_update_writeable_page(vma->vm_file, off, len, page); 1597 if (ret >= 0) { 1598 /* success. we'll keep the page locked. */ 1599 set_page_dirty(page); 1600 ret = VM_FAULT_LOCKED; 1601 } 1602 } while (ret == -EAGAIN); 1603 1604 if (ret == VM_FAULT_LOCKED || 1605 ci->i_inline_version != CEPH_INLINE_NONE) { 1606 int dirty; 1607 spin_lock(&ci->i_ceph_lock); 1608 ci->i_inline_version = CEPH_INLINE_NONE; 1609 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, 1610 &prealloc_cf); 1611 spin_unlock(&ci->i_ceph_lock); 1612 if (dirty) 1613 __mark_inode_dirty(inode, dirty); 1614 } 1615 1616 dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n", 1617 inode, off, len, ceph_cap_string(got), ret); 1618 ceph_put_cap_refs(ci, got); 1619 out_free: 1620 ceph_restore_sigs(&oldset); 1621 ceph_free_cap_flush(prealloc_cf); 1622 if (ret < 0) 1623 ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 1624 return ret; 1625 } 1626 1627 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 1628 char *data, size_t len) 1629 { 1630 struct address_space *mapping = inode->i_mapping; 1631 struct page *page; 1632 1633 if (locked_page) { 1634 page = locked_page; 1635 } else { 1636 if (i_size_read(inode) == 0) 1637 return; 1638 page = find_or_create_page(mapping, 0, 1639 mapping_gfp_constraint(mapping, 1640 ~__GFP_FS)); 1641 if (!page) 1642 return; 1643 if (PageUptodate(page)) { 1644 unlock_page(page); 1645 put_page(page); 1646 return; 1647 } 1648 } 1649 1650 dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1651 inode, ceph_vinop(inode), len, locked_page); 1652 1653 if (len > 0) { 1654 void *kaddr = kmap_atomic(page); 1655 memcpy(kaddr, data, len); 1656 kunmap_atomic(kaddr); 1657 } 1658 1659 if (page != locked_page) { 1660 if (len < PAGE_SIZE) 1661 zero_user_segment(page, len, PAGE_SIZE); 1662 else 1663 flush_dcache_page(page); 1664 1665 SetPageUptodate(page); 1666 unlock_page(page); 1667 put_page(page); 1668 } 1669 } 1670 1671 int ceph_uninline_data(struct file *filp, struct page *locked_page) 1672 { 1673 struct inode *inode = file_inode(filp); 1674 struct ceph_inode_info *ci = ceph_inode(inode); 1675 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1676 struct ceph_osd_request *req; 1677 struct page *page = NULL; 1678 u64 len, inline_version; 1679 int err = 0; 1680 bool from_pagecache = false; 1681 1682 spin_lock(&ci->i_ceph_lock); 1683 inline_version = ci->i_inline_version; 1684 spin_unlock(&ci->i_ceph_lock); 1685 1686 dout("uninline_data %p %llx.%llx inline_version %llu\n", 1687 inode, ceph_vinop(inode), inline_version); 1688 1689 if (inline_version == 1 || /* initial version, no data */ 1690 inline_version == CEPH_INLINE_NONE) 1691 goto out; 1692 1693 if (locked_page) { 1694 page = locked_page; 1695 WARN_ON(!PageUptodate(page)); 1696 } else if (ceph_caps_issued(ci) & 1697 (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) { 1698 page = find_get_page(inode->i_mapping, 0); 1699 if (page) { 1700 if (PageUptodate(page)) { 1701 from_pagecache = true; 1702 lock_page(page); 1703 } else { 1704 put_page(page); 1705 page = NULL; 1706 } 1707 } 1708 } 1709 1710 if (page) { 1711 len = i_size_read(inode); 1712 if (len > PAGE_SIZE) 1713 len = PAGE_SIZE; 1714 } else { 1715 page = __page_cache_alloc(GFP_NOFS); 1716 if (!page) { 1717 err = -ENOMEM; 1718 goto out; 1719 } 1720 err = __ceph_do_getattr(inode, page, 1721 CEPH_STAT_CAP_INLINE_DATA, true); 1722 if (err < 0) { 1723 /* no inline data */ 1724 if (err == -ENODATA) 1725 err = 0; 1726 goto out; 1727 } 1728 len = err; 1729 } 1730 1731 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1732 ceph_vino(inode), 0, &len, 0, 1, 1733 CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE, 1734 NULL, 0, 0, false); 1735 if (IS_ERR(req)) { 1736 err = PTR_ERR(req); 1737 goto out; 1738 } 1739 1740 req->r_mtime = inode->i_mtime; 1741 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1742 if (!err) 1743 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1744 ceph_osdc_put_request(req); 1745 if (err < 0) 1746 goto out; 1747 1748 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 1749 ceph_vino(inode), 0, &len, 1, 3, 1750 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, 1751 NULL, ci->i_truncate_seq, 1752 ci->i_truncate_size, false); 1753 if (IS_ERR(req)) { 1754 err = PTR_ERR(req); 1755 goto out; 1756 } 1757 1758 osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false); 1759 1760 { 1761 __le64 xattr_buf = cpu_to_le64(inline_version); 1762 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR, 1763 "inline_version", &xattr_buf, 1764 sizeof(xattr_buf), 1765 CEPH_OSD_CMPXATTR_OP_GT, 1766 CEPH_OSD_CMPXATTR_MODE_U64); 1767 if (err) 1768 goto out_put; 1769 } 1770 1771 { 1772 char xattr_buf[32]; 1773 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf), 1774 "%llu", inline_version); 1775 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR, 1776 "inline_version", 1777 xattr_buf, xattr_len, 0, 0); 1778 if (err) 1779 goto out_put; 1780 } 1781 1782 req->r_mtime = inode->i_mtime; 1783 err = ceph_osdc_start_request(&fsc->client->osdc, req, false); 1784 if (!err) 1785 err = ceph_osdc_wait_request(&fsc->client->osdc, req); 1786 out_put: 1787 ceph_osdc_put_request(req); 1788 if (err == -ECANCELED) 1789 err = 0; 1790 out: 1791 if (page && page != locked_page) { 1792 if (from_pagecache) { 1793 unlock_page(page); 1794 put_page(page); 1795 } else 1796 __free_pages(page, 0); 1797 } 1798 1799 dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 1800 inode, ceph_vinop(inode), inline_version, err); 1801 return err; 1802 } 1803 1804 static const struct vm_operations_struct ceph_vmops = { 1805 .fault = ceph_filemap_fault, 1806 .page_mkwrite = ceph_page_mkwrite, 1807 }; 1808 1809 int ceph_mmap(struct file *file, struct vm_area_struct *vma) 1810 { 1811 struct address_space *mapping = file->f_mapping; 1812 1813 if (!mapping->a_ops->readpage) 1814 return -ENOEXEC; 1815 file_accessed(file); 1816 vma->vm_ops = &ceph_vmops; 1817 return 0; 1818 } 1819 1820 enum { 1821 POOL_READ = 1, 1822 POOL_WRITE = 2, 1823 }; 1824 1825 static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 1826 s64 pool, struct ceph_string *pool_ns) 1827 { 1828 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); 1829 struct ceph_mds_client *mdsc = fsc->mdsc; 1830 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 1831 struct rb_node **p, *parent; 1832 struct ceph_pool_perm *perm; 1833 struct page **pages; 1834 size_t pool_ns_len; 1835 int err = 0, err2 = 0, have = 0; 1836 1837 down_read(&mdsc->pool_perm_rwsem); 1838 p = &mdsc->pool_perm_tree.rb_node; 1839 while (*p) { 1840 perm = rb_entry(*p, struct ceph_pool_perm, node); 1841 if (pool < perm->pool) 1842 p = &(*p)->rb_left; 1843 else if (pool > perm->pool) 1844 p = &(*p)->rb_right; 1845 else { 1846 int ret = ceph_compare_string(pool_ns, 1847 perm->pool_ns, 1848 perm->pool_ns_len); 1849 if (ret < 0) 1850 p = &(*p)->rb_left; 1851 else if (ret > 0) 1852 p = &(*p)->rb_right; 1853 else { 1854 have = perm->perm; 1855 break; 1856 } 1857 } 1858 } 1859 up_read(&mdsc->pool_perm_rwsem); 1860 if (*p) 1861 goto out; 1862 1863 if (pool_ns) 1864 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n", 1865 pool, (int)pool_ns->len, pool_ns->str); 1866 else 1867 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 1868 1869 down_write(&mdsc->pool_perm_rwsem); 1870 p = &mdsc->pool_perm_tree.rb_node; 1871 parent = NULL; 1872 while (*p) { 1873 parent = *p; 1874 perm = rb_entry(parent, struct ceph_pool_perm, node); 1875 if (pool < perm->pool) 1876 p = &(*p)->rb_left; 1877 else if (pool > perm->pool) 1878 p = &(*p)->rb_right; 1879 else { 1880 int ret = ceph_compare_string(pool_ns, 1881 perm->pool_ns, 1882 perm->pool_ns_len); 1883 if (ret < 0) 1884 p = &(*p)->rb_left; 1885 else if (ret > 0) 1886 p = &(*p)->rb_right; 1887 else { 1888 have = perm->perm; 1889 break; 1890 } 1891 } 1892 } 1893 if (*p) { 1894 up_write(&mdsc->pool_perm_rwsem); 1895 goto out; 1896 } 1897 1898 rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1899 1, false, GFP_NOFS); 1900 if (!rd_req) { 1901 err = -ENOMEM; 1902 goto out_unlock; 1903 } 1904 1905 rd_req->r_flags = CEPH_OSD_FLAG_READ; 1906 osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); 1907 rd_req->r_base_oloc.pool = pool; 1908 if (pool_ns) 1909 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns); 1910 ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino); 1911 1912 err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS); 1913 if (err) 1914 goto out_unlock; 1915 1916 wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL, 1917 1, false, GFP_NOFS); 1918 if (!wr_req) { 1919 err = -ENOMEM; 1920 goto out_unlock; 1921 } 1922 1923 wr_req->r_flags = CEPH_OSD_FLAG_WRITE; 1924 osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); 1925 ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc); 1926 ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid); 1927 1928 err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS); 1929 if (err) 1930 goto out_unlock; 1931 1932 /* one page should be large enough for STAT data */ 1933 pages = ceph_alloc_page_vector(1, GFP_KERNEL); 1934 if (IS_ERR(pages)) { 1935 err = PTR_ERR(pages); 1936 goto out_unlock; 1937 } 1938 1939 osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, 1940 0, false, true); 1941 err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); 1942 1943 wr_req->r_mtime = ci->vfs_inode.i_mtime; 1944 wr_req->r_abort_on_full = true; 1945 err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); 1946 1947 if (!err) 1948 err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); 1949 if (!err2) 1950 err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); 1951 1952 if (err >= 0 || err == -ENOENT) 1953 have |= POOL_READ; 1954 else if (err != -EPERM) 1955 goto out_unlock; 1956 1957 if (err2 == 0 || err2 == -EEXIST) 1958 have |= POOL_WRITE; 1959 else if (err2 != -EPERM) { 1960 err = err2; 1961 goto out_unlock; 1962 } 1963 1964 pool_ns_len = pool_ns ? pool_ns->len : 0; 1965 perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS); 1966 if (!perm) { 1967 err = -ENOMEM; 1968 goto out_unlock; 1969 } 1970 1971 perm->pool = pool; 1972 perm->perm = have; 1973 perm->pool_ns_len = pool_ns_len; 1974 if (pool_ns_len > 0) 1975 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len); 1976 perm->pool_ns[pool_ns_len] = 0; 1977 1978 rb_link_node(&perm->node, parent, p); 1979 rb_insert_color(&perm->node, &mdsc->pool_perm_tree); 1980 err = 0; 1981 out_unlock: 1982 up_write(&mdsc->pool_perm_rwsem); 1983 1984 ceph_osdc_put_request(rd_req); 1985 ceph_osdc_put_request(wr_req); 1986 out: 1987 if (!err) 1988 err = have; 1989 if (pool_ns) 1990 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 1991 pool, (int)pool_ns->len, pool_ns->str, err); 1992 else 1993 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 1994 return err; 1995 } 1996 1997 int ceph_pool_perm_check(struct ceph_inode_info *ci, int need) 1998 { 1999 s64 pool; 2000 struct ceph_string *pool_ns; 2001 int ret, flags; 2002 2003 if (ci->i_vino.snap != CEPH_NOSNAP) { 2004 /* 2005 * Pool permission check needs to write to the first object. 2006 * But for snapshot, head of the first object may have alread 2007 * been deleted. Skip check to avoid creating orphan object. 2008 */ 2009 return 0; 2010 } 2011 2012 if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode), 2013 NOPOOLPERM)) 2014 return 0; 2015 2016 spin_lock(&ci->i_ceph_lock); 2017 flags = ci->i_ceph_flags; 2018 pool = ci->i_layout.pool_id; 2019 spin_unlock(&ci->i_ceph_lock); 2020 check: 2021 if (flags & CEPH_I_POOL_PERM) { 2022 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 2023 dout("ceph_pool_perm_check pool %lld no read perm\n", 2024 pool); 2025 return -EPERM; 2026 } 2027 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 2028 dout("ceph_pool_perm_check pool %lld no write perm\n", 2029 pool); 2030 return -EPERM; 2031 } 2032 return 0; 2033 } 2034 2035 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 2036 ret = __ceph_pool_perm_get(ci, pool, pool_ns); 2037 ceph_put_string(pool_ns); 2038 if (ret < 0) 2039 return ret; 2040 2041 flags = CEPH_I_POOL_PERM; 2042 if (ret & POOL_READ) 2043 flags |= CEPH_I_POOL_RD; 2044 if (ret & POOL_WRITE) 2045 flags |= CEPH_I_POOL_WR; 2046 2047 spin_lock(&ci->i_ceph_lock); 2048 if (pool == ci->i_layout.pool_id && 2049 pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) { 2050 ci->i_ceph_flags |= flags; 2051 } else { 2052 pool = ci->i_layout.pool_id; 2053 flags = ci->i_ceph_flags; 2054 } 2055 spin_unlock(&ci->i_ceph_lock); 2056 goto check; 2057 } 2058 2059 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) 2060 { 2061 struct ceph_pool_perm *perm; 2062 struct rb_node *n; 2063 2064 while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { 2065 n = rb_first(&mdsc->pool_perm_tree); 2066 perm = rb_entry(n, struct ceph_pool_perm, node); 2067 rb_erase(n, &mdsc->pool_perm_tree); 2068 kfree(perm); 2069 } 2070 } 2071