// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

static const struct vm_operations_struct afs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
	if (refcount_dec_and_test(&wbk->usage)) {
		key_put(wbk->key);
		kfree(wbk);
	}
}
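/*
 * Illustrative note on the writeback-key refcounting (a sketch of how
 * the helpers in this file pair up, not additional behaviour):
 * afs_cache_wb_key() below creates a record with a usage count of 2 -
 * one reference held by the vnode's wb_keys list and one by the
 * struct afs_file that owns it - or takes an extra reference on an
 * existing record for the same key.  Both references come back through
 * afs_put_wb_key(); the final put releases the authentication key and
 * frees the record.
 *
 *	afs_open(inode, file)
 *	    afs_cache_wb_key(vnode, af)	// af->wb pinned, on vnode->wb_keys
 *	...
 *	afs_release(inode, file)
 *	    afs_put_wb_key(af->wb)	// drop the file's pin
 *	    afs_prune_wb_keys(vnode)	// may later drop the list's pin
 */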
/*
 * Cache key for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	if (file->f_mode & FMODE_WRITE)
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);
	key_put(af->key);
	kfree(af);
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (refcount_dec_and_test(&req->usage)) {
		if (req->pages) {
			for (i = 0; i < req->nr_pages; i++)
				if (req->pages[i])
					put_page(req->pages[i]);
			if (req->pages != req->array)
				kfree(req->pages);
		}
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif
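/*
 * afs_fetch_data() below follows the usual AFS server-operation shape:
 * take an fs cursor with afs_begin_vnode_operation(), let
 * afs_select_fileserver() pick (and, on failure, rotate to) a
 * fileserver for each attempt, issue the FetchData RPC, then fold the
 * returned status into the vnode and let afs_end_vnode_operation()
 * determine the final error.  A rough sketch of the pattern (the real
 * function also handles callback breaks and statistics):
 *
 *	if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
 *		while (afs_select_fileserver(&fc))
 *			afs_fs_fetch_data(&fc, scb, desc);
 *		afs_vnode_commit_status(&fc, vnode, ...);
 *		ret = afs_end_vnode_operation(&fc);
 *	}
 */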
/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	int ret;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
		afs_dataversion_t data_version = vnode->status.data_version;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_fetch_data(&fc, scb, desc);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	if (ret == 0) {
		afs_stat_v(vnode, n_fetches);
		atomic_long_add(desc->actual_len,
				&afs_v2net(vnode)->n_fetch_bytes);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}
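/*
 * The read paths below drive afs_fetch_data() through a struct afs_read
 * descriptor: pos/len give the byte range to fetch, pages[]/nr_pages
 * list the pages to fill (each carrying a page reference) and
 * page_done, if set, is called as each page is completed.  The caller
 * holds a usage reference that afs_put_read() drops, releasing the page
 * references with it.  A minimal single-page setup, as afs_page_filler()
 * does below:
 *
 *	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
 *	refcount_set(&req->usage, 1);
 *	req->pos = (loff_t)page->index << PAGE_SHIFT;
 *	req->len = PAGE_SIZE;
 *	req->nr_pages = 1;
 *	req->pages = req->array;
 *	req->pages[0] = page;
 *	get_page(page);
 *	ret = afs_fetch_data(vnode, key, req);
 *	afs_put_read(req);
 */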
/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");

		/* fall through */
	default:
	go_on:
		req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		refcount_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages = req->array;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, vnode->status.size,
				       GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

	if (file) {
		key = afs_file_key(file);
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = req->vnode;
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, vnode->status.size,
			       GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}
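/*
 * ->readpages (afs_readpages() below) works through the inbound page
 * list in batches: afs_readpages_one() peels the longest run of
 * index-contiguous pages off the front of the list, wraps them in a
 * single afs_read with afs_readpages_page_done() as the per-page
 * completion hook (so each page is marked up to date and unlocked as it
 * arrives), and fetches the whole run with one afs_fetch_data() call.
 * afs_readpages() repeats this until the list is empty or an error
 * occurs.
 */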
/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = lru_to_page(pages);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	req = kzalloc(struct_size(req, array, n), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;
	req->pages = req->array;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run).
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = lru_to_page(pages);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}
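/*
 * The two address_space hooks below tear down per-page state.  For AFS,
 * page_private() carries the dirty-range bookkeeping set up by the
 * write paths (afs_write_begin()/afs_write_end(), defined elsewhere),
 * so both whole-page invalidation and page release clear it, emitting
 * afs_page_dirty trace events ("inval" and "rel" respectively), after
 * first dealing with any fscache involvement.
 */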
of a page 603 * - release a page and clean up its private data if offset is 0 (indicating 604 * the entire page) 605 */ 606 static void afs_invalidatepage(struct page *page, unsigned int offset, 607 unsigned int length) 608 { 609 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 610 unsigned long priv; 611 612 _enter("{%lu},%u,%u", page->index, offset, length); 613 614 BUG_ON(!PageLocked(page)); 615 616 /* we clean up only if the entire page is being invalidated */ 617 if (offset == 0 && length == PAGE_SIZE) { 618 #ifdef CONFIG_AFS_FSCACHE 619 if (PageFsCache(page)) { 620 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 621 fscache_wait_on_page_write(vnode->cache, page); 622 fscache_uncache_page(vnode->cache, page); 623 } 624 #endif 625 626 if (PagePrivate(page)) { 627 priv = page_private(page); 628 trace_afs_page_dirty(vnode, tracepoint_string("inval"), 629 page->index, priv); 630 set_page_private(page, 0); 631 ClearPagePrivate(page); 632 } 633 } 634 635 _leave(""); 636 } 637 638 /* 639 * release a page and clean up its private state if it's not busy 640 * - return true if the page can now be released, false if not 641 */ 642 static int afs_releasepage(struct page *page, gfp_t gfp_flags) 643 { 644 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 645 unsigned long priv; 646 647 _enter("{{%llx:%llu}[%lu],%lx},%x", 648 vnode->fid.vid, vnode->fid.vnode, page->index, page->flags, 649 gfp_flags); 650 651 /* deny if page is being written to the cache and the caller hasn't 652 * elected to wait */ 653 #ifdef CONFIG_AFS_FSCACHE 654 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { 655 _leave(" = F [cache busy]"); 656 return 0; 657 } 658 #endif 659 660 if (PagePrivate(page)) { 661 priv = page_private(page); 662 trace_afs_page_dirty(vnode, tracepoint_string("rel"), 663 page->index, priv); 664 set_page_private(page, 0); 665 ClearPagePrivate(page); 666 } 667 668 /* indicate that the page can be released */ 669 _leave(" = T"); 670 return 1; 671 } 672 673 /* 674 * Handle setting up a memory mapping on an AFS file. 675 */ 676 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma) 677 { 678 int ret; 679 680 ret = generic_file_mmap(file, vma); 681 if (ret == 0) 682 vma->vm_ops = &afs_vm_ops; 683 return ret; 684 } 685