// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good.
		 */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	down_read(&oi->ip_alloc_sem);
	ret = ocfs2_get_block(inode, iblock, bh_result, create);
	up_read(&oi->ip_alloc_sem);

	return ret;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
		     "block: %llu\n", inode, (unsigned long long)iblock);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has inline data with bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = folio_pos(folio);
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the folio and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		folio_unlock(folio);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_folio->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the folio they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		folio_zero_segment(folio, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, &folio->page);
	else
		ret = block_read_full_folio(folio, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&oi->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		folio_unlock(folio);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static void ocfs2_readahead(struct readahead_control *rac)
{
	int ret;
	struct inode *inode = rac->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
		goto out_unlock;

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_up;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	if (readahead_pos(rac) >= i_size_read(inode))
		goto out_up;

	mpage_readahead(rac, ocfs2_get_block);

out_up:
	up_read(&oi->ip_alloc_sem);
out_unlock:
	ocfs2_inode_unlock(inode, 0);
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3.
 * We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on refcounted inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error..
	 */
	if (ocfs2_is_refcount_inode(inode))
		return 0;

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
{
	if (!folio_buffers(folio))
		return false;
	return try_to_free_buffers(folio);
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = i_blocksize(inode);

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			clean_bdev_bh_alias(bh);
		}

		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			bh_read_nowait(bh, 0);
			*wait_bh++ = bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
660 */ 661 bh = head; 662 block_start = 0; 663 do { 664 block_end = block_start + bsize; 665 if (block_end <= from) 666 goto next_bh; 667 if (block_start >= to) 668 break; 669 670 zero_user(page, block_start, bh->b_size); 671 set_buffer_uptodate(bh); 672 mark_buffer_dirty(bh); 673 674 next_bh: 675 block_start = block_end; 676 bh = bh->b_this_page; 677 } while (bh != head); 678 679 return ret; 680 } 681 682 #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) 683 #define OCFS2_MAX_CTXT_PAGES 1 684 #else 685 #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) 686 #endif 687 688 #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) 689 690 struct ocfs2_unwritten_extent { 691 struct list_head ue_node; 692 struct list_head ue_ip_node; 693 u32 ue_cpos; 694 u32 ue_phys; 695 }; 696 697 /* 698 * Describe the state of a single cluster to be written to. 699 */ 700 struct ocfs2_write_cluster_desc { 701 u32 c_cpos; 702 u32 c_phys; 703 /* 704 * Give this a unique field because c_phys eventually gets 705 * filled. 706 */ 707 unsigned c_new; 708 unsigned c_clear_unwritten; 709 unsigned c_needs_zero; 710 }; 711 712 struct ocfs2_write_ctxt { 713 /* Logical cluster position / len of write */ 714 u32 w_cpos; 715 u32 w_clen; 716 717 /* First cluster allocated in a nonsparse extend */ 718 u32 w_first_new_cpos; 719 720 /* Type of caller. Must be one of buffer, mmap, direct. */ 721 ocfs2_write_type_t w_type; 722 723 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; 724 725 /* 726 * This is true if page_size > cluster_size. 727 * 728 * It triggers a set of special cases during write which might 729 * have to deal with allocating writes to partial pages. 730 */ 731 unsigned int w_large_pages; 732 733 /* 734 * Pages involved in this write. 735 * 736 * w_target_page is the page being written to by the user. 737 * 738 * w_pages is an array of pages which always contains 739 * w_target_page, and in the case of an allocating write with 740 * page_size < cluster size, it will contain zero'd and mapped 741 * pages adjacent to w_target_page which need to be written 742 * out in so that future reads from that region will get 743 * zero's. 744 */ 745 unsigned int w_num_pages; 746 struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; 747 struct page *w_target_page; 748 749 /* 750 * w_target_locked is used for page_mkwrite path indicating no unlocking 751 * against w_target_page in ocfs2_write_end_nolock. 752 */ 753 unsigned int w_target_locked:1; 754 755 /* 756 * ocfs2_write_end() uses this to know what the real range to 757 * write in the target should be. 758 */ 759 unsigned int w_target_from; 760 unsigned int w_target_to; 761 762 /* 763 * We could use journal_current_handle() but this is cleaner, 764 * IMHO -Mark 765 */ 766 handle_t *w_handle; 767 768 struct buffer_head *w_di_bh; 769 770 struct ocfs2_cached_dealloc_ctxt w_dealloc; 771 772 struct list_head w_unwritten_list; 773 unsigned int w_unwritten_count; 774 }; 775 776 void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) 777 { 778 int i; 779 780 for(i = 0; i < num_pages; i++) { 781 if (pages[i]) { 782 unlock_page(pages[i]); 783 mark_page_accessed(pages[i]); 784 put_page(pages[i]); 785 } 786 } 787 } 788 789 static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) 790 { 791 int i; 792 793 /* 794 * w_target_locked is only set to true in the page_mkwrite() case. 795 * The intent is to allow us to lock the target page from write_begin() 796 * to write_end(). The caller must hold a ref on w_target_page. 
797 */ 798 if (wc->w_target_locked) { 799 BUG_ON(!wc->w_target_page); 800 for (i = 0; i < wc->w_num_pages; i++) { 801 if (wc->w_target_page == wc->w_pages[i]) { 802 wc->w_pages[i] = NULL; 803 break; 804 } 805 } 806 mark_page_accessed(wc->w_target_page); 807 put_page(wc->w_target_page); 808 } 809 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); 810 } 811 812 static void ocfs2_free_unwritten_list(struct inode *inode, 813 struct list_head *head) 814 { 815 struct ocfs2_inode_info *oi = OCFS2_I(inode); 816 struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; 817 818 list_for_each_entry_safe(ue, tmp, head, ue_node) { 819 list_del(&ue->ue_node); 820 spin_lock(&oi->ip_lock); 821 list_del(&ue->ue_ip_node); 822 spin_unlock(&oi->ip_lock); 823 kfree(ue); 824 } 825 } 826 827 static void ocfs2_free_write_ctxt(struct inode *inode, 828 struct ocfs2_write_ctxt *wc) 829 { 830 ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); 831 ocfs2_unlock_pages(wc); 832 brelse(wc->w_di_bh); 833 kfree(wc); 834 } 835 836 static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, 837 struct ocfs2_super *osb, loff_t pos, 838 unsigned len, ocfs2_write_type_t type, 839 struct buffer_head *di_bh) 840 { 841 u32 cend; 842 struct ocfs2_write_ctxt *wc; 843 844 wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS); 845 if (!wc) 846 return -ENOMEM; 847 848 wc->w_cpos = pos >> osb->s_clustersize_bits; 849 wc->w_first_new_cpos = UINT_MAX; 850 cend = (pos + len - 1) >> osb->s_clustersize_bits; 851 wc->w_clen = cend - wc->w_cpos + 1; 852 get_bh(di_bh); 853 wc->w_di_bh = di_bh; 854 wc->w_type = type; 855 856 if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) 857 wc->w_large_pages = 1; 858 else 859 wc->w_large_pages = 0; 860 861 ocfs2_init_dealloc_ctxt(&wc->w_dealloc); 862 INIT_LIST_HEAD(&wc->w_unwritten_list); 863 864 *wcp = wc; 865 866 return 0; 867 } 868 869 /* 870 * If a page has any new buffers, zero them out here, and mark them uptodate 871 * and dirty so they'll be written out (in order to prevent uninitialised 872 * block data from leaking). And clear the new bit. 873 */ 874 static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) 875 { 876 unsigned int block_start, block_end; 877 struct buffer_head *head, *bh; 878 879 BUG_ON(!PageLocked(page)); 880 if (!page_has_buffers(page)) 881 return; 882 883 bh = head = page_buffers(page); 884 block_start = 0; 885 do { 886 block_end = block_start + bh->b_size; 887 888 if (buffer_new(bh)) { 889 if (block_end > from && block_start < to) { 890 if (!PageUptodate(page)) { 891 unsigned start, end; 892 893 start = max(from, block_start); 894 end = min(to, block_end); 895 896 zero_user_segment(page, start, end); 897 set_buffer_uptodate(bh); 898 } 899 900 clear_buffer_new(bh); 901 mark_buffer_dirty(bh); 902 } 903 } 904 905 block_start = block_end; 906 bh = bh->b_this_page; 907 } while (bh != head); 908 } 909 910 /* 911 * Only called when we have a failure during allocating write to write 912 * zero's to the newly allocated region. 
913 */ 914 static void ocfs2_write_failure(struct inode *inode, 915 struct ocfs2_write_ctxt *wc, 916 loff_t user_pos, unsigned user_len) 917 { 918 int i; 919 unsigned from = user_pos & (PAGE_SIZE - 1), 920 to = user_pos + user_len; 921 struct page *tmppage; 922 923 if (wc->w_target_page) 924 ocfs2_zero_new_buffers(wc->w_target_page, from, to); 925 926 for(i = 0; i < wc->w_num_pages; i++) { 927 tmppage = wc->w_pages[i]; 928 929 if (tmppage && page_has_buffers(tmppage)) { 930 if (ocfs2_should_order_data(inode)) 931 ocfs2_jbd2_inode_add_write(wc->w_handle, inode, 932 user_pos, user_len); 933 934 block_commit_write(tmppage, from, to); 935 } 936 } 937 } 938 939 static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, 940 struct ocfs2_write_ctxt *wc, 941 struct page *page, u32 cpos, 942 loff_t user_pos, unsigned user_len, 943 int new) 944 { 945 int ret; 946 unsigned int map_from = 0, map_to = 0; 947 unsigned int cluster_start, cluster_end; 948 unsigned int user_data_from = 0, user_data_to = 0; 949 950 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, 951 &cluster_start, &cluster_end); 952 953 /* treat the write as new if the a hole/lseek spanned across 954 * the page boundary. 955 */ 956 new = new | ((i_size_read(inode) <= page_offset(page)) && 957 (page_offset(page) <= user_pos)); 958 959 if (page == wc->w_target_page) { 960 map_from = user_pos & (PAGE_SIZE - 1); 961 map_to = map_from + user_len; 962 963 if (new) 964 ret = ocfs2_map_page_blocks(page, p_blkno, inode, 965 cluster_start, cluster_end, 966 new); 967 else 968 ret = ocfs2_map_page_blocks(page, p_blkno, inode, 969 map_from, map_to, new); 970 if (ret) { 971 mlog_errno(ret); 972 goto out; 973 } 974 975 user_data_from = map_from; 976 user_data_to = map_to; 977 if (new) { 978 map_from = cluster_start; 979 map_to = cluster_end; 980 } 981 } else { 982 /* 983 * If we haven't allocated the new page yet, we 984 * shouldn't be writing it out without copying user 985 * data. This is likely a math error from the caller. 986 */ 987 BUG_ON(!new); 988 989 map_from = cluster_start; 990 map_to = cluster_end; 991 992 ret = ocfs2_map_page_blocks(page, p_blkno, inode, 993 cluster_start, cluster_end, new); 994 if (ret) { 995 mlog_errno(ret); 996 goto out; 997 } 998 } 999 1000 /* 1001 * Parts of newly allocated pages need to be zero'd. 1002 * 1003 * Above, we have also rewritten 'to' and 'from' - as far as 1004 * the rest of the function is concerned, the entire cluster 1005 * range inside of a page needs to be written. 1006 * 1007 * We can skip this if the page is up to date - it's already 1008 * been zero'd from being read in as a hole. 1009 */ 1010 if (new && !PageUptodate(page)) 1011 ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), 1012 cpos, user_data_from, user_data_to); 1013 1014 flush_dcache_page(page); 1015 1016 out: 1017 return ret; 1018 } 1019 1020 /* 1021 * This function will only grab one clusters worth of pages. 1022 */ 1023 static int ocfs2_grab_pages_for_write(struct address_space *mapping, 1024 struct ocfs2_write_ctxt *wc, 1025 u32 cpos, loff_t user_pos, 1026 unsigned user_len, int new, 1027 struct page *mmap_page) 1028 { 1029 int ret = 0, i; 1030 unsigned long start, target_index, end_index, index; 1031 struct inode *inode = mapping->host; 1032 loff_t last_byte; 1033 1034 target_index = user_pos >> PAGE_SHIFT; 1035 1036 /* 1037 * Figure out how many pages we'll be manipulating here. For 1038 * non allocating write, we just change the one 1039 * page. Otherwise, we'll need a whole clusters worth. 
	 * If we're writing past i_size, we only need enough pages to cover
	 * the last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch. This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}
	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index >= target_index && index <= end_index &&
		    wc->w_type == OCFS2_WRITE_MMAP) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			get_page(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else if (index >= target_index && index <= end_index &&
			   wc->w_type == OCFS2_WRITE_DIRECT) {
			/* Direct write has no mapping page. */
			wc->w_pages[i] = NULL;
			continue;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

/*
 * Prepare a single cluster to be written to the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 *phys, unsigned int new,
			       unsigned int clear_unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i;
	u64 p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);

	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, !clear_unwritten,
					   wc->w_di_bh, wc->w_handle,
					   data_ac, meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (clear_unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, *phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
	if (ret < 0) {
		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
		     "at logical cluster %u",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
		goto out;
	}

	BUG_ON(*phys == 0);

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
	if (!should_zero)
		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		/* This is the direct io target page. */
		if (wc->w_pages[i] == NULL) {
			p_blkno++;
			continue;
		}

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, &desc->c_phys,
					  desc->c_new,
					  desc->c_clear_unwritten,
					  desc->c_needs_zero,
					  data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they should be zero'd or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_SIZE;
	}
}

/*
 * Check if this extent is marked UNWRITTEN by direct io. If so, we need not
 * do the zero work, and must not clear UNWRITTEN since it will be cleared
 * by the direct io procedure.
 * If this is a new extent allocated by direct io, we should mark it in
 * the ip_unwritten_list.
 */
static int ocfs2_unwritten_check(struct inode *inode,
				 struct ocfs2_write_ctxt *wc,
				 struct ocfs2_write_cluster_desc *desc)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
	int ret = 0;

	if (!desc->c_needs_zero)
		return 0;

retry:
	spin_lock(&oi->ip_lock);
	/* No need to zero, no matter whether it's a buffer or direct write:
	 * whoever is zeroing the cluster does the zero work, and will clear
	 * unwritten after all cluster io has finished. */
	list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
		if (desc->c_cpos == ue->ue_cpos) {
			BUG_ON(desc->c_new);
			desc->c_needs_zero = 0;
			desc->c_clear_unwritten = 0;
			goto unlock;
		}
	}

	if (wc->w_type != OCFS2_WRITE_DIRECT)
		goto unlock;

	if (new == NULL) {
		spin_unlock(&oi->ip_lock);
		new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
			      GFP_NOFS);
		if (new == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		goto retry;
	}
	/* This direct write will do the zeroing. */
	new->ue_cpos = desc->c_cpos;
	new->ue_phys = desc->c_phys;
	desc->c_clear_unwritten = 0;
	list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
	list_add_tail(&new->ue_node, &wc->w_unwritten_list);
	wc->w_unwritten_count++;
	new = NULL;
unlock:
	spin_unlock(&oi->ip_lock);
out:
	kfree(new);
	return ret;
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* We should have already CoWed the refcounted extent. */
			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended. w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			desc->c_clear_unwritten = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_clear_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		ret = ocfs2_unwritten_check(inode, wc, desc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ocfs2_commit_trans(osb, handle);
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);

		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);

			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
					     len, (unsigned long long)pos,
					     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data 1st.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
					struct buffer_head *di_bh,
					loff_t pos, unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	loff_t newsize = pos + len;

	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
	if (ret)
		mlog_errno(ret);

	/* There is no wc if this is called from direct I/O.
	 */
	if (wc)
		wc->w_first_new_cpos =
			ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	return ret;
}

static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
			   loff_t pos)
{
	int ret = 0;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
	if (pos > i_size_read(inode))
		ret = ocfs2_zero_extend(inode, di_bh, pos);

	return ret;
}

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, ocfs2_write_type_t type,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	int try_free = 1, ret1;

try_again:
	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Direct I/O changes i_size late, so don't zero the tail here. */
	if (type != OCFS2_WRITE_DIRECT) {
		if (ocfs2_sparse_alloc(osb))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   len, wc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		clusters_need = wc->w_clen;
		ret = ocfs2_refcount_cow(inode, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	clusters_need += clusters_to_alloc;

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	trace_ocfs2_write_begin_nolock(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(long long)i_size_read(inode),
			le32_to_cpu(di->i_clusters),
			pos, len, type, mmap_page,
			clusters_to_alloc, extents_to_split);

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list);
	} else if (type == OCFS2_WRITE_DIRECT)
		/* A direct write need not start a transaction if no extent is allocated. */
		goto success;

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended. For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			   wc->w_desc[wc->w_clen - 1].c_needs_zero))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret) {
		/*
		 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
		 * the target page. In this case, we exit with no error and no target
		 * page. This will trigger the caller, page_mkwrite(), to re-try
		 * the operation.
		 */
		if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
			BUG_ON(wc->w_target_page);
			ret = 0;
			goto out_quota;
		}

		mlog_errno(ret);
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	if (pagep)
		*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	/*
	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
	 * even in case of error here like ENOSPC and ENOMEM. So, we need
	 * to unlock the target page manually to prevent deadlocks when
	 * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
	 * to VM code.
	 */
	if (wc->w_target_locked)
		unlock_page(mmap_page);

	ocfs2_free_write_ctxt(inode, wc);

	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}

	if (ret == -ENOSPC && try_free) {
		/*
		 * Try to free some truncate log so that we can have enough
		 * clusters to allocate.
		 */
		try_free = 0;

		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
		if (ret1 == 1)
			goto try_again;

		if (ret1 < 0)
			mlog_errno(ret1);
	}

	return ret;
}

static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->read_folio(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
				       pagep, fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			return;
		}
	}

	kaddr = kmap_atomic(wc->w_target_page);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr);

	trace_ocfs2_write_end_inline(
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied, void *fsdata)
{
	int i, ret;
	unsigned from, to, start = pos & (PAGE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	BUG_ON(!list_empty(&wc->w_unwritten_list));

	if (handle) {
		ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret) {
			copied = ret;
			mlog_errno(ret);
			goto out;
		}
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

	if (unlikely(copied < len) && wc->w_target_page) {
		loff_t new_isize;

		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		new_isize =
max_t(loff_t, i_size_read(inode), pos + copied); 1985 if (new_isize > page_offset(wc->w_target_page)) 1986 ocfs2_zero_new_buffers(wc->w_target_page, start+copied, 1987 start+len); 1988 else { 1989 /* 1990 * When page is fully beyond new isize (data copy 1991 * failed), do not bother zeroing the page. Invalidate 1992 * it instead so that writeback does not get confused 1993 * put page & buffer dirty bits into inconsistent 1994 * state. 1995 */ 1996 block_invalidate_folio(page_folio(wc->w_target_page), 1997 0, PAGE_SIZE); 1998 } 1999 } 2000 if (wc->w_target_page) 2001 flush_dcache_page(wc->w_target_page); 2002 2003 for(i = 0; i < wc->w_num_pages; i++) { 2004 tmppage = wc->w_pages[i]; 2005 2006 /* This is the direct io target page. */ 2007 if (tmppage == NULL) 2008 continue; 2009 2010 if (tmppage == wc->w_target_page) { 2011 from = wc->w_target_from; 2012 to = wc->w_target_to; 2013 2014 BUG_ON(from > PAGE_SIZE || 2015 to > PAGE_SIZE || 2016 to < from); 2017 } else { 2018 /* 2019 * Pages adjacent to the target (if any) imply 2020 * a hole-filling write in which case we want 2021 * to flush their entire range. 2022 */ 2023 from = 0; 2024 to = PAGE_SIZE; 2025 } 2026 2027 if (page_has_buffers(tmppage)) { 2028 if (handle && ocfs2_should_order_data(inode)) { 2029 loff_t start_byte = 2030 ((loff_t)tmppage->index << PAGE_SHIFT) + 2031 from; 2032 loff_t length = to - from; 2033 ocfs2_jbd2_inode_add_write(handle, inode, 2034 start_byte, length); 2035 } 2036 block_commit_write(tmppage, from, to); 2037 } 2038 } 2039 2040 out_write_size: 2041 /* Direct io do not update i_size here. */ 2042 if (wc->w_type != OCFS2_WRITE_DIRECT) { 2043 pos += copied; 2044 if (pos > i_size_read(inode)) { 2045 i_size_write(inode, pos); 2046 mark_inode_dirty(inode); 2047 } 2048 inode->i_blocks = ocfs2_inode_sector_count(inode); 2049 di->i_size = cpu_to_le64((u64)i_size_read(inode)); 2050 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 2051 di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode)); 2052 di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode)); 2053 if (handle) 2054 ocfs2_update_inode_fsync_trans(handle, inode, 1); 2055 } 2056 if (handle) 2057 ocfs2_journal_dirty(handle, wc->w_di_bh); 2058 2059 out: 2060 /* unlock pages before dealloc since it needs acquiring j_trans_barrier 2061 * lock, or it will cause a deadlock since journal commit threads holds 2062 * this lock and will ask for the page lock when flushing the data. 2063 * put it here to preserve the unlock order. 
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

struct ocfs2_dio_write_ctxt {
	struct list_head	dw_zero_list;
	unsigned		dw_zero_count;
	int			dw_orphaned;
	pid_t			dw_writer_pid;
};

static struct ocfs2_dio_write_ctxt *
ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
{
	struct ocfs2_dio_write_ctxt *dwc = NULL;

	if (bh->b_private)
		return bh->b_private;

	dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
	if (dwc == NULL)
		return NULL;
	INIT_LIST_HEAD(&dwc->dw_zero_list);
	dwc->dw_zero_count = 0;
	dwc->dw_orphaned = 0;
	dwc->dw_writer_pid = task_pid_nr(current);
	bh->b_private = dwc;
	*alloc = 1;

	return dwc;
}

static void ocfs2_dio_free_write_ctx(struct inode *inode,
				     struct ocfs2_dio_write_ctxt *dwc)
{
	ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
	kfree(dwc);
}
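/*
 * Illustrative sketch (not ocfs2 code): the dio write context rides on
 * bh_result->b_private across the repeated ->get_block calls that make up
 * one direct write, and is later handed to the end_io callback as
 * "private".  A minimal model of that lifetime, with a hypothetical
 * caller name:
 *
 *	int demo_get_block(struct buffer_head *map_bh)
 *	{
 *		int first = 0;
 *		struct ocfs2_dio_write_ctxt *dwc;
 *
 *		dwc = ocfs2_dio_alloc_write_ctx(map_bh, &first);
 *		if (!dwc)
 *			return -ENOMEM;
 *		// first == 1 only on the initial call; later calls reuse
 *		// the same context and keep appending unwritten extents.
 *		return 0;
 *	}
 *
 * The context is finally consumed by ocfs2_dio_end_io_write() on success,
 * or freed via ocfs2_dio_free_write_ctx() when nothing was written.
 */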
/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_write_ctxt *wc;
	struct ocfs2_write_cluster_desc *desc = NULL;
	struct ocfs2_dio_write_ctxt *dwc = NULL;
	struct buffer_head *di_bh = NULL;
	u64 p_blkno;
	unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
	loff_t pos = iblock << i_blkbits;
	sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
	unsigned len, total_len = bh_result->b_size;
	int ret = 0, first_get_block = 0;

	len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
	len = min(total_len, len);

	/*
	 * bh_result->b_size is counted in get_more_blocks according to the
	 * write "pos" and "end". We need to map twice so that we can return
	 * different buffer states:
	 * 1. area within the file size: do not set NEW;
	 * 2. area beyond the file size: set NEW.
	 *
	 *		   iblock		endblk
	 * |--------|---------|---------|---------
	 * |<-------area in file------->|
	 */

	if ((iblock <= endblk) &&
	    ((iblock + ((len - 1) >> i_blkbits)) > endblk))
		len = (endblk - iblock + 1) << i_blkbits;

	mlog(0, "get block of %lu at %llu:%u req %u\n",
			inode->i_ino, pos, len, total_len);

	/*
	 * Because we may need to change the file size in
	 * ocfs2_dio_end_io_write(), or add the inode to the orphan dir, we
	 * cannot take the fast path when the file size will change.
	 */
	if (pos + total_len <= i_size_read(inode)) {

		/* This is the fast path for re-write. */
		ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
		if (buffer_mapped(bh_result) &&
		    !buffer_new(bh_result) &&
		    ret == 0)
			goto out;

		/* Clear state set by ocfs2_get_block. */
		bh_result->b_state = 0;
	}

	dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
	if (unlikely(dwc == NULL)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
	    ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
	    !dwc->dw_orphaned) {
		/*
		 * When we are going to allocate extents beyond the file size,
		 * add the inode to the orphan dir so we can reclaim that
		 * space if the system crashes during the write.
		 */
		ret = ocfs2_add_inode_to_orphan(osb, inode);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		dwc->dw_orphaned = 1;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	if (first_get_block) {
		if (ocfs2_sparse_alloc(osb))
			ret = ocfs2_zero_tail(inode, di_bh, pos);
		else
			ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
							   total_len, NULL);
		if (ret < 0) {
			mlog_errno(ret);
			goto unlock;
		}
	}

	ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
				       OCFS2_WRITE_DIRECT, NULL,
				       (void **)&wc, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	desc = &wc->w_desc[0];

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
	BUG_ON(p_blkno == 0);
	p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);

	map_bh(bh_result, inode->i_sb, p_blkno);
	bh_result->b_size = len;
	if (desc->c_needs_zero)
		set_buffer_new(bh_result);

	if (iblock > endblk)
		set_buffer_new(bh_result);

	/*
	 * end_io may sleep, which must not happen in irq context, so defer
	 * completion to the dio work queue.
	 */
	set_buffer_defer_completion(bh_result);

	if (!list_empty(&wc->w_unwritten_list)) {
		struct ocfs2_unwritten_extent *ue = NULL;

		ue = list_first_entry(&wc->w_unwritten_list,
				      struct ocfs2_unwritten_extent,
				      ue_node);
		BUG_ON(ue->ue_cpos != desc->c_cpos);
		/* The physical address may still be 0; fill it in now. */
		ue->ue_phys = desc->c_phys;

		list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
		dwc->dw_zero_count += wc->w_unwritten_count;
	}

	ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
	BUG_ON(ret != len);
	ret = 0;
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (ret < 0)
		ret = -EIO;
	return ret;
}
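/*
 * Worked example (illustrative only) of the length clamping in
 * ocfs2_dio_wr_get_block() above.  Assume a 4KB block size
 * (i_blkbits = 12), a 32KB cluster size, i_size = 20KB and a request at
 * pos = 16KB for total_len = 16KB:
 *
 *	len = 32768 - (16384 & 32767) = 16384;	  up to the cluster boundary
 *	len = min(16384, 16384) = 16384;
 *	endblk = (20480 - 1) >> 12 = 4;		  last block inside i_size
 *	iblock = 16384 >> 12 = 4;		  request starts inside i_size
 *	iblock + ((len - 1) >> 12) = 7 > endblk	  ...but crosses i_size,
 *	len = (4 - 4 + 1) << 12 = 4096;		  so clamp to the in-file part
 *
 * The dio core then issues another ->get_block for the remaining 12KB,
 * which maps entirely beyond i_size and gets BH_New set.
 */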
static int ocfs2_dio_end_io_write(struct inode *inode,
				  struct ocfs2_dio_write_ctxt *dwc,
				  loff_t offset,
				  ssize_t bytes)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree et;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle = NULL;
	loff_t end = offset + bytes;
	int ret = 0, credits = 0;

	ocfs2_init_dealloc_ctxt(&dealloc);

	/*
	 * We clear unwritten extents, delete the orphan entry and change
	 * i_size here. If none of these is needed, we can skip all of this.
	 */
	if (list_empty(&dwc->dw_zero_list) &&
	    end <= i_size_read(inode) &&
	    !dwc->dw_orphaned)
		goto out;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	/* Delete the orphan entry before acquiring i_rwsem. */
	if (dwc->dw_orphaned) {
		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));

		end = end > i_size_read(inode) ? end : 0;

		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
				!!end, end);
		if (ret < 0)
			mlog_errno(ret);
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);

	/*
	 * Attach the dealloc context to the extent tree in case we reuse
	 * extents that have already been unlinked from the current extent
	 * tree due to extent rotation and merging.
	 */
	et.et_dealloc = &dealloc;

	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
				    &data_ac, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto unlock;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto commit;
	}

	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
		ret = ocfs2_assure_trans_credits(handle, credits);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
		ret = ocfs2_mark_extent_written(inode, &et, handle,
						ue->ue_cpos, 1,
						ue->ue_phys,
						meta_ac, &dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	if (end > i_size_read(inode)) {
		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
		if (ret < 0)
			mlog_errno(ret);
	}
commit:
	ocfs2_commit_trans(osb, handle);
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	ocfs2_run_deallocs(osb, &dealloc);
	ocfs2_dio_free_write_ctx(inode, dwc);

	return ret;
}
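/*
 * Illustrative sketch (not ocfs2 code): ocfs2_dio_end_io_write() sizes the
 * transaction for a single extent-tree extension and then, before each
 * unwritten extent it converts, tops the handle back up rather than
 * reserving dw_zero_count * credits up front:
 *
 *	credits = ocfs2_calc_extend_credits(sb, &di->id2.i_list);
 *	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
 *		if (ocfs2_assure_trans_credits(handle, credits) < 0)
 *			break;			// could not extend/restart
 *		ocfs2_mark_extent_written(...);	// convert one cluster
 *	}
 *
 * ocfs2_assure_trans_credits() may extend or restart the handle, so the
 * loop cannot assume earlier, uncommitted changes stay in the same
 * transaction.
 */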
/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static int ocfs2_dio_end_io(struct kiocb *iocb,
			    loff_t offset,
			    ssize_t bytes,
			    void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;
	int ret = 0;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (bytes <= 0)
		mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
				 (long long)bytes);
	if (private) {
		if (bytes > 0)
			ret = ocfs2_dio_end_io_write(inode, private, offset,
						     bytes);
		else
			ocfs2_dio_free_write_ctx(inode, private);
	}

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);
	return ret;
}

static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	get_block_t *get_block;

	/*
	 * Fall back to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fall back to buffered I/O if we do not support append dio. */
	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
	    !ocfs2_supports_append_dio(osb))
		return 0;

	if (iov_iter_rw(iter) == READ)
		get_block = ocfs2_lock_get_block;
	else
		get_block = ocfs2_dio_wr_get_block;

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
				    iter, get_block,
				    ocfs2_dio_end_io, 0);
}
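/*
 * Illustrative note (not ocfs2 code): returning 0 from ->direct_IO tells
 * the generic code that no data was transferred, which is how inline-data
 * inodes and append-dio-unsupported volumes above end up serviced by the
 * buffered path instead.  The get_block dispatch reduces to:
 *
 *	get_block = (iov_iter_rw(iter) == READ) ? ocfs2_lock_get_block
 *						: ocfs2_dio_wr_get_block;
 *
 * Reads only need the mapping under ip_alloc_sem; writes go through
 * ocfs2_dio_wr_get_block() so allocation, unwritten-extent zeroing and
 * i_size/orphan handling happen under the proper cluster locks.
 */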
const struct address_space_operations ocfs2_aops = {
	.dirty_folio		= block_dirty_folio,
	.read_folio		= ocfs2_read_folio,
	.readahead		= ocfs2_readahead,
	.writepage		= ocfs2_writepage,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.direct_IO		= ocfs2_direct_IO,
	.invalidate_folio	= block_invalidate_folio,
	.release_folio		= ocfs2_release_folio,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};