/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp;

        fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        fp->fp_file = file;
        mutex_init(&fp->fp_mutex);
        ocfs2_file_lock_res_init(&fp->fp_flock, fp);
        file->private_data = fp;

        return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (fp) {
                ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
                ocfs2_lock_res_free(&fp->fp_flock);
                kfree(fp);
                file->private_data = NULL;
        }
}

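/*
 * Set up the per-descriptor private data and bump the inode's open
 * count. Fails with -ENOENT if another node has already wiped the
 * inode from disk. An O_DIRECT open is recorded in the inode flags so
 * direct openers can be recognized later.
 */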
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        trace_ocfs2_file_open(inode, file, file->f_path.dentry,
                              (unsigned long long)OCFS2_I(inode)->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name, mode);

        if (file->f_mode & FMODE_WRITE)
                dquot_initialize(inode);

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't then we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);

        status = ocfs2_init_file_private(inode, file);
        if (status) {
                /*
                 * We want to set the open count back if we're failing
                 * the open.
                 */
                spin_lock(&oi->ip_lock);
                oi->ip_open_count--;
                spin_unlock(&oi->ip_lock);
        }

leave:
        return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

        trace_ocfs2_file_release(inode, file, file->f_path.dentry,
                                 oi->ip_blkno,
                                 file->f_path.dentry->d_name.len,
                                 file->f_path.dentry->d_name.name,
                                 oi->ip_open_count);
        spin_unlock(&oi->ip_lock);

        ocfs2_free_file_private(inode, file);

        return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
        return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
        ocfs2_free_file_private(inode, file);
        return 0;
}

static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
                           int datasync)
{
        int err = 0;
        journal_t *journal;
        struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
                              OCFS2_I(inode)->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name,
                              (unsigned long long)datasync);

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        /*
         * Probably don't need the i_mutex at all in here, just putting it here
         * to be consistent with how fsync used to be called, someone more
         * familiar with the fs could possibly remove it.
         */
        mutex_lock(&inode->i_mutex);
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
                /*
                 * We still have to flush the drive's caches to get data
                 * to the platter.
                 */
                if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
                        blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
                goto bail;
        }

        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);

bail:
        if (err)
                mlog_errno(err);
        mutex_unlock(&inode->i_mutex);

        return (err < 0) ? -EIO : 0;
}

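/*
 * Decide whether an atime update should be written out for this inode.
 * Returns 0 on read-only mounts, when noatime semantics apply, or when
 * the previous update is still within the atime_quantum mount option;
 * returns 1 otherwise.
 */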
int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different from touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = CURRENT_TIME;
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;
        else
                return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * Don't use ocfs2_mark_inode_dirty() here as we don't always
         * have i_mutex to guard against concurrent changes to other
         * inode fields.
         */
        inode->i_atime = CURRENT_TIME;
        di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ocfs2_journal_dirty(handle, bh);

out_commit:
        ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}

static int ocfs2_set_inode_size(handle_t *handle,
                                struct inode *inode,
                                struct buffer_head *fe_bh,
                                u64 new_i_size)
{
        int status;

        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        return status;
}

int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
                              struct buffer_head *fe_bh,
                              u64 offset)
{
        int status;
        u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        /*
         * If the new offset is cluster-aligned, there is nothing for
         * ocfs2_zero_range_for_truncate to fill, so there is no need
         * to CoW either.
         */
        if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
                return 0;

        status = ocfs2_get_clusters(inode, cpos, &phys,
                                    &num_clusters, &ext_flags);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;

        return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos + 1);

out:
        return status;
}

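/*
 * Journal the size change for a shrinking truncate: zero the tail of
 * the cluster that will hold the new EOF, then write the new i_size
 * and c/mtime into the dinode. The orphaning this function is named
 * for is still a TODO (see below).
 */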
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;
        struct ocfs2_dinode *di;
        u64 cluster_bytes;

        /*
         * We need to CoW the cluster containing the offset if it is
         * reflinked, since we will call ocfs2_zero_range_for_truncate
         * later, which will write zeros from the offset to the end of
         * the cluster.
         */
        status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
        if (status) {
                mlog_errno(status);
                return status;
        }

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto out_commit;
        }

        /*
         * Do this before setting i_size.
         */
        cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
        status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
                                               cluster_bytes);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        i_size_write(inode, new_i_size);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        di = (struct ocfs2_dinode *) fe_bh->b_data;
        di->i_size = cpu_to_le64(new_i_size);
        di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

        ocfs2_journal_dirty(handle, fe_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

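/*
 * Top-level truncate. Validates the new size against the dinode,
 * drops cached pages beyond the new EOF, and then either shrinks the
 * inline data or orphans the inode and frees the allocation past
 * new_i_size.
 */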
static int ocfs2_truncate_file(struct inode *inode,
                               struct buffer_head *di_bh,
                               u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        /* We trust di_bh because it comes from ocfs2_inode_lock(), which
         * already validated it */
        fe = (struct ocfs2_dinode *) di_bh->b_data;

        trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                  (unsigned long long)le64_to_cpu(fe->i_size),
                                  (unsigned long long)new_i_size);

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                trace_ocfs2_truncate_file_error(
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        (unsigned long long)new_i_size);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        /* Let's handle the simple truncate cases before doing any more
         * cluster locking. */
        if (new_i_size == le64_to_cpu(fe->i_size))
                goto bail;

        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ocfs2_resv_discard(&osb->osb_la_resmap,
                           &OCFS2_I(inode)->ip_la_data_resv);

        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
         * without allocation change - ocfs2 cluster sizes can be much
         * greater than page size, so we have to truncate them
         * anyway.
         */
        unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(inode->i_mapping, new_i_size);

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
                                               i_size_read(inode), 1);
                if (status)
                        mlog_errno(status);

                goto bail_unlock_sem;
        }

        /* Alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        status = ocfs2_commit_truncate(osb, inode, di_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        /* TODO: orphan dir cleanup here. */
bail_unlock_sem:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
        if (!status && OCFS2_I(inode)->ip_clusters == 0)
                status = ocfs2_try_remove_refcount_tree(inode, di_bh);

        return status;
}

/*
 * Extend file allocation only here.
 * We'll update all the disk stuff, and oip->alloc_size.
 *
 * Expects things to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even on error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
                         struct inode *inode,
                         u32 *logical_offset,
                         u32 clusters_to_add,
                         int mark_unwritten,
                         struct buffer_head *fe_bh,
                         handle_t *handle,
                         struct ocfs2_alloc_context *data_ac,
                         struct ocfs2_alloc_context *meta_ac,
                         enum ocfs2_alloc_restarted *reason_ret)
{
        int ret;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
        ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
                                          clusters_to_add, mark_unwritten,
                                          data_ac, meta_ac, reason_ret);

        return ret;
}

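/*
 * Grow the allocation by clusters_to_add clusters starting at
 * logical_start, restarting the transaction or the whole function as
 * ocfs2_add_inode_data asks us to. Quota is reserved up front and any
 * unused reservation is returned once we know how much was actually
 * allocated.
 */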
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                                     u32 clusters_to_add, int mark_unwritten)
{
        int status = 0;
        int restart_func = 0;
        int credits;
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why = RESTART_NONE;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_tree et;
        int did_quota = 0;

        /*
         * Unwritten extents only exist on file systems which
         * support holes.
         */
        BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
        status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
                                       &data_ac, &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
                                            clusters_to_add);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        trace_ocfs2_extend_allocation(
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                (unsigned long long)i_size_read(inode),
                le32_to_cpu(fe->i_clusters), clusters_to_add,
                why, restart_func);

        status = dquot_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (status)
                goto leave;
        did_quota = 1;

        /* Reserve a write to the file entry early on - that way if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_add_inode_data(osb,
                                      inode,
                                      &logical_start,
                                      clusters_to_add,
                                      mark_unwritten,
                                      bh,
                                      handle,
                                      data_ac,
                                      meta_ac,
                                      &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }

        ocfs2_journal_dirty(handle, bh);

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        /* Release unused quota reservation */
        dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        did_quota = 0;

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        restart_func = 1;
                        status = 0;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        status = ocfs2_allocate_extend_trans(handle, 1);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                status = -ENOMEM;
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
                le32_to_cpu(fe->i_clusters),
                (unsigned long long)le64_to_cpu(fe->i_size),
                OCFS2_I(inode)->ip_clusters,
                (unsigned long long)i_size_read(inode));

leave:
        if (status < 0 && did_quota)
                dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        brelse(bh);
        bh = NULL;

        return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
        int ret = 0;

        if (!ocfs2_should_order_data(inode))
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_jbd2_file_inode(handle, inode);
        if (ret < 0)
                mlog_errno(ret);

out:
        if (ret) {
                if (!IS_ERR(handle))
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
                                 u64 abs_to)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
        handle_t *handle = NULL;
        int ret = 0;
        unsigned zero_from, zero_to, block_start, block_end;

        BUG_ON(abs_from >= abs_to);
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        /* Get the offsets within the page that we want to zero */
        zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
        zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
        if (!zero_to)
                zero_to = PAGE_CACHE_SIZE;

        trace_ocfs2_write_zero_page(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)abs_from,
                        (unsigned long long)abs_to,
                        index, zero_from, zero_to);

        /* We know that zero_from is block aligned */
        for (block_start = zero_from; block_start < zero_to;
             block_start = block_end) {
                block_end = block_start + (1 << inode->i_blkbits);

                /*
                 * block_start is block-aligned.  Bump it by one to force
                 * __block_write_begin and block_commit_write to zero the
                 * whole block.
                 */
                ret = __block_write_begin(page, block_start + 1, 0,
                                          ocfs2_get_block);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }

                if (!handle) {
                        handle = ocfs2_zero_start_ordered_transaction(inode);
                        if (IS_ERR(handle)) {
                                ret = PTR_ERR(handle);
                                handle = NULL;
                                break;
                        }
                }

                /* must not update i_size! */
                ret = block_commit_write(page, block_start + 1,
                                         block_start + 1);
                if (ret < 0)
                        mlog_errno(ret);
                else
                        ret = 0;
        }

        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

out_unlock:
        unlock_page(page);
        page_cache_release(page);
out:
        return ret;
}

/*
 * Find the next range to zero.  We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache.  We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed.  range_start and range_end return the next zeroing
 * range.  A subsequent call should pass the previous range_end as its
 * zero_start.  If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
                                       struct buffer_head *di_bh,
                                       u64 zero_start, u64 zero_end,
                                       u64 *range_start, u64 *range_end)
{
        int rc = 0, needs_cow = 0;
        u32 p_cpos, zero_clusters = 0;
        u32 zero_cpos =
                zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        while (zero_cpos < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
                                        &num_clusters, &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                        zero_clusters = num_clusters;
                        if (ext_flags & OCFS2_EXT_REFCOUNTED)
                                needs_cow = 1;
                        break;
                }

                zero_cpos += num_clusters;
        }
        if (!zero_clusters) {
                *range_end = 0;
                goto out;
        }

        while ((zero_cpos + zero_clusters) < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
                                        &p_cpos, &num_clusters,
                                        &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
                        break;
                if (ext_flags & OCFS2_EXT_REFCOUNTED)
                        needs_cow = 1;
                zero_clusters += num_clusters;
        }
        if ((zero_cpos + zero_clusters) > last_cpos)
                zero_clusters = last_cpos - zero_cpos;

        if (needs_cow) {
                rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }
        }

        *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
        *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
                                             zero_cpos + zero_clusters);

out:
        return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
                                   u64 range_end)
{
        int rc = 0;
        u64 next_pos;
        u64 zero_pos = range_start;

        trace_ocfs2_zero_extend_range(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)range_start,
                        (unsigned long long)range_end);
        BUG_ON(range_start >= range_end);

        while (zero_pos < range_end) {
                next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
                if (next_pos > range_end)
                        next_pos = range_end;
                rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
                if (rc < 0) {
                        mlog_errno(rc);
                        break;
                }
                zero_pos = next_pos;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

        return rc;
}

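/*
 * Zero the allocated, written bytes between the current i_size and
 * zero_to_size. Each range to zero is found with
 * ocfs2_zero_extend_get_range() and trimmed to the caller's interval
 * before being handed to ocfs2_zero_extend_range().
 */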
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
                      loff_t zero_to_size)
{
        int ret = 0;
        u64 zero_start, range_start = 0, range_end = 0;
        struct super_block *sb = inode->i_sb;

        zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                (unsigned long long)zero_start,
                                (unsigned long long)i_size_read(inode));
        while (zero_start < zero_to_size) {
                ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
                                                  zero_to_size,
                                                  &range_start,
                                                  &range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                if (!range_end)
                        break;
                /* Trim the ends */
                if (range_start < zero_start)
                        range_start = zero_start;
                if (range_end > zero_to_size)
                        range_end = zero_to_size;

                ret = ocfs2_zero_extend_range(inode, range_start,
                                              range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                zero_start = range_end;
        }

        return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
                          u64 new_i_size, u64 zero_to)
{
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        /*
         * Only quota files call this without a bh, and they can't be
         * refcounted.
         */
        BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
        BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
        else
                clusters_to_add -= oi->ip_clusters;

        if (clusters_to_add) {
                ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
                                                clusters_to_add, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        BUG_ON(!di_bh);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
         * here. We even have to hold it for sparse files because there
         * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                /*
                 * We can optimize small extends by keeping the inode's
                 * inline data.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
                        up_write(&oi->ip_alloc_sem);
                        goto out_update_size;
                }

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
        else
                ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
                                            new_i_size);

        up_write(&oi->ip_alloc_sem);

        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

out_update_size:
        ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

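/*
 * setattr entry point. Size changes are serialized by the rw lock and
 * the cluster inode lock; ownership changes look the quota structures
 * up here, where lock ordering is safe, rather than inside
 * dquot_transfer().
 */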
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;
        struct dquot *transfer_to[MAXQUOTAS] = { };
        int qtype;

        trace_ocfs2_setattr(inode, dentry,
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            dentry->d_name.len, dentry->d_name.name,
                            attr->ia_valid, attr->ia_mode,
                            from_kuid(&init_user_ns, attr->ia_uid),
                            from_kgid(&init_user_ns, attr->ia_gid));

        /* ensuring we don't even attempt to truncate a symlink */
        if (S_ISLNK(inode->i_mode))
                attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
                return 0;

        status = inode_change_ok(inode, attr);
        if (status)
                return status;

        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);
        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = ocfs2_inode_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail_unlock_rw;
        }

        if (size_change && attr->ia_size != i_size_read(inode)) {
                status = inode_newsize_ok(inode, attr->ia_size);
                if (status)
                        goto bail_unlock;

                inode_dio_wait(inode);

                if (i_size_read(inode) > attr->ia_size) {
                        if (ocfs2_should_order_data(inode)) {
                                status = ocfs2_begin_ordered_truncate(inode,
                                                                      attr->ia_size);
                                if (status)
                                        goto bail_unlock;
                        }
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                } else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
            (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
                /*
                 * Gather pointers to quota structures so that allocation /
                 * freeing of quota structures happens here and not inside
                 * dquot_transfer() where we have problems with lock ordering
                 */
                if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
                        transfer_to[USRQUOTA] = dqget(sb,
                                                make_kqid_uid(attr->ia_uid));
                        if (!transfer_to[USRQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
                        transfer_to[GRPQUOTA] = dqget(sb,
                                                make_kqid_gid(attr->ia_gid));
                        if (!transfer_to[GRPQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock:
        ocfs2_inode_unlock(inode, 1);
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:
        brelse(bh);

        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);

        if (!status && attr->ia_valid & ATTR_MODE) {
                status = ocfs2_acl_chmod(inode);
                if (status < 0)
                        mlog_errno(status);
        }

        return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
                  struct dentry *dentry,
                  struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = dentry->d_inode->i_sb;
        struct ocfs2_super *osb = sb->s_fs_info;
        int err;

        err = ocfs2_inode_revalidate(dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
        int ret;

        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;

        ret = ocfs2_inode_lock(inode, NULL, 0);
        if (ret) {
                if (ret != -ENOENT)
                        mlog_errno(ret);
                goto out;
        }

        ret = generic_permission(inode, mask);

        ocfs2_inode_unlock(inode, 0);
out:
        return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
                                     struct buffer_head *bh)
{
        int ret;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        trace_ocfs2_write_remove_suid(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);

        ocfs2_journal_dirty(handle, bh);

out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
                                       size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;

        ret = ocfs2_read_inode_block(inode, &bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = __ocfs2_write_remove_suid(inode, bh);
out:
        brelse(bh);
        return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes.  Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
                                            u64 start, u64 len)
{
        int ret;
        u32 cpos, phys_cpos, clusters, alloc_size;
        u64 end = start + len;
        struct buffer_head *di_bh = NULL;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_read_inode_block(inode, &di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Nothing to do if the requested reservation range
                 * fits within the inode.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, end))
                        goto out;

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * We consider both start and len to be inclusive.
         */
        cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
        clusters -= cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
                                         &alloc_size, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Hole or existing extent len can be arbitrary, so
                 * cap it to our own allocation request.
                 */
                if (alloc_size > clusters)
                        alloc_size = clusters;

                if (phys_cpos) {
                        /*
                         * We already have an allocation at this
                         * region so we can safely skip it.
                         */
                        goto next;
                }

                ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
                if (ret) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }

next:
                cpos += alloc_size;
                clusters -= alloc_size;
        }

        ret = 0;
out:
        brelse(di_bh);
        return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
                                         u64 byte_len)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t start, end;
        struct address_space *mapping = inode->i_mapping;

        start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
        end = byte_start + byte_len;
        end = end & ~(osb->s_clustersize - 1);

        if (start < end) {
                unmap_mapping_range(mapping, start, end - start, 0);
                truncate_inode_pages_range(mapping, start, end - 1);
        }
}

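/*
 * Zero the portions of [start, start + len) that share a cluster with
 * bytes outside the range. The cluster-aligned middle needs no zeroing
 * since its allocation is removed outright.
 */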
static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
{
        int ret = 0;
        u64 tmpend, end = start + len;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;

        /*
         * The "start" and "end" values are NOT necessarily part of
         * the range whose allocation is being deleted. Rather, this
         * is what the user passed in with the request. We must zero
         * partial clusters here. There's no need to worry about
         * physical allocation - the zeroing code knows to skip holes.
         */
        trace_ocfs2_zero_partial_clusters(
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                (unsigned long long)start, (unsigned long long)end);

        /*
         * If both edges are on a cluster boundary then there's no
         * zeroing required as the region is part of the allocation to
         * be truncated.
         */
        if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        /*
         * We want to get the byte offset of the end of the 1st cluster.
         */
        tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
        if (tmpend > end)
                tmpend = end;

        trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
                                                 (unsigned long long)tmpend);

        ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
        if (ret)
                mlog_errno(ret);

        if (tmpend < end) {
                /*
                 * This may make start and end equal, but the zeroing
                 * code will skip any work in that case so there's no
                 * need to catch it up here.
                 */
                start = end & ~(osb->s_clustersize - 1);

                trace_ocfs2_zero_partial_clusters_range2(
                        (unsigned long long)start, (unsigned long long)end);

                ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
                if (ret)
                        mlog_errno(ret);
        }

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
        int i;
        struct ocfs2_extent_rec *rec = NULL;

        for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

                rec = &el->l_recs[i];

                if (le32_to_cpu(rec->e_cpos) < pos)
                        break;
        }

        return i;
}

/*
 * Helper to calculate the punching position and length in one run. We
 * handle the following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
                                 struct ocfs2_extent_list *el,
                                 struct ocfs2_extent_rec *rec,
                                 u32 trunc_start, u32 *trunc_cpos,
                                 u32 *trunc_len, u32 *trunc_end,
                                 u64 *blkno, int *done)
{
        int ret = 0;
        u32 coff, range;

        range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

        if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
                /*
                 * remove an entire extent record.
                 */
                *trunc_cpos = le32_to_cpu(rec->e_cpos);
                /*
                 * Skip holes if any.
                 */
                if (range < *trunc_end)
                        *trunc_end = range;
                *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
                *blkno = le64_to_cpu(rec->e_blkno);
                *trunc_end = le32_to_cpu(rec->e_cpos);
        } else if (range > trunc_start) {
                /*
                 * remove a partial extent record, which means we're
                 * removing the last extent record.
                 */
                *trunc_cpos = trunc_start;
                /*
                 * skip hole if any.
                 */
                if (range < *trunc_end)
                        *trunc_end = range;
                *trunc_len = *trunc_end - trunc_start;
                coff = trunc_start - le32_to_cpu(rec->e_cpos);
                *blkno = le64_to_cpu(rec->e_blkno) +
                                ocfs2_clusters_to_blocks(inode->i_sb, coff);
                *trunc_end = trunc_start;
        } else {
                /*
                 * There are two possibilities:
                 *
                 * - the last record has been removed
                 * - trunc_start was within a hole
                 *
                 * Either case means hole punching is complete.
                 */
                ret = 1;
        }

        *done = ret;
}

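/*
 * Punch a hole of byte_len bytes at byte_start. Inline data is simply
 * truncated; otherwise the partial edge clusters are zeroed and the
 * extent list is walked right to left, removing every record that
 * falls inside the hole.
 */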
static int ocfs2_remove_inode_range(struct inode *inode,
                                    struct buffer_head *di_bh, u64 byte_start,
                                    u64 byte_len)
{
        int ret = 0, flags = 0, done = 0, i;
        u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
        u32 cluster_in_el;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_cached_dealloc_ctxt dealloc;
        struct address_space *mapping = inode->i_mapping;
        struct ocfs2_extent_tree et;
        struct ocfs2_path *path = NULL;
        struct ocfs2_extent_list *el = NULL;
        struct ocfs2_extent_rec *rec = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
        ocfs2_init_dealloc_ctxt(&dealloc);

        trace_ocfs2_remove_inode_range(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)byte_start,
                        (unsigned long long)byte_len);

        if (byte_len == 0)
                return 0;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
                                            byte_start + byte_len, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
                /*
                 * There's no need to get fancy with the page cache
                 * truncate of an inline-data inode. We're talking
                 * about less than a page here, which will be cached
                 * in the dinode buffer anyway.
                 */
                unmap_mapping_range(mapping, 0, 0, 0);
                truncate_inode_pages(mapping, 0);
                goto out;
        }

        /*
         * For reflinks, we may need to CoW two clusters which might be
         * partially zeroed later, if the hole's start and end offsets
         * fall within one cluster (i.e. are not exactly aligned to the
         * cluster size).
         */
        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {

                ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
        trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
        cluster_in_el = trunc_end;

        ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        path = ocfs2_new_path_from_et(&et);
        if (!path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        while (trunc_end > trunc_start) {

                ret = ocfs2_find_path(INODE_CACHE(inode), path,
                                      cluster_in_el);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                el = path_leaf_el(path);

                i = ocfs2_find_rec(el, trunc_end);
                /*
                 * Need to go to previous extent block.
                 */
                if (i < 0) {
                        if (path->p_tree_depth == 0)
                                break;

                        ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
                                                            path,
                                                            &cluster_in_el);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        /*
                         * We've reached the leftmost extent block,
                         * it's safe to leave.
                         */
                        if (cluster_in_el == 0)
                                break;

                        /*
                         * The 'pos' we search for in the previous extent
                         * block is always one cluster less than the
                         * actual trunc_end.
                         */
                        trunc_end = cluster_in_el + 1;

                        ocfs2_reinit_path(path, 1);

                        continue;

                } else
                        rec = &el->l_recs[i];

                ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
                                     &trunc_len, &trunc_end, &blkno, &done);
                if (done)
                        break;

                flags = rec->e_flags;
                phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

                ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
                                               phys_cpos, trunc_len, flags,
                                               &dealloc, refcount_loc);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                cluster_in_el = trunc_end;

                ocfs2_reinit_path(path, 1);
        }

        ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
        ocfs2_free_path(path);
        ocfs2_schedule_truncate_log_flush(osb, 1);
        ocfs2_run_deallocs(osb, &dealloc);

        return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                                     loff_t f_pos, unsigned int cmd,
                                     struct ocfs2_space_resv *sr,
                                     int change_size)
{
        int ret;
        s64 llen;
        loff_t size;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
        unsigned long long max_off = inode->i_sb->s_maxbytes;

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        mutex_lock(&inode->i_mutex);

        /*
         * This prevents concurrent writes on other nodes
         */
        ret = ocfs2_rw_lock(inode, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out_rw_unlock;
        }

        if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
                ret = -EPERM;
                goto out_inode_unlock;
        }

        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
        case 1: /*SEEK_CUR*/
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
                sr->l_start += i_size_read(inode);
                break;
        default:
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        sr->l_whence = 0;

        llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

        if (sr->l_start < 0
            || sr->l_start > max_off
            || (sr->l_start + llen) < 0
            || (sr->l_start + llen) > max_off) {
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        size = sr->l_start + sr->l_len;

        if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
                if (sr->l_len <= 0) {
                        ret = -EINVAL;
                        goto out_inode_unlock;
                }
        }

        if (file && should_remove_suid(file->f_path.dentry)) {
                ret = __ocfs2_write_remove_suid(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out_inode_unlock;
                }
        }

        down_write(&OCFS2_I(inode)->ip_alloc_sem);
        switch (cmd) {
        case OCFS2_IOC_RESVSP:
        case OCFS2_IOC_RESVSP64:
                /*
                 * This takes unsigned offsets, but the signed ones we
                 * pass have been checked against overflow above.
                 */
                ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
                                                       sr->l_len);
                break;
        case OCFS2_IOC_UNRESVSP:
        case OCFS2_IOC_UNRESVSP64:
                ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
                                               sr->l_len);
                break;
        default:
                ret = -EINVAL;
        }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        /*
         * We update c/mtime for these changes
         */
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        if (change_size && i_size_read(inode) < size)
                i_size_write(inode, size);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
                mlog_errno(ret);

        if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;

        ocfs2_commit_trans(osb, handle);

out_inode_unlock:
        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
        ocfs2_rw_unlock(inode, 1);

out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

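/*
 * Entry point for the OCFS2_IOC_RESVSP(64)/UNRESVSP(64) ioctls.
 * Rejects the request early when the filesystem lacks the
 * unwritten-extent or sparse-allocation support the command needs.
 */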
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
                            struct ocfs2_space_resv *sr)
{
        struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int ret;

        if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
            !ocfs2_writes_unwritten_extents(osb))
                return -ENOTTY;
        else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
                 !ocfs2_sparse_alloc(osb))
                return -ENOTTY;

        if (!S_ISREG(inode->i_mode))
                return -EINVAL;

        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;

        ret = mnt_want_write_file(file);
        if (ret)
                return ret;
        ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
        mnt_drop_write_file(file);
        return ret;
}

static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
                            loff_t len)
{
        struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_space_resv sr;
        int change_size = 1;
        int cmd = OCFS2_IOC_RESVSP64;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
        if (!ocfs2_writes_unwritten_extents(osb))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_KEEP_SIZE)
                change_size = 0;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                cmd = OCFS2_IOC_UNRESVSP64;

        sr.l_whence = 0;
        sr.l_start = (s64)offset;
        sr.l_len = (s64)len;

        return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
                                         change_size);
}

int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
                                   size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
            !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
            OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

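/*
 * Wait for in-flight unaligned AIO against this inode to drain;
 * ip_unaligned_aio is raised in the write path and dropped again when
 * the direct I/O completes (see ocfs2_dio_end_io).
 */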
static void ocfs2_aiodio_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ocfs2_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
}

static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
        int blockmask = inode->i_sb->s_blocksize - 1;
        loff_t final_size = pos + count;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;
        return 0;
}

static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
                                            struct file *file,
                                            loff_t pos, size_t count,
                                            int *meta_level)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 clusters =
                ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        *meta_level = 1;

        ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
        if (ret)
                mlog_errno(ret);
out:
        brelse(di_bh);
        return ret;
}

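/*
 * Take the cluster locks needed before a write and decide how it may
 * proceed: clear suid/sgid, CoW refcounted ranges, resolve the final
 * write position for O_APPEND, and downgrade direct I/O to buffered
 * when inline data, holes, or a size-extending write make O_DIRECT
 * unsafe.
 */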
static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
					    struct file *file,
					    loff_t pos, size_t count,
					    int *meta_level)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 clusters =
		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*meta_level = 1;

	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
	if (ret)
		mlog_errno(ret);
out:
	brelse(di_bh);
	return ret;
}

static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t *ppos,
					 size_t count,
					 int appending,
					 int *direct_io,
					 int *has_refcount)
{
	int ret = 0, meta_level = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	loff_t saved_pos = 0, end;

	/*
	 * We start with a read level meta lock and only jump to an ex
	 * if we need to make modifications here.
	 */
	for (;;) {
		ret = ocfs2_inode_lock(inode, NULL, meta_level);
		if (ret < 0) {
			meta_level = -1;
			mlog_errno(ret);
			goto out;
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write). */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock(inode, meta_level);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* work on a copy of ppos until we're sure that we won't have
		 * to recalculate it due to relocking. */
		if (appending)
			saved_pos = i_size_read(inode);
		else
			saved_pos = *ppos;

		end = saved_pos + count;

		ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock(inode, meta_level);
			meta_level = -1;

			ret = ocfs2_prepare_inode_for_refcount(inode,
							       file,
							       saved_pos,
							       count,
							       &meta_level);
			if (has_refcount)
				*has_refcount = 1;
			if (direct_io)
				*direct_io = 0;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}

		/*
		 * Skip the O_DIRECT checks if we don't need
		 * them.
		 */
		if (!direct_io || !(*direct_io))
			break;

		/*
		 * There's no sane way to do direct writes to an inode
		 * with inline data.
		 */
		if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
			*direct_io = 0;
			break;
		}

		/*
		 * Allowing concurrent direct writes means
		 * i_size changes wouldn't be synchronized, so
		 * one node could wind up truncating another
		 * node's writes.
		 */
		if (end > i_size_read(inode)) {
			*direct_io = 0;
			break;
		}

		/*
		 * We don't fill holes during direct io, so
		 * check for them here. If any are found, the
		 * caller will have to retake some cluster
		 * locks and initiate the io as buffered.
		 */
		ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
		if (ret == 1) {
			*direct_io = 0;
			ret = 0;
		} else if (ret < 0)
			mlog_errno(ret);
		break;
	}

	if (appending)
		*ppos = saved_pos;

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    saved_pos, appending, count,
					    direct_io, has_refcount);

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);

out:
	return ret;
}
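
/*
 * Minimal sketch of the take-shared-then-upgrade idiom used by
 * ocfs2_prepare_inode_for_write() above. Illustrative only;
 * needs_exclusive() is a hypothetical stand-in for checks such as
 * should_remove_suid():
 *
 *	int level = 0;			// start with a PR (read) lock
 *	for (;;) {
 *		ret = ocfs2_inode_lock(inode, NULL, level);
 *		if (ret < 0)
 *			break;
 *		if (level == 0 && needs_exclusive(inode)) {
 *			ocfs2_inode_unlock(inode, level);
 *			level = 1;	// retry with an EX lock and
 *			continue;	// redo every check under it
 *		}
 *		// ... checks and modifications happen here ...
 *		break;
 *	}
 *
 * Re-running the whole loop body after the upgrade matters: anything
 * observed under the PR lock may have changed by the time EX is held.
 */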
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)
{
	int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
	int can_do_direct, has_refcount = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t old_size, *ppos = &iocb->ki_pos;
	u32 old_clusters;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	int unaligned_dio = 0;

	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)nr_segs);

	if (iocb->ki_nbytes == 0)
		return 0;

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

	ocfs2_iocb_clear_sem_locked(iocb);

relock:
	/* to match setattr's i_mutex -> rw_lock ordering */
	if (direct_io) {
		have_alloc_sem = 1;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_sem_locked(iocb);
	}

	/*
	 * Concurrent O_DIRECT writes are allowed with
	 * mount_option "coherency=buffered".
	 */
	rw_level = (!direct_io || full_coherency);

	ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_sems;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file, ppos,
					    iocb->ki_nbytes, appending,
					    &can_do_direct, &has_refcount);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (direct_io && !is_sync_kiocb(iocb))
		unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes,
						      *ppos);

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);

		have_alloc_sem = 0;
		rw_level = -1;

		direct_io = 0;
		goto relock;
	}

	if (unaligned_dio) {
		/*
		 * Wait on previous unaligned aio to complete before
		 * proceeding.
		 */
		ocfs2_aiodio_wait(inode);

		/* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
		atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
		ocfs2_iocb_set_unaligned_aio(iocb);
	}

	/*
	 * To later detect whether a journal commit for sync writes is
	 * necessary, we sample i_size, and cluster count here.
	 */
	old_size = i_size_read(inode);
	old_clusters = OCFS2_I(inode)->ip_clusters;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	ret = generic_segment_checks(iov, &nr_segs, &ocount,
				     VERIFY_READ);
	if (ret)
		goto out_dio;

	count = ocount;
	ret = generic_write_checks(file, ppos, &count,
				   S_ISBLK(inode->i_mode));
	if (ret)
		goto out_dio;

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
		if (written < 0) {
			ret = written;
			goto out_dio;
		}
	} else {
		current->backing_dev_info = file->f_mapping->backing_dev_info;
		written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
						      ppos, count, 0);
		current->backing_dev_info = NULL;
	}

out_dio:
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
	    ((file->f_flags & O_DIRECT) && !direct_io)) {
		ret = filemap_fdatawrite_range(file->f_mapping, pos,
					       pos + count - 1);
		if (ret < 0)
			written = ret;

		if (!ret && ((old_size != i_size_read(inode)) ||
			     (old_clusters != OCFS2_I(inode)->ip_clusters) ||
			     has_refcount)) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping, pos,
						      pos + count - 1);
	}

	/*
	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and others
	 * that don't, so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
		have_alloc_sem = 0;
		unaligned_dio = 0;
	}

	if (unaligned_dio) {
		ocfs2_iocb_clear_unaligned_aio(iocb);
		atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
	}

out:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_sems:
	if (have_alloc_sem)
		ocfs2_iocb_clear_sem_locked(iocb);

	mutex_unlock(&inode->i_mutex);

	if (written)
		ret = written;
	return ret;
}
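
/*
 * The rw_level choice in ocfs2_file_aio_write(), spelled out
 * (illustrative summary of rw_level = (!direct_io || full_coherency),
 * no new behaviour):
 *
 *	buffered write, any coherency mode	-> rw_level 1 (EX)
 *	O_DIRECT write, coherency=full		-> rw_level 1 (EX)
 *	O_DIRECT write, coherency=buffered	-> rw_level 0 (PR),
 *						   allowing concurrent
 *						   direct writers
 */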
static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
				struct file *out,
				struct splice_desc *sd)
{
	int ret;

	ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
					    sd->total_len, 0, NULL, NULL);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	return splice_from_pipe_feed(pipe, sd, pipe_to_file);
}

static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
				       struct file *out,
				       loff_t *ppos,
				       size_t len,
				       unsigned int flags)
{
	int ret;
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			out->f_path.dentry->d_name.len,
			out->f_path.dentry->d_name.name, len);

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = ocfs2_rw_lock(inode, 1);
		if (ret < 0)
			mlog_errno(ret);
		else {
			ret = ocfs2_splice_to_file(pipe, out, &sd);
			ocfs2_rw_unlock(inode, 1);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;

		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}

static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	int ret = 0, lock_level = 0;
	struct inode *inode = file_inode(in);

	trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			in->f_path.dentry->d_name.len,
			in->f_path.dentry->d_name.name, len);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_splice_read(in, ppos, pipe, len, flags);

bail:
	return ret;
}
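
/*
 * Sketch of the per-iteration lock ordering in ocfs2_file_splice_write()
 * (illustrative restatement of the loop above, not new logic): the pipe
 * lock is held for the whole splice, while i_mutex and the EX rw lock
 * are retaken for each batch fed from the pipe, so a large splice never
 * holds cluster locks across the entire transfer:
 *
 *	pipe_lock(pipe);
 *	while (more data in pipe) {
 *		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
 *		ocfs2_rw_lock(inode, 1);	// EX for writes
 *		// ... feed one batch of pipe buffers ...
 *		ocfs2_rw_unlock(inode, 1);
 *		mutex_unlock(&inode->i_mutex);
 *	}
 *	pipe_unlock(pipe);
 */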
static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
				   const struct iovec *iov,
				   unsigned long nr_segs,
				   loff_t pos)
{
	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);

	trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name, nr_segs);

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	ocfs2_iocb_clear_sem_locked(iocb);

	/*
	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (filp->f_flags & O_DIRECT) {
		have_alloc_sem = 1;
		ocfs2_iocb_set_sem_locked(iocb);

		ret = ocfs2_rw_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This gives the checks down in
	 * generic_file_aio_read() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
	trace_generic_file_aio_read_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));

	/* see ocfs2_file_aio_write */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

bail:
	if (have_alloc_sem)
		ocfs2_iocb_clear_sem_locked(iocb);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}

/* Refer to generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		if (ret)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		return ret;
	return offset;
}
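
/*
 * Illustrative examples of the whence handling above (the fd and
 * offsets are hypothetical): lseek(fd, 0, SEEK_CUR) takes the fast
 * path that returns file->f_pos without calling vfs_setpos(), while
 * lseek(fd, -4096, SEEK_END) computes i_size - 4096 and has it
 * validated by vfs_setpos() against the 0..s_maxbytes range.
 */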
const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ocfs2_listxattr,
	.removexattr	= generic_removexattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
	.fallocate	= ocfs2_fallocate,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.aio_read	= ocfs2_file_aio_read,
	.aio_write	= ocfs2_file_aio_write,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= ocfs2_file_splice_read,
	.splice_write	= ocfs2_file_splice_write,
	.fallocate	= ocfs2_fallocate,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};