/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/aio.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
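/*
 * Worked example (illustrative only, not part of the driver): fsflags_cvt()
 * walks the low bits of @val and ORs in the table entry for each set bit.
 * Using the fsflags_to_gfs2 table below, converting
 * FS_SYNC_FL | FS_NOATIME_FL (bits 3 and 7) behaves like:
 *
 *	u32 val = (1 << 3) | (1 << 7);
 *	u32 res = fsflags_cvt(fsflags_to_gfs2, val);
 *	// res == GFS2_DIF_SYNC | GFS2_DIF_NOATIME
 *
 * Bits whose table entry is zero contribute nothing, so flags that GFS2
 * does not recognise are silently dropped.
 */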
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
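/*
 * Usage sketch (userspace, illustrative only): these flags are normally
 * driven through the standard FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls,
 * much as chattr(1) does:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	unsigned int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *
 * Any attempt to change a flag outside GFS2_FLAGS_USER_SET is rejected
 * with -EINVAL by do_gfs2_set_flags() below.
 */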
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res->rs_sizehint))
		atomic_set(&ip->i_res->rs_sizehint, hint);
}
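/*
 * Worked example (illustrative only): with a 4096 byte block size
 * (sb_bsize_shift == 12), a 1MiB write gives
 *
 *	blks = (1048576 + 4095) >> 12 = 256
 *
 * so rs_sizehint is raised to at least 256 blocks, letting the block
 * allocator look for a suitably large free extent up front instead of
 * allocating a page at a time.
 */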
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
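/*
 * Note on the loop above: gfs2_block_map() with create == 1 maps (and if
 * necessary allocates) as large an extent as it can, returning the mapped
 * length in bh.b_size, which may be less than was asked for.  A possible
 * progression for a 4KiB page on a 1KiB-block filesystem (made-up numbers,
 * for illustration) would be:
 *
 *	iteration 1: b_size = 2048 -> size = 2048, lblock += 2
 *	iteration 2: b_size = 2048 -> size = 0, done
 */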
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}
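/*
 * A minimal sketch (illustrative only) of the glock holder idiom used
 * throughout this file: gfs2_glock_nq_init() is shorthand for initialising
 * a holder and enqueueing it, and gfs2_glock_dq_uninit() undoes both steps:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	// ... inode fields are cluster-coherent here ...
 *	gfs2_glock_dq_uninit(&gh);
 *
 * Where init and enqueue need to happen separately (see gfs2_page_mkwrite()
 * above), gfs2_holder_init()/gfs2_glock_nq() are used instead.
 */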
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}
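/*
 * The split above is, roughly, the following sequence (sketch only):
 *
 *	filemap_fdatawrite_range(mapping, start, end);	// start data I/O
 *	sync_inode_metadata(inode, 1);			// write metadata
 *	gfs2_ail_flush(ip->i_gl, 1);			// push it to disk
 *	filemap_fdatawait_range(mapping, start, end);	// now wait for data
 *
 * i.e. a filemap_write_and_wait_range() pulled apart so that the wait for
 * the data overlaps with the metadata writeback instead of preceding it.
 */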
/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
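/*
 * Worked example for calc_max_reserv() above (illustrative, with assumed
 * geometry): suppose rd_free_clone = 65536 blocks, sd_max_height = 3,
 * sd_diptrs = 483 and sd_inptrs = 509.  Then:
 *
 *	max_data = 65536 - 3 * (3 - 1) = 65530
 *	tmp = DIV_ROUND_UP(65530, 509) = 129; max_data -= 129 -> 65401
 *	129 <= 483, so the loop stops
 *
 * leaving *data_blocks = 65401 and *ind_blocks = 135: each group of
 * roughly sd_inptrs data blocks costs one indirect block, plus a small
 * fixed overhead per level of metadata tree height.
 */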
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			return error;
retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		ap.target = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		/* Marks the inode as dirty */
		file_update_time(file);
	}

	return generic_write_sync(file, pos, count);

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_putw;

	ret = __gfs2_fallocate(file, mode, offset, len);
	if (ret)
		gfs2_rs_deltree(ip->i_res);
out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	mutex_unlock(&inode->i_mutex);
	return ret;
}
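/*
 * Usage sketch (userspace, illustrative only): only plain preallocation is
 * supported here, with or without FALLOC_FL_KEEP_SIZE; any other mode bits
 * get -EOPNOTSUPP:
 *
 *	#include <fcntl.h>
 *
 *	// grow the file and allocate blocks for it
 *	fallocate(fd, 0, 0, 1 << 20);
 *	// allocate past EOF without changing i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20);
 */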
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
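/*
 * For reference (illustrative only): these two entry points back the usual
 * userspace advisory locking calls on a DLM-backed mount, e.g.
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLKW, &fl);	// -> gfs2_lock()  -> dlm_posix_lock()
 *	flock(fd, LOCK_EX);		// -> gfs2_flock() -> do_flock()
 *
 * so POSIX locks are arbitrated by the DLM across the cluster, while flock
 * locks are built on a dedicated flock glock per inode.
 */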
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};