/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	atomic_set(&ip->i_res->rs_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, as we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	struct gfs2_holder gh;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	mutex_lock(&inode->i_mutex);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_size_hint(file, offset, len);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		ap.target = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	mutex_unlock(&inode->i_mutex);
	return error;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under i_lock
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= new_sync_write,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};