/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == 2) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = generic_file_llseek_unlocked(file, offset, origin);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
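
/*
 * Illustrative use of fsflags_cvt() (not a new code path): the ioctl
 * handlers below convert in both directions with the two tables, e.g.
 *
 *	fsflags  = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
 *	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
 *
 * Each set bit of the input value selects one table entry, so the
 * tables are indexed by source bit position and hold the destination
 * flag value.
 */
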
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: the file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
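
/*
 * A worked example for gfs2_allocate_page_backing() (illustrative):
 * with 4KiB pages and 1KiB filesystem blocks, page->index N maps to
 * logical block N * 4 and the page spans four blocks, which
 * gfs2_block_map() may satisfy as one extent or over several loop
 * iterations; with equal page and block sizes a single call suffices.
 */
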
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The fault description, containing the page about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(al);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};
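
/*
 * Note: .fault is handled by the generic filemap_fault(), so read
 * faults need no GFS2-specific work.  gfs2_page_mkwrite() runs when a
 * page in a shared writable mapping is about to become writable, which
 * is the point where quota, block reservation and transaction state
 * must be set up before the backing blocks are allocated.
 */
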
/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
		if (error)
			return error;
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		gfs2_ail_flush(ip->i_gl);
	}

	mutex_unlock(&inode->i_mutex);
	return 0;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to-from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}

static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}
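
/*
 * write_empty_blocks() below walks the requested byte range a block at
 * a time, using needs_empty_write() to find runs of blocks that are
 * not yet mapped on disk.  Each such run is zero-filled and committed
 * in one go via __block_write_begin()/empty_write_end(); blocks that
 * are already allocated are skipped.
 */
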
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		}
		else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}
		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	gfs2_dinode_out(ip, dibh->b_data);
	mark_inode_dirty(inode);

	brelse(dibh);

out:
	return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(al);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
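
/*
 * Note on the retry label above: if gfs2_inplace_reserve() fails with
 * -ENOSPC, the per-iteration chunk size ("bytes") is halved, down to a
 * single filesystem block, before retrying, so a request that is too
 * large for any one resource group can still be satisfied in smaller
 * pieces.
 */
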
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = gfs2_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.llseek = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = generic_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.llseek = default_llseek,
};