/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "log.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "trace_gfs2.h"

/* This doesn't need to be that large as the maximum number of 64 bit
 * pointers in a 4k block is 512, so __u16 is fine for that.  It saves
 * stack space to keep it small.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
	int mp_fheight; /* find_metapath height */
	int mp_aheight; /* actual height (lookup height) */
};

/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > gfs2_max_stuffed_size(ip))
			dsize = gfs2_max_stuffed_size(ip);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, BIT(inode->i_blkbits),
				     BIT(BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	if (release) {
		unlock_page(page);
		put_page(page);
	}

	return 0;
}

/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if the @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}


/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode.  We would then load the indirect block pointed to by
 * pointer 48 in that indirect block.  We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 *              ----------------------------------------
 *              | Dinode |                             |
 *              |        |                            4|
 *              |        |0 1 2 3 4 5                 9|
 *              |        |                            6|
 *              ----------------------------------------
 *                        |
 *                        |
 *                        V
 *              ----------------------------------------
 *              | Indirect Block                       |
 *              |                                     5|
 *              |            4 4 4 4 4 5 5            1|
 *              |0           5 6 7 8 9 0 1            2|
 *              ----------------------------------------
 *                                 |
 *                                 |
 *                                 V
 *              ----------------------------------------
 *              | Indirect Block                       |
 *              |                        1 1 1 1 1    5|
 *              |                        6 6 6 6 6    1|
 *              |0                       3 4 5 6 7    2|
 *              ----------------------------------------
 *                                           |
 *                                           |
 *                                           V
 *              ----------------------------------------
 *              | Data block containing offset         |
 *              |            101342453                 |
 *              |                                      |
 *              |                                      |
 *              ----------------------------------------
 *
 */

static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	mp->mp_fheight = height;
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}
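
/*
 * A worked version of the example in the comment above (illustrative;
 * assumes a 4KiB block size, i.e. sd_inptrs == 512 pointers per indirect
 * block):
 *
 *	block      = 101342453 >> 12		= 24741
 *	mp_list[2] = 24741 % 512		= 165
 *	mp_list[1] = (24741 / 512) % 512	= 48
 *	mp_list[0] = 24741 / (512 * 512)	= 0
 *
 * which is what the do_div() loop in find_metapath() computes, one height
 * at a time from the bottom up.
 */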

static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}

/**
 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 */
static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	if (height == 0)
		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
}

/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */

static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	__be64 *p = metaptr1(height, mp);
	return p + mp->mp_list[height];
}

static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
	const __be64 *t;

	for (t = start; t < end; t++) {
		struct buffer_head *rabh;

		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ,
					  REQ_RAHEAD | REQ_META | REQ_PRIO,
					  rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}

static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
			     unsigned int x, unsigned int h)
{
	for (; x < h; x++) {
		__be64 *ptr = metapointer(x, mp);
		u64 dblock = be64_to_cpu(*ptr);
		int ret;

		if (!dblock)
			break;
		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
		if (ret)
			return ret;
	}
	mp->mp_aheight = x + 1;
	return 0;
}

/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}

/**
 * fillup_metapath - fill up buffers for the metadata path to a specific height
 * @ip: The inode
 * @mp: The metapath
 * @h: The height to which it should be mapped
 *
 * Similar to lookup_metapath, but does lookups for a range of heights
 *
 * Returns: error or the number of buffers filled
 */

static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
	unsigned int x = 0;
	int ret;

	if (h) {
		/* find the first buffer we need to look up. */
		for (x = h - 1; x > 0; x--) {
			if (mp->mp_bh[x])
				break;
		}
	}
	ret = __fillup_metapath(ip, mp, x, h);
	if (ret)
		return ret;
	return mp->mp_aheight - x - 1;
}

static inline void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
	}
}

/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent.
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}
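
/*
 * Illustrative example for gfs2_extent_length() (made-up pointer values):
 * with a pointer array { ..., 100, 101, 102, 50, ... } and @ptr at the
 * entry holding 100, the function returns 3, since the run of physically
 * contiguous blocks ends after 102.  With { ..., 0, 0, 0, 7, ... } and
 * @ptr at the first zero it also returns 3, this time as the length of the
 * unallocated (hole) extent.  *eob is only set when the scan runs into the
 * end of the buffer rather than stopping at @limit or at a discontinuity.
 */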

static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}

static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}

static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				  sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};

/**
 * gfs2_iomap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @iomap: The iomap structure (the range to map comes in through it and
 *         the resulting mapping is returned in it as well)
 * @flags: iomap flags
 * @mp: The metapath, with proper height information calculated
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * The function is in two parts. The first part works out the total
 * number of blocks which we need. The second part does the actual
 * allocation asking for an extent at a time (if enough contiguous free
 * blocks are available, there will only be one request per bmap call)
 * and uses the state machine to initialise the blocks in order.
 *
 * Returns: errno on error
 */

static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			    unsigned flags, struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = mp->mp_fheight - 1;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;
	size_t maxlen = iomap->length >> inode->i_blkbits;

	BUG_ON(mp->mp_aheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (mp->mp_fheight == mp->mp_aheight) {
		struct buffer_head *bh;
		int eob;

		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
					   maxlen, &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
						    sdp->sd_diptrs;
		dblks = min(maxlen, (size_t)(ptrs_per_blk -
					     mp->mp_list[end_of_metadata]));
		if (mp->mp_fheight == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = mp->mp_fheight - mp->mp_aheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = mp->mp_fheight - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (mp->mp_fheight - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = mp->mp_aheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == mp->mp_fheight - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == mp->mp_fheight)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			iomap->addr = bn << inode->i_blkbits;
			iomap->flags |= IOMAP_F_NEW;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while (iomap->addr == IOMAP_NULL_ADDR);

	iomap->length = (u64)dblks << inode->i_blkbits;
	ip->i_height = mp->mp_fheight;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	return 0;
}
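
/*
 * Worked example of the first ("how many blocks") part of gfs2_iomap_alloc()
 * above (illustrative numbers): a height 1 file being grown so that
 * mp_fheight is 3, with mp_list[0] == 0, needs
 *
 *	iblks  = mp_fheight - i_height			= 3 - 1 = 2
 *	branch_start = metapath_branch_start(mp)	= 2
 *	iblks += mp_fheight - branch_start		= 3 - 2 = 1
 *
 * i.e. three indirect blocks in total, plus however many data blocks fit
 * behind the bottom one; they are requested from gfs2_alloc_blocks() an
 * extent at a time and initialised by the ALLOC_GROW_HEIGHT /
 * ALLOC_GROW_DEPTH / ALLOC_DATA state machine.
 */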

/**
 * hole_size - figure out the size of a hole
 * @inode: The inode
 * @lblock: The logical starting block number
 * @mp: The metapath
 *
 * Returns: The hole size in bytes
 *
 */
static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct metapath mp_eof;
	u64 factor = 1;
	int hgt;
	u64 holesz = 0;
	const __be64 *first, *end, *ptr;
	const struct buffer_head *bh;
	u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits;
	int zeroptrs;
	bool done = false;

	/* Get another metapath, to the very last byte */
	find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height);
	for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) {
		bh = mp->mp_bh[hgt];
		if (bh) {
			zeroptrs = 0;
			first = metapointer(hgt, mp);
			end = (const __be64 *)(bh->b_data + bh->b_size);

			for (ptr = first; ptr < end; ptr++) {
				if (*ptr) {
					done = true;
					break;
				} else {
					zeroptrs++;
				}
			}
		} else {
			zeroptrs = sdp->sd_inptrs;
		}
		if (factor * zeroptrs >= lblock_stop - lblock + 1) {
			holesz = lblock_stop - lblock + 1;
			break;
		}
		holesz += factor * zeroptrs;

		factor *= sdp->sd_inptrs;
		if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1]))
			(mp->mp_list[hgt - 1])++;
	}
	return holesz << inode->i_blkbits;
}

static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
		      sizeof(struct gfs2_dinode);
	iomap->offset = 0;
	iomap->length = i_size_read(inode);
	iomap->type = IOMAP_MAPPED;
	iomap->flags = IOMAP_F_DATA_INLINE;
}

/**
 * gfs2_iomap_begin - Map blocks from an inode to disk blocks
 * @inode: The inode
 * @pos: Starting position in bytes
 * @length: Length to map, in bytes
 * @flags: iomap flags
 * @iomap: The iomap structure
 *
 * Returns: errno
 */
int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		     unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct metapath mp = { .mp_aheight = 1, };
	unsigned int factor = sdp->sd_sb.sb_bsize;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	sector_t lblock;
	sector_t lend;
	int ret = 0;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	trace_gfs2_iomap_start(ip, pos, length, flags);
	if (!length) {
		ret = -EINVAL;
		goto out;
	}

	if (gfs2_is_stuffed(ip)) {
		if (flags & IOMAP_REPORT) {
			gfs2_stuffed_iomap(inode, iomap);
			if (pos >= iomap->length)
				ret = -ENOENT;
			goto out;
		}
		BUG_ON(!(flags & IOMAP_WRITE));
	}

	lblock = pos >> inode->i_blkbits;
	lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits;

	iomap->offset = lblock << inode->i_blkbits;
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
	iomap->flags = IOMAP_F_MERGED;
	bmap_lock(ip, flags & IOMAP_WRITE);

	/*
	 * Directory data blocks have a struct gfs2_meta_header header, so the
	 * remaining size is smaller than the filesystem block size.  Logical
	 * block numbers for directories are in units of this remaining size!
	 */
	if (gfs2_is_dir(ip)) {
		factor = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out_release;

	height = ip->i_height;
	while ((lblock + 1) * factor > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;

	ret = lookup_metapath(ip, &mp);
	if (ret)
		goto out_release;

	if (mp.mp_aheight != ip->i_height)
		goto do_alloc;

	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;

	iomap->type = IOMAP_MAPPED;
	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;

	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob);
	if (eob)
		iomap->flags |= IOMAP_F_BOUNDARY;
	iomap->length = (u64)len << inode->i_blkbits;

out_release:
	release_metapath(&mp);
	bmap_unlock(ip, flags & IOMAP_WRITE);
out:
	trace_gfs2_iomap_end(ip, iomap, ret);
	return ret;

do_alloc:
	if (flags & IOMAP_WRITE) {
		ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
	} else if (flags & IOMAP_REPORT) {
		loff_t size = i_size_read(inode);
		if (pos >= size)
			ret = -ENOENT;
		else if (height <= ip->i_height)
			iomap->length = hole_size(inode, lblock, &mp);
		else
			iomap->length = size - pos;
	}
	goto out_release;
}
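
/*
 * Worked example for the height calculation in gfs2_iomap_begin() above
 * (illustrative; assumes a 4KiB block size, giving sd_diptrs == 483,
 * sd_inptrs == 512, sd_heightsize[1] = 483 * 4096 = 1978368 and
 * sd_heightsize[2] = 1978368 * 512): for a height 1 regular file and
 * lblock == 600, (600 + 1) * 4096 exceeds sd_heightsize[1], so the loop
 * bumps height to 2.  Since that is greater than ip->i_height, the code
 * branches to do_alloc: a write grows the tree via gfs2_iomap_alloc(),
 * while a report leaves the range as a hole.
 */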

/**
 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
 *
 * The size of the requested mapping is defined in bh_map->b_size.
 *
 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
 * bh_map->b_size to indicate the size of the mapping when @lblock and
 * successive blocks are mapped, up to the requested size.
 *
 * Sets buffer_boundary() if a read of metadata will be required
 * before the next block can be mapped. Sets buffer_new() if new
 * blocks were allocated.
 *
 * Returns: errno
 */

int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct iomap iomap;
	int ret, flags = 0;

	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);

	if (create)
		flags |= IOMAP_WRITE;
	ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
			       bh_map->b_size, flags, &iomap);
	if (ret) {
		if (!create && ret == -ENOENT) {
			/* Return unmapped buffer beyond the end of file. */
			ret = 0;
		}
		goto out;
	}

	if (iomap.length > bh_map->b_size) {
		iomap.length = bh_map->b_size;
		iomap.flags &= ~IOMAP_F_BOUNDARY;
	}
	if (iomap.addr != IOMAP_NULL_ADDR)
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;
	if (iomap.flags & IOMAP_F_BOUNDARY)
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)
		set_buffer_new(bh_map);

out:
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	return ret;
}

/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}
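
/*
 * Note on the b_size convention used by gfs2_extent_map() above: when
 * @create is zero it requests BIT(i_blkbits + 5) bytes from
 * gfs2_block_map(), i.e. a window of 32 filesystem blocks (128KiB with
 * 4KiB blocks), so *extlen can describe an extent of up to 32 blocks per
 * call; allocating callers ask for a single block instead.
 */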

/**
 * gfs2_block_zero_range - Deal with zeroing out data
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_zero_range(struct inode *inode, loff_t from,
				 unsigned int length)
{
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

#define GFS2_JTRUNC_REVOKES 8192

/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */

static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		struct gfs2_trans *tr;
		unsigned int offs;

		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}

static int trunc_start(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = NULL;
	int journaled = gfs2_is_jdata(ip);
	u64 oldsize = inode->i_size;
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		unsigned int blocksize = i_blocksize(inode);
		unsigned int offs = newsize & (blocksize - 1);
		if (offs) {
			error = gfs2_block_zero_range(inode, newsize,
						      blocksize - offs);
			if (error)
				goto out;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

out:
	brelse(dibh);
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}
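
/*
 * Worked example for the chunking in gfs2_journaled_truncate() above
 * (illustrative; assumes 4KiB blocks and 4KiB pages): max_chunk is
 * GFS2_JTRUNC_REVOKES * 4096 = 32MiB per transaction, so truncating a
 * jdata file from 100MiB down to 10MiB takes three passes of 32MiB and a
 * final pass of 26MiB, ending and restarting the transaction between
 * passes so that no single transaction needs more than
 * GFS2_JTRUNC_REVOKES revokes.
 */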

/**
 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @ip: inode
 * @rd_gh: holder of resource group glock
 * @bh: buffer head to sweep
 * @start: starting point in bh
 * @end: end point in bh
 * @meta: true if bh points to metadata (rather than data)
 * @btotal: place to keep count of total blocks freed
 *
 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
 * free, and free them all. However, we do it one rgrp at a time. If this
 * block has references to multiple rgrps, we break it into individual
 * transactions. This allows other processes to use the rgrps while we're
 * focused on a single one, for better concurrency / performance.
 * At every transaction boundary, we rewrite the inode into the journal.
 * That way the bitmaps are kept consistent with the inode and we can recover
 * if we're interrupted by power outages.
 *
 * Returns: 0, or return code if an error occurred.
 *          *btotal has the total number of blocks freed
 */
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
			      struct buffer_head *bh, __be64 *start, __be64 *end,
			      bool meta, u32 *btotal)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr;
	__be64 *p;
	int blks_outside_rgrp;
	u64 bn, bstart, isize_blks;
	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
	int ret = 0;
	bool buf_in_tr = false; /* buffer was added to transaction */

more_rgrps:
	rgd = NULL;
	if (gfs2_holder_initialized(rd_gh)) {
		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
		gfs2_assert_withdraw(sdp,
			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
	}
	blks_outside_rgrp = 0;
	bstart = 0;
	blen = 0;

	for (p = start; p < end; p++) {
		if (!*p)
			continue;
		bn = be64_to_cpu(*p);

		if (rgd) {
			if (!rgrp_contains_block(rgd, bn)) {
				blks_outside_rgrp++;
				continue;
			}
		} else {
			rgd = gfs2_blk2rgrpd(sdp, bn, true);
			if (unlikely(!rgd)) {
				ret = -EIO;
				goto out;
			}
			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						 0, rd_gh);
			if (ret)
				goto out;

			/* Must be done with the rgrp glock held: */
			if (gfs2_rs_active(&ip->i_res) &&
			    rgd == ip->i_res.rs_rbm.rgd)
				gfs2_rs_deltree(&ip->i_res);
		}

		/* The size of our transactions will be unknown until we
		   actually process all the metadata blocks that relate to
		   the rgrp. So we estimate. We know it can't be more than
		   the dinode's i_blocks and we don't want to exceed the
		   journal flush threshold, sd_log_thresh2. */
		if (current->journal_info == NULL) {
			unsigned int jblocks_rqsted, revokes;

			jblocks_rqsted = rgd->rd_length + RES_DINODE +
				RES_INDIRECT;
			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
				jblocks_rqsted +=
					atomic_read(&sdp->sd_log_thresh2);
			else
				jblocks_rqsted += isize_blks;
			revokes = jblocks_rqsted;
			if (meta)
				revokes += end - start;
			else if (ip->i_depth)
				revokes += sdp->sd_inptrs;
			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
			if (ret)
				goto out_unlock;
			down_write(&ip->i_rw_mutex);
		}
		/* check if we will exceed the transaction blocks requested */
		tr = current->journal_info;
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			/* We set blks_outside_rgrp to ensure the loop will
			   be repeated for the same rgrp, but with a new
			   transaction. */
			blks_outside_rgrp++;
			/* This next part is tricky. If the buffer was added
			   to the transaction, we've already set some block
			   pointers to 0, so we better follow through and free
			   them, or we will introduce corruption (so break).
			   This may be impossible, or at least rare, but I
			   decided to cover the case regardless.

			   If the buffer was not added to the transaction
			   (this call), doing so would exceed our transaction
			   size, so we need to end the transaction and start a
			   new one (so goto).
			 */
			if (buf_in_tr)
				break;
			goto out_unlock;
		}

		gfs2_trans_add_meta(ip->i_gl, bh);
		buf_in_tr = true;
		*p = 0;
		if (bstart + blen == bn) {
			blen++;
			continue;
		}
		if (bstart) {
			__gfs2_free_blocks(ip, bstart, (u32)blen, meta);
			(*btotal) += blen;
			gfs2_add_inode_blocks(&ip->i_inode, -blen);
		}
		bstart = bn;
		blen = 1;
	}
	if (bstart) {
		__gfs2_free_blocks(ip, bstart, (u32)blen, meta);
		(*btotal) += blen;
		gfs2_add_inode_blocks(&ip->i_inode, -blen);
	}
out_unlock:
	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
					    outside the rgrp we just processed,
					    do it all over again. */
		if (current->journal_info) {
			struct buffer_head *dibh;

			ret = gfs2_meta_inode_buffer(ip, &dibh);
			if (ret)
				goto out;

			/* Every transaction boundary, we rewrite the dinode
			   to keep its di_blocks current in case of failure. */
			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
				current_time(&ip->i_inode);
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);
			brelse(dibh);
			up_write(&ip->i_rw_mutex);
			gfs2_trans_end(sdp);
		}
		gfs2_glock_dq_uninit(rd_gh);
		cond_resched();
		goto more_rgrps;
	}
out:
	return ret;
}

static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
{
	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
		return false;
	return true;
}

/**
 * find_nonnull_ptr - find a non-null pointer given a metapath and height
 * @mp: starting metapath
 * @h: desired height to search
 *
 * Assumes the metapath is valid (with buffers) out to height h.
 * Returns: true if a non-null pointer was found in the metapath buffer
 *          false if all remaining pointers are NULL in the buffer
 */
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
			     unsigned int h,
			     __u16 *end_list, unsigned int end_aligned)
{
	struct buffer_head *bh = mp->mp_bh[h];
	__be64 *first, *ptr, *end;

	first = metaptr1(h, mp);
	ptr = first + mp->mp_list[h];
	end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
		bool keep_end = h < end_aligned;
		end = first + end_list[h] + keep_end;
	}

	while (ptr < end) {
		if (*ptr) { /* if we have a non-null pointer */
			mp->mp_list[h] = ptr - first;
			h++;
			if (h < GFS2_MAX_META_HEIGHT)
				mp->mp_list[h] = 0;
			return true;
		}
		ptr++;
	}
	return false;
}

enum dealloc_states {
	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height. */
	DEALLOC_DONE = 3,       /* process complete */
};

static inline void
metapointer_range(struct metapath *mp, int height,
		  __u16 *start_list, unsigned int start_aligned,
		  __u16 *end_list, unsigned int end_aligned,
		  __be64 **start, __be64 **end)
{
	struct buffer_head *bh = mp->mp_bh[height];
	__be64 *first;

	first = metaptr1(height, mp);
	*start = first;
	if (mp_eq_to_hgt(mp, start_list, height)) {
		bool keep_start = height < start_aligned;
		*start = first + start_list[height] + keep_start;
	}
	*end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
		bool keep_end = height < end_aligned;
		*end = first + end_list[height] + keep_end;
	}
}

static inline bool walk_done(struct gfs2_sbd *sdp,
			     struct metapath *mp, int height,
			     __u16 *end_list, unsigned int end_aligned)
{
	__u16 end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		if (!mp_eq_to_hgt(mp, end_list, height))
			return false;
		end = end_list[height] + keep_end;
	} else
		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
	return mp->mp_list[height] >= end;
}

/**
 * punch_hole - deallocate blocks in a file
 * @ip: inode to truncate
 * @offset: the start of the hole
 * @length: the size of the hole (or 0 for truncate)
 *
 * Punch a hole into a file or truncate a file at a given position.  This
 * function operates in whole blocks (@offset and @length are rounded
 * accordingly); partially filled blocks must be cleared otherwise.
 *
 * This function works from the bottom up, and from the right to the left. In
 * other words, it strips off the highest layer (data) before stripping any of
 * the metadata. Doing it this way is best in case the operation is interrupted
 * by power failure, etc.  The dinode is rewritten in every transaction to
 * guarantee integrity.
 */
static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 maxsize = sdp->sd_heightsize[ip->i_height];
	struct metapath mp = {};
	struct buffer_head *dibh, *bh;
	struct gfs2_holder rd_gh;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
	__u16 start_list[GFS2_MAX_META_HEIGHT];
	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
	unsigned int start_aligned, uninitialized_var(end_aligned);
	unsigned int strip_h = ip->i_height - 1;
	u32 btotal = 0;
	int ret, state;
	int mp_h; /* metapath buffers are read in to this height */
	u64 prev_bnr = 0;
	__be64 *start, *end;

	if (offset >= maxsize) {
		/*
		 * The starting point lies beyond the allocated meta-data;
		 * there are no blocks to deallocate.
		 */
		return 0;
	}

	/*
	 * The start position of the hole is defined by lblock, start_list, and
	 * start_aligned.  The end position of the hole is defined by lend,
	 * end_list, and end_aligned.
	 *
	 * start_aligned and end_aligned define down to which height the start
	 * and end positions are aligned to the metadata tree (i.e., the
	 * position is a multiple of the metadata granularity at the height
	 * above).  This determines at which heights additional meta pointers
	 * need to be preserved for the remaining data.
	 */

	if (length) {
		u64 end_offset = offset + length;
		u64 lend;

		/*
		 * Clip the end at the maximum file size for the given height:
		 * that's how far the metadata goes; files bigger than that
		 * will have additional layers of indirection.
		 */
		if (end_offset > maxsize)
			end_offset = maxsize;
		lend = end_offset >> bsize_shift;

		if (lblock >= lend)
			return 0;

		find_metapath(sdp, lend, &mp, ip->i_height);
		end_list = __end_list;
		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));

		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
			if (end_list[mp_h])
				break;
		}
		end_aligned = mp_h;
	}

	find_metapath(sdp, lblock, &mp, ip->i_height);
	memcpy(start_list, mp.mp_list, sizeof(start_list));

	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
		if (start_list[mp_h])
			break;
	}
	start_aligned = mp_h;

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		return ret;

	mp.mp_bh[0] = dibh;
	ret = lookup_metapath(ip, &mp);
	if (ret)
		goto out_metapath;

	/* issue read-ahead on metadata */
	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
		metapointer_range(&mp, mp_h, start_list, start_aligned,
				  end_list, end_aligned, &start, &end);
		gfs2_metapath_ra(ip->i_gl, start, end);
	}

	if (mp.mp_aheight == ip->i_height)
		state = DEALLOC_MP_FULL; /* We have a complete metapath */
	else
		state = DEALLOC_FILL_MP; /* deal with partial metapath */

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_metapath;

	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (ret)
		goto out_metapath;
	gfs2_holder_mark_uninitialized(&rd_gh);

	mp_h = strip_h;

	while (state != DEALLOC_DONE) {
		switch (state) {
		/* Truncate a full metapath at the given strip height.
		 * Note that strip_h == mp_h in order to be in this state. */
		case DEALLOC_MP_FULL:
			bh = mp.mp_bh[mp_h];
			gfs2_assert_withdraw(sdp, bh);
			if (gfs2_assert_withdraw(sdp,
						 prev_bnr != bh->b_blocknr)) {
				printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
				       "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
				       sdp->sd_fsname,
				       (unsigned long long)ip->i_no_addr,
				       prev_bnr, ip->i_height, strip_h, mp_h);
			}
			prev_bnr = bh->b_blocknr;

			if (gfs2_metatype_check(sdp, bh,
						(mp_h ? GFS2_METATYPE_IN :
							GFS2_METATYPE_DI))) {
				ret = -EIO;
				goto out;
			}

			/*
			 * Below, passing end_aligned as 0 gives us the
			 * metapointer range excluding the end point: the end
			 * point is the first metapath we must not deallocate!
			 */

			metapointer_range(&mp, mp_h, start_list, start_aligned,
					  end_list, 0 /* end_aligned */,
					  &start, &end);
			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
						 start, end,
						 mp_h != ip->i_height - 1,
						 &btotal);

			/* If we hit an error or just swept dinode buffer,
			   just exit. */
			if (ret || !mp_h) {
				state = DEALLOC_DONE;
				break;
			}
			state = DEALLOC_MP_LOWER;
			break;

		/* lower the metapath strip height */
		case DEALLOC_MP_LOWER:
			/* We're done with the current buffer, so release it,
			   unless it's the dinode buffer. Then back up to the
			   previous pointer.
			 */
			if (mp_h) {
				brelse(mp.mp_bh[mp_h]);
				mp.mp_bh[mp_h] = NULL;
			}
			/* If we can't get any lower in height, we've stripped
			   off all we can. Next step is to back up and start
			   stripping the previous level of metadata. */
			if (mp_h == 0) {
				strip_h--;
				memcpy(mp.mp_list, start_list, sizeof(start_list));
				mp_h = strip_h;
				state = DEALLOC_FILL_MP;
				break;
			}
			mp.mp_list[mp_h] = 0;
			mp_h--; /* search one metadata height down */
			mp.mp_list[mp_h]++;
			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
				break;
			/* Here we've found a part of the metapath that is not
			 * allocated. We need to search at that height for the
			 * next non-null pointer. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
				state = DEALLOC_FILL_MP;
				mp_h++;
			}
			/* No more non-null pointers at this height. Back up
			   to the previous height and try again. */
			break; /* loop around in the same state */

		/* Fill the metapath with buffers to the given height. */
		case DEALLOC_FILL_MP:
			/* Fill the buffers out to the current height. */
			ret = fillup_metapath(ip, &mp, mp_h);
			if (ret < 0)
				goto out;

			/* issue read-ahead on metadata */
			if (mp.mp_aheight > 1) {
				for (; ret > 1; ret--) {
					metapointer_range(&mp, mp.mp_aheight - ret,
							  start_list, start_aligned,
							  end_list, end_aligned,
							  &start, &end);
					gfs2_metapath_ra(ip->i_gl, start, end);
				}
			}

			/* If buffers found for the entire strip height */
			if (mp.mp_aheight - 1 == strip_h) {
				state = DEALLOC_MP_FULL;
				break;
			}
			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
				mp_h = mp.mp_aheight - 1;

			/* If we find a non-null block pointer, crawl a bit
			   higher up in the metapath and try again, otherwise
			   we need to look lower for a new starting point.
			 */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
				mp_h++;
			else
				state = DEALLOC_MP_LOWER;
			break;
		}
	}

	if (btotal) {
		if (current->journal_info == NULL) {
			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
					       RES_QUOTA, 0);
			if (ret)
				goto out;
			down_write(&ip->i_rw_mutex);
		}
		gfs2_statfs_change(sdp, 0, +btotal, 0);
		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);
		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
	}

out:
	if (gfs2_holder_initialized(&rd_gh))
		gfs2_glock_dq_uninit(&rd_gh);
	if (current->journal_info) {
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
		cond_resched();
	}
	gfs2_quota_unhold(ip);
out_metapath:
	release_metapath(&mp);
	return ret;
}
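
/*
 * Worked example for the start_list/start_aligned logic in punch_hole()
 * above (illustrative; assumes 512 pointers per indirect block): truncating
 * a height 2 file at byte offset 512 * 4096 gives lblock 512, and
 * find_metapath() yields start_list = { 1, 0 }.  start_list[1] is zero, so
 * the scan bottoms out and start_aligned is 0: the new end of file sits
 * exactly on an indirect-block boundary, so the whole subtree under dinode
 * pointer 1 (and everything to its right) can be swept without having to
 * preserve a partially used indirect block.
 */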

static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}

/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
 *
 * Returns: errno
 */

static int do_shrink(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, newsize);
	if (error < 0)
		return error;
	if (gfs2_is_stuffed(ip))
		return 0;

	error = punch_hole(ip, newsize, 0);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}

void gfs2_trim_blocks(struct inode *inode)
{
	int ret;

	ret = do_shrink(inode, inode->i_size);
	WARN_ON(ret != 0);
}

/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and
 * may also increase the size of the inode. This function
 * must not be called with @size any smaller than the current
 * inode size.
 *
 * Although it is not strictly required to unstuff files here,
 * earlier versions of GFS2 have a bug in the stuffed file reading
 * code which will result in a buffer overrun if the size is larger
 * than the max stuffed file size. In order to prevent this from
 * occurring, such files are unstuffed, but in other cases we can
 * just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */

static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}

/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_rwsem and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */

int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	if (newsize >= inode->i_size) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}

int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;
	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
	if (!error)
		error = trunc_end(ip);
	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return punch_hole(ip, 0, 0);
}

/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */

void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}

/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */

static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}

/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal.  This will save
 * us time when writing journal blocks.  Most journals will have only one
 * extent that maps all their logical blocks.  That's because gfs2.mkfs
 * arranges the journal blocks sequentially to maximize performance.
 * So the extent would map the first block for the entire file length.
 * However, gfs2_jadd can happen while file activity is happening, so
 * those journals may not be sequential.  Less likely is the case where
 * the users created their own journals by mounting the metafs and
 * laying it out.  But it's still possible.  These journals might have
 * several extents.
 *
 * Returns: 0 on success, or error on failure
 */

int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;

	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
		jd->nr_extents);
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}
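
/*
 * Illustrative example of the extent merging above (made-up block numbers):
 * if the tail of jd->extent_list is { .lblock = 0, .dblock = 1000,
 * .blocks = 8 } and gfs2_add_jextent() is called with lblock 8,
 * dblock 1008, blocks 8, the two ranges are physically contiguous, so the
 * existing entry simply grows to .blocks = 16 instead of a new extent
 * being allocated.
 */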

/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len > gfs2_max_stuffed_size(ip))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file)
		return 1;

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}

static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;

	if (offset >= inode->i_size)
		return 0;
	if (offset + length > inode->i_size)
		length = inode->i_size - offset;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
	gfs2_trans_add_meta(ip->i_gl, dibh);
	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
	       length);
	brelse(dibh);
	return 0;
}

static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
					 loff_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	int error;

	while (length) {
		struct gfs2_trans *tr;
		loff_t chunk;
		unsigned int offs;

		chunk = length;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = offset & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache_range(inode, offset, chunk);
		offset += chunk;
		length -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}
	return 0;
}

int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (gfs2_is_jdata(ip))
		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
					 GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_zero_range(inode, offset, length);
		if (error)
			goto out;
	} else {
		unsigned int start_off, end_off, blocksize;

		blocksize = i_blocksize(inode);
		start_off = offset & (blocksize - 1);
		end_off = (offset + length) & (blocksize - 1);
		if (start_off) {
			unsigned int len = length;
			if (length > blocksize - start_off)
				len = blocksize - start_off;
			error = gfs2_block_zero_range(inode, offset, len);
			if (error)
				goto out;
			if (start_off + length < blocksize)
				end_off = 0;
		}
		if (end_off) {
			error = gfs2_block_zero_range(inode,
				offset + length - end_off, end_off);
			if (error)
				goto out;
		}
	}

	if (gfs2_is_jdata(ip)) {
		BUG_ON(!current->journal_info);
		gfs2_journaled_truncate_range(inode, offset, length);
	} else
		truncate_pagecache_range(inode, offset, offset + length - 1);

	file_update_time(file);
	mark_inode_dirty(inode);

	if (current->journal_info)
		gfs2_trans_end(sdp);

	if (!gfs2_is_stuffed(ip))
		error = punch_hole(ip, offset, length);

out:
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}
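
/*
 * Worked example for the partial-block zeroing in __gfs2_punch_hole() above
 * (illustrative; assumes a 4KiB block size): punching offset 5000, length
 * 10000 gives start_off = 5000 & 4095 = 904 and end_off = 15000 & 4095 =
 * 2712.  Bytes 5000..8191 are zeroed via gfs2_block_zero_range() (3192
 * bytes), bytes 12288..14999 likewise (2712 bytes), and only the one block
 * that is completely covered (block 2, bytes 8192..12287) is actually
 * deallocated by punch_hole().
 */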