/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "ops_address.h"

/* This doesn't need to be that large as max 64 bit pointers in a 4k
 * block is 512, so __u16 is fine for that. It saves stack space to
 * keep it small.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
};

typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
			     struct buffer_head *bh, __be64 *top,
			     __be64 *bottom, unsigned int height,
			     void *data);

struct strip_mine {
	int sm_first;
	unsigned int sm_height;
};

/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: any locked page held by the caller process
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size, 0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
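/*
 * Illustrative note (not part of the original source): a "stuffed" dinode
 * keeps the file data in the tail of the dinode block itself, so the
 * largest stuffed file is sb_bsize - sizeof(struct gfs2_dinode) bytes.
 * A sketch of the check a grow path makes (names here are only for
 * illustration):
 *
 *	max_stuffed = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
 *	if (new_size > max_stuffed && gfs2_is_stuffed(ip))
 *		error = gfs2_unstuff_dinode(ip, page);
 *
 * do_grow() later in this file performs exactly this check before
 * calling gfs2_unstuff_dinode().
 */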
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: a locked page held by the caller covering the stuffed data, or NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (ip->i_di.di_size) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		block = gfs2_alloc_block(ip, &n);
		if (isdir) {
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (ip->i_di.di_size) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine fills in a struct metapath that defines a path through
 * the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would fill in the metapath with: mp_list[0] = 0,
 * mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 * ----------------------------------------
 * | Dinode |                             |
 * |        |                            4|
 * |        |0 1 2 3 4 5                 9|
 * |        |                            6|
 * ----------------------------------------
 *              |
 *              |
 *              V
 * ----------------------------------------
 * | Indirect Block                       |
 * |                                     5|
 * |            4 4 4 4 4 5 5            1|
 * |0           5 6 7 8 9 0 1            2|
 * ----------------------------------------
 *                          |
 *                          |
 *                          V
 * ----------------------------------------
 * | Indirect Block                       |
 * |                      1 1 1 1 1      5|
 * |                      6 6 6 6 6      1|
 * |0                     3 4 5 6 7      2|
 * ----------------------------------------
 *                                  |
 *                                  |
 *                                  V
 * ----------------------------------------
 * | Data block containing offset         |
 * |            101342453                 |
 * |                                      |
 * |                                      |
 * ----------------------------------------
 *
 */

static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}

static inline unsigned int zero_metapath_length(const struct metapath *mp,
						unsigned height)
{
	unsigned int i;
	for (i = 0; i < height - 1; i++) {
		if (mp->mp_list[i] != 0)
			return i;
	}
	return height;
}

/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */

static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	unsigned int head_size = (height > 0) ?
		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
}

/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error or height of metadata tree
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		if (!dblock)
			return x + 1;

		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, 0, &mp->mp_bh[x + 1]);
		if (ret)
			return ret;
	}

	return ip->i_height;
}

static inline void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
	}
}

/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent. For example, a run of pointers to
 * blocks 100, 101, 102 followed by a pointer to block 7 gives an extent
 * length of 3.
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(void *start, unsigned int len,
					      __be64 *ptr, unsigned limit,
					      int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;
	} while (be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}

static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}

static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}

static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
				 ((i > 1) ? sizeof(struct gfs2_meta_header) :
					    sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};

/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath
 * @sheight: The starting height (i.e. what's already mapped)
 * @height: The height to build to
 * @maxlen: The max number of data blocks to alloc
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * The function is in two parts. The first part works out the total
 * number of blocks which we need. The second part does the actual
 * allocation asking for an extent at a time (if enough contiguous free
 * blocks are available, there will only be one request per bmap call)
 * and uses the state machine to initialise the blocks in order.
 *
 * Returns: errno on error
 */

static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const unsigned int maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, zmpl = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			zmpl = zero_metapath_length(mp, height);
			iblks -= zmpl;
			iblks += height;
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		n = blks - alloced;
		bn = gfs2_alloc_block(ip, &n);
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
						 sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for (i = zmpl; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = zmpl;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i - 1], 1);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i - 1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while (state != ALLOC_DATA);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}

/**
 * gfs2_block_map - Map a block from an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
 *
 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 * read of metadata will be required before the next block can be
 * mapped. Sets buffer_new() if new blocks were allocated.
 *
 * Returns: errno
 */

int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	if (gfs2_is_dir(ip)) {
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}

/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock,
		    unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}
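/*
 * Illustrative usage sketch (not part of the original file): callers of
 * gfs2_block_map() pass the maximum extent they care about in
 * bh_map->b_size and read the mapped extent back out of the same buffer
 * head, much as gfs2_extent_map() above does. Roughly:
 *
 *	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 *
 *	bh.b_size = want_blocks << inode->i_blkbits;	// upper bound in
 *	error = gfs2_block_map(inode, lblock, &bh, 0);	// no allocation
 *	if (!error && buffer_mapped(&bh)) {
 *		first_dblock = bh.b_blocknr;			// start of extent
 *		mapped = bh.b_size >> inode->i_blkbits;	// contiguous run
 *	}
 *
 * "want_blocks", "first_dblock" and "mapped" are hypothetical names used
 * only for this sketch.
 */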
/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @mp: the path through the metadata to the point to start
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @bc: the call to make for each piece of metadata
 * @data: data opaque to this function to pass to @bc
 *
 * When this is first called @height and @block should be zero and
 * @first should be 1.
 *
 * Returns: errno
 */

static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, block_call_t bc,
			  void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
		if (error)
			return error;

		top = (__be64 *)(bh->b_data + mh_size) +
			(first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = bc(ip, dibh, bh, top, bottom, height, data);
	if (error)
		goto out;

	if (height < ip->i_height - 1)
		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, bc, data);
			if (error)
				break;
		}

out:
	brelse(bh);
	return error;
}

/**
 * do_strip - Look for a particular layer of the file and strip it off
 * @ip: the inode
 * @dibh: the dinode buffer
 * @bh: A buffer of pointers
 * @top: The first pointer in the buffer
 * @bottom: One more than the last pointer
 * @height: the height this buffer is at
 * @data: a pointer to a struct strip_mine
 *
 * Returns: errno
 */

static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
		    unsigned int height, void *data)
{
	struct strip_mine *sm = data;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	u64 bn, bstart;
	u32 blen;
	__be64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;

	if (!*top)
		sm->sm_first = 0;

	if (height != sm->sm_height)
		return 0;

	if (sm->sm_first) {
		top++;
		sm->sm_first = 0;
	}

	metadata = (height != ip->i_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;

	error = gfs2_rindex_hold(sdp, &ip->i_alloc->al_ri_gh);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out; /* Nothing to do */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
				 revokes);
	if (error)
		goto out_rg_gunlock;

	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	bstart = 0;
	blen = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				if (metadata)
					gfs2_free_meta(ip, bstart, blen);
				else
					gfs2_free_data(ip, bstart, blen);
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart) {
		if (metadata)
			gfs2_free_meta(ip, bstart, blen);
		else
			gfs2_free_data(ip, bstart, blen);
	}

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	gfs2_glock_dq_uninit(&ip->i_alloc->al_ri_gh);
	return error;
}

/**
 * do_grow - Make a file look bigger than it is
 * @ip: the inode
 * @size: the size to set the file to
 *
 * Called with an exclusive lock on @ip.
 *
 * Returns: errno
 */

static int do_grow(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out;

	al->al_requested = sdp->sd_max_height + RES_DATA;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(sdp,
			sdp->sd_max_height + al->al_rgd->rd_length +
			RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end_trans;

	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
		if (gfs2_is_stuffed(ip)) {
			error = gfs2_unstuff_dinode(ip, NULL);
			if (error)
				goto out_brelse;
		}
	}

	ip->i_di.di_size = size;
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);

out_brelse:
	brelse(dibh);
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_truncate_page(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t from = inode->i_size;
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, index);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	zero_user(page, offset, length);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

static int trunc_start(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	error = gfs2_trans_begin(sdp,
				 RES_DINODE + (journaled ? RES_JDATA : 0), 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		ip->i_di.di_size = size;
		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
		error = 1;

	} else {
		if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
			error = gfs2_block_truncate_page(ip->i_inode.i_mapping);

		if (!error) {
			ip->i_di.di_size = size;
			ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
			ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			gfs2_dinode_out(ip, dibh->b_data);
		}
	}

	brelse(dibh);

out:
	gfs2_trans_end(sdp);
	return error;
}

static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int height = ip->i_height;
	u64 lblock;
	struct metapath mp;
	int error;

	if (!size)
		lblock = 0;
	else
		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;

	find_metapath(sdp, lblock, &mp, ip->i_height);
	if (!gfs2_alloc_get(ip))
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	while (height--) {
		struct strip_mine sm;
		sm.sm_first = !!size;
		sm.sm_height = height;

		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, do_strip, &sm);
		if (error)
			break;
	}

	gfs2_quota_unhold(ip);

out:
	gfs2_alloc_put(ip);
	return error;
}

static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!ip->i_di.di_size) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}

/**
 * do_shrink - make a file smaller
 * @ip: the inode
 * @size: the size to make the file
 *
 * Called with an exclusive lock on @ip.
 *
 * Returns: errno
 */

static int do_shrink(struct gfs2_inode *ip, u64 size)
{
	int error;

	error = trunc_start(ip, size);
	if (error < 0)
		return error;
	if (error > 0)
		return 0;

	error = trunc_dealloc(ip, size);
	if (!error)
		error = trunc_end(ip);

	return error;
}

static int do_touch(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_touch_out;

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_touch_out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}

/**
 * gfs2_truncatei - make a file a given size
 * @ip: the inode
 * @size: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size.
 *
 * Returns: errno
 */

int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
{
	int error;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
		return -EINVAL;

	if (size > ip->i_di.di_size)
		error = do_grow(ip, size);
	else if (size < ip->i_di.di_size)
		error = do_shrink(ip, size);
	else
		/* update time stamps */
		error = do_touch(ip, size);

	return error;
}

int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;
	error = trunc_dealloc(ip, ip->i_di.di_size);
	if (!error)
		error = trunc_end(ip);
	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}

/**
 * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
 * @ip: the file
 * @len: the number of bytes to be written to the file
 * @data_blocks: returns the number of data blocks required
 * @ind_blocks: returns the number of indirect blocks required
 *
 */

void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp;

	if (gfs2_is_dir(ip)) {
		*data_blocks = DIV_ROUND_UP(len, sdp->sd_jbsize) + 2;
		*ind_blocks = 3 * (sdp->sd_max_jheight - 1);
	} else {
		*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
		*ind_blocks = 3 * (sdp->sd_max_height - 1);
	}

	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		*ind_blocks += tmp;
	}
}
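/*
 * Illustrative arithmetic (not from the original source), assuming a
 * regular file, a 4096-byte block size (sb_bsize_shift == 12), and a
 * hypothetical sd_diptrs of roughly 483:
 *
 *	len         = 1048576;              // a 1MB write
 *	data_blocks = (1048576 >> 12) + 3;  // 256 + 3 = 259
 *	ind_blocks  = 3 * (sd_max_height - 1);
 *	// 259 <= 483, so the loop above adds no further indirect blocks
 *
 * The constant padding terms make this a worst-case estimate, so the
 * reservation intentionally overestimates what a typical write consumes.
 */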
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 * @alloc_required: set to 1 if an alloc is required, 0 otherwise
 *
 * Returns: errno
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len, int *alloc_required)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;

	*alloc_required = 0;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			*alloc_required = 1;
		return 0;
	}

	*alloc_required = 1;
	shift = sdp->sd_sb.sb_bsize_shift;
	if (gfs2_is_dir(ip)) {
		unsigned int bsize = sdp->sd_jbsize;
		lblock = offset;
		do_div(lblock, bsize);
		lblock_stop = offset + len + bsize - 1;
		do_div(lblock_stop, bsize);
	} else {
		u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift;
		lblock = offset >> shift;
		lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
		if (lblock_stop > end_of_file)
			return 0;
	}

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 0;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while (size > 0);

	*alloc_required = 0;
	return 0;
}