/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Implements Extendible Hashing as described in:
 *   "Extendible Hashing" by Fagin, et al in
 *     __ACM Trans. on Database Systems__, Sept 1979.
 *
 *
 * Here's the layout of dirents which is essentially the same as that of ext2
 * within a single block. The field de_name_len is the number of bytes
 * actually required for the name (no null terminator). The field de_rec_len
 * is the number of bytes allocated to the dirent. The offset of the next
 * dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
 * deleted, the preceding dirent inherits its allocated space, ie
 * prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
 * by adding de_rec_len to the current dirent, this essentially causes the
 * deleted dirent to get jumped over when iterating through all the dirents.
 *
 * When deleting the first dirent in a block, there is no previous dirent so
 * the field de_ino is set to zero to designate it as deleted. When allocating
 * a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
 * first dirent has (de_ino == 0) and de_rec_len is large enough, this first
 * dirent is allocated. Otherwise it must go through all the 'used' dirents
 * searching for one in which the amount of total space minus the amount of
 * used space will provide enough space for the new dirent.
 *
 * There are two types of blocks in which dirents reside. In a stuffed dinode,
 * the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of
 * the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
 * beginning of the leaf block. The dirents reside in leaves when
 *
 * dip->i_diskflags & GFS2_DIF_EXHASH is true
 *
 * Otherwise, the dirents are "linear", within a single stuffed dinode block.
 *
 * When the dirents are in leaves, the actual contents of the directory file are
 * used as an array of 64-bit block pointers pointing to the leaf blocks. The
 * dirents are NOT in the directory file itself. There can be more than one
 * block pointer in the array that points to the same leaf. In fact, when a
 * directory is first converted from linear to exhash, all of the pointers
 * point to the same leaf.
 *
 * When a leaf is completely full, the size of the hash table can be
 * doubled unless it is already at the maximum size which is hard coded into
 * GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list,
 * but never before the maximum hash table size has been reached.
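 *
 * As an illustrative sketch (it mirrors helpers defined later in this file,
 * and is not a verbatim excerpt), walking the dirents within one block looks
 * roughly like:
 *
 *	offset = gfs2_dirent_offset(buf);
 *	do {
 *		dent = buf + offset;
 *		if (!gfs2_dirent_sentinel(dent))
 *			...use dent...
 *		offset += be16_to_cpu(dent->de_rec_len);
 *	} while (offset < block_size);
 *
 * which is why an entry whose space was merged into its predecessor's
 * de_rec_len is skipped automatically.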
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "bmap.h"
#include "util.h"

#define IS_LEAF     1 /* Hashed (leaf) directory */
#define IS_DINODE   2 /* Linear (stuffed dinode block) directory */

#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */

#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
#define GFS2_HASH_INDEX_MASK 0xffffc000
#define GFS2_USE_HASH_FLAG 0x2000

struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;

typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
			    const struct qstr *name, void *opaque);

int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
			    struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	*bhp = bh;
	return 0;
}

static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
					struct buffer_head **bhp)
{
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);
	if (error)
		return error;
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
		brelse(bh);
		return -EIO;
	}
	*bhp = bh;
	return 0;
}

static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
				  unsigned int offset, unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
	if (ip->i_inode.i_size < offset + size)
		i_size_write(&ip->i_inode, offset + size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(ip, dibh->b_data);

	brelse(dibh);

	return size;
}


/**
 * gfs2_dir_write_data - Write directory information to the inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing information to be written
 * @offset: The file offset to start writing at
 * @size: The amount of data to write
 *
 * Returns: The number of bytes correctly written or error code
 */
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
			       u64 offset, unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;
	int new = 0;

	if (!size)
		return 0;

	if (gfs2_is_stuffed(ip) &&
	    offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
		return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
					      size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			return error;
	}

	lblock = offset;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		if (!extlen) {
			new = 1;
			error = gfs2_extent_map(&ip->i_inode, lblock, &new,
						&dblock, &extlen);
			if (error)
				goto fail;
			error = -EIO;
			if (gfs2_assert_withdraw(sdp, dblock))
				goto fail;
		}

		if (amount == sdp->sd_jbsize || new)
			error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
		else
			error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

		if (error)
			goto fail;

		gfs2_trans_add_meta(ip->i_gl, bh);
		memcpy(bh->b_data + o, buf, amount);
		brelse(bh);

		buf += amount;
		copied += amount;
		lblock++;
		dblock++;
		extlen--;

		o = sizeof(struct gfs2_meta_header);
	}

out:
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (ip->i_inode.i_size < offset + copied)
		i_size_write(&ip->i_inode, offset + copied);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

	return copied;
fail:
	if (copied)
		goto out;
	return error;
}

static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
				 unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
		brelse(dibh);
	}

	return (error) ? error : size;
}


/**
 * gfs2_dir_read_data - Read data from a directory inode
 * @ip: The GFS2 Inode
 * @buf: The buffer to place result into
 * @size: Amount of data to transfer
 *
 * Returns: The amount of data actually copied or the error
 */
static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
			      unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;

	if (gfs2_is_stuffed(ip))
		return gfs2_dir_read_stuffed(ip, buf, size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	lblock = 0;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;
		int new;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		if (!extlen) {
			new = 0;
			error = gfs2_extent_map(&ip->i_inode, lblock, &new,
						&dblock, &extlen);
			if (error || !dblock)
				goto fail;
			BUG_ON(extlen < 1);
			bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		} else {
			error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh);
			if (error)
				goto fail;
		}
		error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
		if (error) {
			brelse(bh);
			goto fail;
		}
		dblock++;
		extlen--;
		memcpy(buf, bh->b_data + o, amount);
		brelse(bh);
		buf += (amount/sizeof(__be64));
		copied += amount;
		lblock++;
		o = sizeof(struct gfs2_meta_header);
	}

	return copied;
fail:
	return (copied) ? copied : error;
}

/**
 * gfs2_dir_get_hash_table - Get pointer to the dir hash table
 * @ip: The inode in question
 *
 * Returns: The hash table or an error
 */

static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	int ret;
	u32 hsize;
	__be64 *hc;

	BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));

	hc = ip->i_hash_cache;
	if (hc)
		return hc;

	hsize = 1 << ip->i_depth;
	hsize *= sizeof(__be64);
	if (hsize != i_size_read(&ip->i_inode)) {
		gfs2_consist_inode(ip);
		return ERR_PTR(-EIO);
	}

	hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
	if (hc == NULL)
		hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);

	if (hc == NULL)
		return ERR_PTR(-ENOMEM);

	ret = gfs2_dir_read_data(ip, hc, hsize);
	if (ret < 0) {
		kvfree(hc);
		return ERR_PTR(ret);
	}

	spin_lock(&inode->i_lock);
	if (likely(!ip->i_hash_cache)) {
		ip->i_hash_cache = hc;
		hc = NULL;
	}
	spin_unlock(&inode->i_lock);
	kvfree(hc);

	return ip->i_hash_cache;
}

/**
 * gfs2_dir_hash_inval - Invalidate dir hash
 * @ip: The directory inode
 *
 * Must be called with an exclusive glock, or during glock invalidation.
 */
void gfs2_dir_hash_inval(struct gfs2_inode *ip)
{
	__be64 *hc;

	spin_lock(&ip->i_inode.i_lock);
	hc = ip->i_hash_cache;
	ip->i_hash_cache = NULL;
	spin_unlock(&ip->i_inode.i_lock);

	kvfree(hc);
}

static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
	return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
}

static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
				     const struct qstr *name, int ret)
{
	if (!gfs2_dirent_sentinel(dent) &&
	    be32_to_cpu(dent->de_hash) == name->hash &&
	    be16_to_cpu(dent->de_name_len) == name->len &&
	    memcmp(dent+1, name->name, name->len) == 0)
		return ret;
	return 0;
}

static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 1);
}

static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 2);
}

/*
 * name->name holds ptr to start of block.
 * name->len holds size of block.
 */
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	const char *start = name->name;
	const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
	if (name->len == (end - start))
		return 1;
	return 0;
}

/* Look for the dirent that contains the offset specified in data.
 * Once we find that dirent, there must be space available there for the new
 * dirent */
static int gfs2_dirent_find_offset(const struct gfs2_dirent *dent,
				   const struct qstr *name,
				   void *ptr)
{
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (ptr < (void *)dent || ptr >= (void *)dent + totlen)
		return 0;
	if (gfs2_dirent_sentinel(dent))
		actual = 0;
	if (ptr < (void *)dent + actual)
		return -1;
	if ((void *)dent + totlen >= ptr + required)
		return 1;
	return -1;
}

static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
				  const struct qstr *name,
				  void *opaque)
{
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (gfs2_dirent_sentinel(dent))
		actual = 0;
	if (totlen - actual >= required)
		return 1;
	return 0;
}

struct dirent_gather {
	const struct gfs2_dirent **pdent;
	unsigned offset;
};

static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
			      const struct qstr *name,
			      void *opaque)
{
	struct dirent_gather *g = opaque;
	if (!gfs2_dirent_sentinel(dent)) {
		g->pdent[g->offset++] = dent;
	}
	return 0;
}

/*
 * Other possible things to check:
 * - Inode located within filesystem size (and on valid block)
 * - Valid directory entry type
 * Not sure how heavy-weight we want to make this... could also check
 * hash is correct for example, but that would take a lot of extra time.
 * For now the most important thing is to check that the various sizes
 * are correct.
 */
static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
			     unsigned int size, unsigned int len, int first)
{
	const char *msg = "gfs2_dirent too small";
	if (unlikely(size < sizeof(struct gfs2_dirent)))
		goto error;
	msg = "gfs2_dirent misaligned";
	if (unlikely(offset & 0x7))
		goto error;
	msg = "gfs2_dirent points beyond end of block";
	if (unlikely(offset + size > len))
		goto error;
	msg = "zero inode number";
	if (unlikely(!first && gfs2_dirent_sentinel(dent)))
		goto error;
	msg = "name length is greater than space in dirent";
	if (!gfs2_dirent_sentinel(dent) &&
	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
		     size))
		goto error;
	return 0;
error:
	pr_warn("%s: %s (%s)\n",
		__func__, msg, first ?
"first in block" : "not first in block"); 533 return -EIO; 534 } 535 536 static int gfs2_dirent_offset(const void *buf) 537 { 538 const struct gfs2_meta_header *h = buf; 539 int offset; 540 541 BUG_ON(buf == NULL); 542 543 switch(be32_to_cpu(h->mh_type)) { 544 case GFS2_METATYPE_LF: 545 offset = sizeof(struct gfs2_leaf); 546 break; 547 case GFS2_METATYPE_DI: 548 offset = sizeof(struct gfs2_dinode); 549 break; 550 default: 551 goto wrong_type; 552 } 553 return offset; 554 wrong_type: 555 pr_warn("%s: wrong block type %u\n", __func__, be32_to_cpu(h->mh_type)); 556 return -1; 557 } 558 559 static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, 560 unsigned int len, gfs2_dscan_t scan, 561 const struct qstr *name, 562 void *opaque) 563 { 564 struct gfs2_dirent *dent, *prev; 565 unsigned offset; 566 unsigned size; 567 int ret = 0; 568 569 ret = gfs2_dirent_offset(buf); 570 if (ret < 0) 571 goto consist_inode; 572 573 offset = ret; 574 prev = NULL; 575 dent = buf + offset; 576 size = be16_to_cpu(dent->de_rec_len); 577 if (gfs2_check_dirent(dent, offset, size, len, 1)) 578 goto consist_inode; 579 do { 580 ret = scan(dent, name, opaque); 581 if (ret) 582 break; 583 offset += size; 584 if (offset == len) 585 break; 586 prev = dent; 587 dent = buf + offset; 588 size = be16_to_cpu(dent->de_rec_len); 589 if (gfs2_check_dirent(dent, offset, size, len, 0)) 590 goto consist_inode; 591 } while(1); 592 593 switch(ret) { 594 case 0: 595 return NULL; 596 case 1: 597 return dent; 598 case 2: 599 return prev ? prev : dent; 600 default: 601 BUG_ON(ret > 0); 602 return ERR_PTR(ret); 603 } 604 605 consist_inode: 606 gfs2_consist_inode(GFS2_I(inode)); 607 return ERR_PTR(-EIO); 608 } 609 610 static int dirent_check_reclen(struct gfs2_inode *dip, 611 const struct gfs2_dirent *d, const void *end_p) 612 { 613 const void *ptr = d; 614 u16 rec_len = be16_to_cpu(d->de_rec_len); 615 616 if (unlikely(rec_len < sizeof(struct gfs2_dirent))) 617 goto broken; 618 ptr += rec_len; 619 if (ptr < end_p) 620 return rec_len; 621 if (ptr == end_p) 622 return -ENOENT; 623 broken: 624 gfs2_consist_inode(dip); 625 return -EIO; 626 } 627 628 /** 629 * dirent_next - Next dirent 630 * @dip: the directory 631 * @bh: The buffer 632 * @dent: Pointer to list of dirents 633 * 634 * Returns: 0 on success, error code otherwise 635 */ 636 637 static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh, 638 struct gfs2_dirent **dent) 639 { 640 struct gfs2_dirent *cur = *dent, *tmp; 641 char *bh_end = bh->b_data + bh->b_size; 642 int ret; 643 644 ret = dirent_check_reclen(dip, cur, bh_end); 645 if (ret < 0) 646 return ret; 647 648 tmp = (void *)cur + ret; 649 ret = dirent_check_reclen(dip, tmp, bh_end); 650 if (ret == -EIO) 651 return ret; 652 653 /* Only the first dent could ever have de_inum.no_addr == 0 */ 654 if (gfs2_dirent_sentinel(tmp)) { 655 gfs2_consist_inode(dip); 656 return -EIO; 657 } 658 659 *dent = tmp; 660 return 0; 661 } 662 663 /** 664 * dirent_del - Delete a dirent 665 * @dip: The GFS2 inode 666 * @bh: The buffer 667 * @prev: The previous dirent 668 * @cur: The current dirent 669 * 670 */ 671 672 static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh, 673 struct gfs2_dirent *prev, struct gfs2_dirent *cur) 674 { 675 u16 cur_rec_len, prev_rec_len; 676 677 if (gfs2_dirent_sentinel(cur)) { 678 gfs2_consist_inode(dip); 679 return; 680 } 681 682 gfs2_trans_add_meta(dip->i_gl, bh); 683 684 /* If there is no prev entry, this is the first entry in the block. 
	   The de_rec_len is already as big as it needs to be. Just zero
	   out the inode number and return. */

	if (!prev) {
		cur->de_inum.no_addr = 0;
		cur->de_inum.no_formal_ino = 0;
		return;
	}

	/* Combine this dentry with the previous one. */

	prev_rec_len = be16_to_cpu(prev->de_rec_len);
	cur_rec_len = be16_to_cpu(cur->de_rec_len);

	if ((char *)prev + prev_rec_len != (char *)cur)
		gfs2_consist_inode(dip);
	if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
		gfs2_consist_inode(dip);

	prev_rec_len += cur_rec_len;
	prev->de_rec_len = cpu_to_be16(prev_rec_len);
}


static struct gfs2_dirent *do_init_dirent(struct inode *inode,
					  struct gfs2_dirent *dent,
					  const struct qstr *name,
					  struct buffer_head *bh,
					  unsigned offset)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dirent *ndent;
	unsigned totlen;

	totlen = be16_to_cpu(dent->de_rec_len);
	BUG_ON(offset + name->len > totlen);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ndent = (struct gfs2_dirent *)((char *)dent + offset);
	dent->de_rec_len = cpu_to_be16(offset);
	gfs2_qstr2dirent(name, totlen - offset, ndent);
	return ndent;
}


/*
 * Takes a dent from which to grab space as an argument. Returns the
 * newly created dent.
 */
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
					    struct gfs2_dirent *dent,
					    const struct qstr *name,
					    struct buffer_head *bh)
{
	unsigned offset = 0;

	if (!gfs2_dirent_sentinel(dent))
		offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	return do_init_dirent(inode, dent, name, bh, offset);
}

static struct gfs2_dirent *gfs2_dirent_split_alloc(struct inode *inode,
						   struct buffer_head *bh,
						   const struct qstr *name,
						   void *ptr)
{
	struct gfs2_dirent *dent;
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
				gfs2_dirent_find_offset, name, ptr);
	if (!dent || IS_ERR(dent))
		return dent;
	return do_init_dirent(inode, dent, name, bh,
			      (unsigned)(ptr - (void *)dent));
}

static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
		    struct buffer_head **bhp)
{
	int error;

	error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, 0, bhp);
	if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
		/* pr_info("block num=%llu\n", leaf_no); */
		error = -EIO;
	}

	return error;
}

/**
 * get_leaf_nr - Get a leaf number associated with the index
 * @dip: The GFS2 inode
 * @index: hash table index of the targeted leaf
 * @leaf_out: the leaf block number found at that index
 *
 * Returns: 0 on success, error code otherwise
 */

static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
		       u64 *leaf_out)
{
	__be64 *hash;
	int error;

	hash = gfs2_dir_get_hash_table(dip);
	error = PTR_ERR_OR_ZERO(hash);

	if (!error)
		*leaf_out = be64_to_cpu(*(hash + index));

	return error;
}

static int get_first_leaf(struct gfs2_inode *dip, u32 index,
			  struct buffer_head **bh_out)
{
	u64 leaf_no;
	int error;

	error = get_leaf_nr(dip, index, &leaf_no);
	if (!error)
		error = get_leaf(dip, leaf_no, bh_out);

	return error;
}

static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned hsize = 1 << ip->i_depth;
		unsigned index;
		u64 ln;
		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);
		}

		index = name->hash >> (32 - ip->i_depth);
		error = get_first_leaf(ip, index, &bh);
		if (error)
			return ERR_PTR(error);
		do {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			if (dent)
				goto got_dent;
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);
			brelse(bh);
			if (!ln)
				break;

			error = get_leaf(ip, ln, &bh);
		} while(!error);

		return error ? ERR_PTR(error) : NULL;
	}


	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
	if (unlikely(dent == NULL || IS_ERR(dent))) {
		brelse(bh);
		bh = NULL;
	}
	*pbh = bh;
	return dent;
}

static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int n = 1;
	u64 bn;
	int error;
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	struct gfs2_dirent *dent;
	struct qstr name = { .name = "" };
	struct timespec tv = CURRENT_TIME;

	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
	if (error)
		return NULL;
	bh = gfs2_meta_new(ip->i_gl, bn);
	if (!bh)
		return NULL;

	gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
	leaf = (struct gfs2_leaf *)bh->b_data;
	leaf->lf_depth = cpu_to_be16(depth);
	leaf->lf_entries = 0;
	leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
	leaf->lf_next = 0;
	leaf->lf_inode = cpu_to_be64(ip->i_no_addr);
	leaf->lf_dist = cpu_to_be32(1);
	leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
	leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
	dent = (struct gfs2_dirent *)(leaf+1);
	gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
	*pbh = bh;
	return leaf;
}

/**
 * dir_make_exhash - Convert a stuffed directory into an ExHash directory
 * @dip: The GFS2 inode
 *
 * Returns: 0 on success, error code otherwise
 */

static int dir_make_exhash(struct inode *inode)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_dirent *dent;
	struct qstr args;
	struct buffer_head *bh, *dibh;
	struct gfs2_leaf *leaf;
	int y;
	u32 x;
	__be64 *lp;
	u64 bn;
	int error;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	/* Turn over a new leaf */

	leaf = new_leaf(inode, &bh, 0);
	if (!leaf)
		return -ENOSPC;
	bn = bh->b_blocknr;

	gfs2_assert(sdp, dip->i_entries < (1 << 16));
	leaf->lf_entries = cpu_to_be16(dip->i_entries);

	/* Copy dirents */

	gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
			      sizeof(struct gfs2_dinode));

	/* Find last entry */

	x = 0;
	args.len = bh->b_size - sizeof(struct gfs2_dinode) +
		   sizeof(struct gfs2_leaf);
	args.name = bh->b_data;
	dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
				gfs2_dirent_last, &args, NULL);
	if (!dent) {
		brelse(bh);
		brelse(dibh);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		brelse(bh);
		brelse(dibh);
		return PTR_ERR(dent);
	}

	/* Adjust the last dirent's record length
	   (Remember that dent still points to the last entry.) */

	dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
				       sizeof(struct gfs2_dinode) -
				       sizeof(struct gfs2_leaf));

	brelse(bh);

	/* We're done with the new leaf block, now setup the new
	   hash table. */

	gfs2_trans_add_meta(dip->i_gl, dibh);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));

	for (x = sdp->sd_hash_ptrs; x--; lp++)
		*lp = cpu_to_be64(bn);

	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
	gfs2_add_inode_blocks(&dip->i_inode, 1);
	dip->i_diskflags |= GFS2_DIF_EXHASH;

	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
	dip->i_depth = y;

	gfs2_dinode_out(dip, dibh->b_data);

	brelse(dibh);

	return 0;
}

/**
 * dir_split_leaf - Split a leaf block into two
 * @inode: The directory inode to be split
 * @name: name of the new entry being added
 *
 * Returns: 0 on success, error code on failure
 */

static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;
	u64 bn, leaf_no;
	__be64 *lp;
	u32 index;
	int x, moved = 0;
	int error;

	index = name->hash >> (32 - dip->i_depth);
	error = get_leaf_nr(dip, index, &leaf_no);
	if (error)
		return error;

	/* Get the old leaf block */
	error = get_leaf(dip, leaf_no, &obh);
	if (error)
		return error;

	oleaf = (struct gfs2_leaf *)obh->b_data;
	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
		brelse(obh);
		return 1; /* can't split */
	}

	gfs2_trans_add_meta(dip->i_gl, obh);

	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
	if (!nleaf) {
		brelse(obh);
		return -ENOSPC;
	}
	bn = nbh->b_blocknr;

	/* Compute the start and len of leaf pointers in the hash table. */
	len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		pr_warn("i_depth %u lf_depth %u index %u\n",
			dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);
		error = -EIO;
		goto fail_brelse;
	}

	start = (index & ~(len - 1));

	/* Change the pointers.
	   Don't bother distinguishing stuffed from non-stuffed.
	   This code is complicated enough already.
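	   As a worked example (derived from the code below, not from any
	   separate spec): with dip->i_depth == 4 and oleaf->lf_depth == 2,
	   len == 4 hash table slots point at the old leaf. The lower
	   half_len == 2 of those slots are rewritten to point at the new
	   leaf, and divider is the smallest hash value that still maps to
	   the old leaf, so entries with de_hash < divider get moved.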
	 */
	lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
	if (!lp) {
		error = -ENOMEM;
		goto fail_brelse;
	}

	/* Change the pointers */
	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	gfs2_dir_hash_inval(dip);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {
		if (error >= 0)
			error = -EIO;
		goto fail_lpfree;
	}

	kfree(lp);

	/* Compute the divider */
	divider = (start + half_len) << (32 - dip->i_depth);

	/* Copy the entries */
	dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

	do {
		next = dent;
		if (dirent_next(dip, obh, &next))
			next = NULL;

		if (!gfs2_dirent_sentinel(dent) &&
		    be32_to_cpu(dent->de_hash) < divider) {
			struct qstr str;
			void *ptr = ((char *)dent - obh->b_data) + nbh->b_data;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_split_alloc(inode, nbh, &str, ptr);
			if (IS_ERR(new)) {
				error = PTR_ERR(new);
				break;
			}

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			be16_add_cpu(&nleaf->lf_entries, 1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			be16_add_cpu(&oleaf->lf_entries, -1);

			if (!prev)
				prev = dent;

			moved = 1;
		} else {
			prev = dent;
		}
		dent = next;
	} while (dent);

	oleaf->lf_depth = nleaf->lf_depth;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		gfs2_trans_add_meta(dip->i_gl, dibh);
		gfs2_add_inode_blocks(&dip->i_inode, 1);
		gfs2_dinode_out(dip, dibh->b_data);
		brelse(dibh);
	}

	brelse(obh);
	brelse(nbh);

	return error;

fail_lpfree:
	kfree(lp);

fail_brelse:
	brelse(obh);
	brelse(nbh);
	return error;
}

/**
 * dir_double_exhash - Double size of ExHash table
 * @dip: The GFS2 dinode
 *
 * Returns: 0 on success, error code on failure
 */

static int dir_double_exhash(struct gfs2_inode *dip)
{
	struct buffer_head *dibh;
	u32 hsize;
	u32 hsize_bytes;
	__be64 *hc;
	__be64 *hc2, *h;
	int x;
	int error = 0;

	hsize = 1 << dip->i_depth;
	hsize_bytes = hsize * sizeof(__be64);

	hc = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(hc))
		return PTR_ERR(hc);

	hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
	if (hc2 == NULL)
		hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);

	if (!hc2)
		return -ENOMEM;

	h = hc2;
	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_kfree;

	for (x = 0; x < hsize; x++) {
		*h++ = *hc;
		*h++ = *hc;
		hc++;
	}

	error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
	if (error != (hsize_bytes * 2))
		goto fail;

	gfs2_dir_hash_inval(dip);
	dip->i_hash_cache = hc2;
	dip->i_depth++;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);
	return 0;

fail:
	/* Replace original hash table & size */
	gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
	i_size_write(&dip->i_inode, hsize_bytes);
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);
out_kfree:
	kvfree(hc2);
	return error;
}

/**
 * compare_dents - compare directory entries by hash value
 * @a: first dent
 * @b: second dent
 *
 * When comparing the hash entries of @a to @b:
 *   gt: returns 1
 *   lt: returns -1
 *   eq: returns 0
 */

static int compare_dents(const void *a, const void *b)
{
	const struct gfs2_dirent *dent_a, *dent_b;
	u32 hash_a, hash_b;
	int ret = 0;

	dent_a = *(const struct gfs2_dirent **)a;
	hash_a = dent_a->de_cookie;

	dent_b = *(const struct gfs2_dirent **)b;
	hash_b = dent_b->de_cookie;

	if (hash_a > hash_b)
		ret = 1;
	else if (hash_a < hash_b)
		ret = -1;
	else {
		unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
		unsigned int len_b = be16_to_cpu(dent_b->de_name_len);

		if (len_a > len_b)
			ret = 1;
		else if (len_a < len_b)
			ret = -1;
		else
			ret = memcmp(dent_a + 1, dent_b + 1, len_a);
	}

	return ret;
}

/**
 * do_filldir_main - read out directory entries
 * @dip: The GFS2 inode
 * @ctx: what to feed the entries to
 * @darr: an array of struct gfs2_dirent pointers to read
 * @entries: the number of entries in darr
 * @copied: pointer to int that's non-zero if an entry has been copied out
 *
 * Jump through some hoops to make sure that if there are hash collisions,
 * they are read out at the beginning of a buffer. We want to minimize
 * the possibility that they will fall into different readdir buffers or
 * that someone will want to seek to that location.
 *
 * Returns: errno, >0 if the actor tells you to stop
 */

static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
			   struct gfs2_dirent **darr, u32 entries,
			   u32 sort_start, int *copied)
{
	const struct gfs2_dirent *dent, *dent_next;
	u64 off, off_next;
	unsigned int x, y;
	int run = 0;

	if (sort_start < entries)
		sort(&darr[sort_start], entries - sort_start,
		     sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = dent_next->de_cookie;

	for (x = 0, y = 1; x < entries; x++, y++) {
		dent = dent_next;
		off = off_next;

		if (y < entries) {
			dent_next = darr[y];
			off_next = dent_next->de_cookie;

			if (off < ctx->pos)
				continue;
			ctx->pos = off;

			if (off_next == off) {
				if (*copied && !run)
					return 1;
				run = 1;
			} else
				run = 0;
		} else {
			if (off < ctx->pos)
				continue;
			ctx->pos = off;
		}

		if (!dir_emit(ctx, (const char *)(dent + 1),
				be16_to_cpu(dent->de_name_len),
				be64_to_cpu(dent->de_inum.no_addr),
				be16_to_cpu(dent->de_type)))
			return 1;

		*copied = 1;
	}

	/* Increment the ctx->pos by one, so the next time we come into the
	   do_filldir fxn, we get the next entry instead of the last one in the
	   current leaf */

	ctx->pos++;

	return 0;
}

static void *gfs2_alloc_sort_buffer(unsigned size)
{
	void *ptr = NULL;

	if (size < KMALLOC_MAX_SIZE)
		ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
	if (!ptr)
		ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
	return ptr;
}


static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
			    unsigned leaf_nr, struct gfs2_dirent **darr,
			    unsigned entries)
{
	int sort_id = -1;
	int i;

	for (i = 0; i < entries; i++) {
		unsigned offset;

		darr[i]->de_cookie = be32_to_cpu(darr[i]->de_hash);
		darr[i]->de_cookie = gfs2_disk_hash2offset(darr[i]->de_cookie);

		if (!sdp->sd_args.ar_loccookie)
			continue;
		offset = (char *)(darr[i]) -
			 (bh->b_data + gfs2_dirent_offset(bh->b_data));
		offset /= GFS2_MIN_DIRENT_SIZE;
		offset += leaf_nr * sdp->sd_max_dents_per_leaf;
		if (offset >= GFS2_USE_HASH_FLAG ||
		    leaf_nr >= GFS2_USE_HASH_FLAG) {
			darr[i]->de_cookie |= GFS2_USE_HASH_FLAG;
			if (sort_id < 0)
				sort_id = i;
			continue;
		}
		darr[i]->de_cookie &= GFS2_HASH_INDEX_MASK;
		darr[i]->de_cookie |= offset;
	}
	return sort_id;
}


static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
			      int *copied, unsigned *depth,
			      u64 leaf_no)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0, entries2 = 0;
	unsigned leaves = 0, leaf = 0, offset, sort_offset;
	struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int error, i, need_sort = 0, sort_id;
	u64 lfn = leaf_no;

	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out;
		lf = (struct gfs2_leaf *)bh->b_data;
		if (leaves == 0)
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		leaves++;
		lfn = be64_to_cpu(lf->lf_next);
		brelse(bh);
	} while(lfn);

	if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {
		need_sort = 1;
		sort_offset = 0;
	}

	if (!entries)
		return 0;

	error = -ENOMEM;
	/*
	 * The extra 99 entries are not normally used, but are a buffer
	 * zone in case the number of entries in the leaf is corrupt.
	 * 99 is the maximum number of entries that can fit in a single
	 * leaf block.
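	 *
	 * The buffer allocated below also holds one buffer_head pointer per
	 * leaf in front of the dirent pointer array (darr = larr + leaves),
	 * which is why "leaves" is included in the size as well.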
	 */
	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
	if (!larr)
		goto out;
	darr = (struct gfs2_dirent **)(larr + leaves);
	g.pdent = (const struct gfs2_dirent **)darr;
	g.offset = 0;
	lfn = leaf_no;

	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out_free;
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			offset = g.offset;
			entries2 += be16_to_cpu(lf->lf_entries);
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (IS_ERR(dent))
				goto out_free;
			if (entries2 != g.offset) {
				fs_warn(sdp, "Number of entries corrupt in dir "
					"leaf %llu, entries2 (%u) != "
					"g.offset (%u)\n",
					(unsigned long long)bh->b_blocknr,
					entries2, g.offset);
				error = -EIO;
				goto out_free;
			}
			error = 0;
			sort_id = gfs2_set_cookies(sdp, bh, leaf, &darr[offset],
						   be16_to_cpu(lf->lf_entries));
			if (!need_sort && sort_id >= 0) {
				need_sort = 1;
				sort_offset = offset + sort_id;
			}
			larr[leaf++] = bh;
		} else {
			larr[leaf++] = NULL;
			brelse(bh);
		}
	} while(lfn);

	BUG_ON(entries2 != entries);
	error = do_filldir_main(ip, ctx, darr, entries, need_sort ?
				sort_offset : entries, copied);
out_free:
	for(i = 0; i < leaf; i++)
		if (larr[i])
			brelse(larr[i]);
	kvfree(larr);
out:
	return error;
}

/**
 * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
 *
 * Note: we can't calculate each index like dir_e_read can because we don't
 * have the leaf, and therefore we don't have the depth, and therefore we
 * don't have the length. So we have to just read enough ahead to make up
 * for the loss of information.
 */
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
			       struct file_ra_state *f_ra)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	u64 blocknr = 0, last;
	unsigned count;

	/* First check if we've already read-ahead for the whole range. */
	if (index + MAX_RA_BLOCKS < f_ra->start)
		return;

	f_ra->start = max((pgoff_t)index, f_ra->start);
	for (count = 0; count < MAX_RA_BLOCKS; count++) {
		if (f_ra->start >= hsize) /* if exceeded the hash table */
			break;

		last = blocknr;
		blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
		f_ra->start++;
		if (blocknr == last)
			continue;

		bh = gfs2_getbuf(gl, blocknr, 1);
		if (trylock_buffer(bh)) {
			if (buffer_uptodate(bh)) {
				unlock_buffer(bh);
				brelse(bh);
				continue;
			}
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READA | REQ_META, bh);
			continue;
		}
		brelse(bh);
	}
}

/**
 * dir_e_read - Reads the entries from a directory into a filldir buffer
 * @dip: dinode pointer
 * @ctx: actor to feed the entries to
 *
 * Returns: errno
 */

static int dir_e_read(struct inode *inode, struct dir_context *ctx,
		      struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	u32 hsize, len = 0;
	u32 hash, index;
	__be64 *lp;
	int copied = 0;
	int error = 0;
	unsigned depth = 0;

	hsize = 1 << dip->i_depth;
	hash = gfs2_dir_offset2hash(ctx->pos);
	index = hash >> (32 - dip->i_depth);

	if (dip->i_hash_cache == NULL)
		f_ra->start = 0;
	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	gfs2_dir_readahead(inode, hsize, index, f_ra);

	while (index < hsize) {
		error = gfs2_dir_read_leaf(inode, ctx,
					   &copied, &depth,
					   be64_to_cpu(lp[index]));
		if (error)
			break;

		len = 1 << (dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

	if (error > 0)
		error = 0;
	return error;
}

int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
		  struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct dirent_gather g;
	struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;
	int copied = 0;
	int error;

	if (!dip->i_entries)
		return 0;

	if (dip->i_diskflags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, ctx, f_ra);

	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	error = -ENOMEM;
	/* 96 is max number of dirents which can be stuffed into an inode */
	darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
	if (darr) {
		g.pdent = (const struct gfs2_dirent **)darr;
		g.offset = 0;
		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
		if (IS_ERR(dent)) {
			error = PTR_ERR(dent);
			goto out;
		}
		if (dip->i_entries != g.offset) {
			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
				"ip->i_entries (%u) != g.offset (%u)\n",
				(unsigned long long)dip->i_no_addr,
				dip->i_entries,
				g.offset);
			error = -EIO;
			goto out;
		}
		gfs2_set_cookies(sdp, dibh, 0, darr, dip->i_entries);
		error = do_filldir_main(dip, ctx, darr,
					dip->i_entries, 0, &copied);
out:
		kfree(darr);
	}

	if (error > 0)
		error = 0;

	brelse(dibh);

	return error;
}

/**
 * gfs2_dir_search - Search a directory
 * @dip: The GFS2 dir inode
 * @name: The name we are looking up
 * @fail_on_exist: Fail if the name exists rather than looking it up
 *
 * This routine searches a directory for a file or another directory.
 * Assumes a glock is held on dip.
 *
 * Returns: errno
 */

struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
			      bool fail_on_exist)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	u64 addr, formal_ino;
	u16 dtype;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		struct inode *inode;
		u16 rahead;

		if (IS_ERR(dent))
			return ERR_CAST(dent);
		dtype = be16_to_cpu(dent->de_type);
		rahead = be16_to_cpu(dent->de_rahead);
		addr = be64_to_cpu(dent->de_inum.no_addr);
		formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);
		brelse(bh);
		if (fail_on_exist)
			return ERR_PTR(-EEXIST);
		inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino);
		if (!IS_ERR(inode))
			GFS2_I(inode)->i_rahead = rahead;
		return inode;
	}
	return ERR_PTR(-ENOENT);
}

int gfs2_dir_check(struct inode *dir, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int ret = -ENOENT;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		if (IS_ERR(dent))
			return PTR_ERR(dent);
		if (ip) {
			if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
				goto out;
			if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
			    ip->i_no_formal_ino)
				goto out;
			if (unlikely(IF2DT(ip->i_inode.i_mode) !=
				     be16_to_cpu(dent->de_type))) {
				gfs2_consist_inode(GFS2_I(dir));
				ret = -EIO;
				goto out;
			}
		}
		ret = 0;
out:
		brelse(bh);
	}
	return ret;
}

/**
 * dir_new_leaf - Add a new leaf onto hash chain
 * @inode: The directory
 * @name: The name we are adding
 *
 * This adds a new dir leaf onto an existing leaf when there is not
 * enough space to add a new dir entry. This is a last resort after
 * we've expanded the hash table to max size and also split existing
 * leaf blocks, so it will only occur for very large directories.
 *
 * The dist parameter is set to 1 for leaf blocks directly attached
 * to the hash table, 2 for one layer of indirection, 3 for two layers
 * etc. We are thus able to tell the difference between an old leaf
 * with dist set to zero (i.e. "don't know") and a new one where we
 * set this information for debug/fsck purposes.
1720 * 1721 * Returns: 0 on success, or -ve on error 1722 */ 1723 1724 static int dir_new_leaf(struct inode *inode, const struct qstr *name) 1725 { 1726 struct buffer_head *bh, *obh; 1727 struct gfs2_inode *ip = GFS2_I(inode); 1728 struct gfs2_leaf *leaf, *oleaf; 1729 u32 dist = 1; 1730 int error; 1731 u32 index; 1732 u64 bn; 1733 1734 index = name->hash >> (32 - ip->i_depth); 1735 error = get_first_leaf(ip, index, &obh); 1736 if (error) 1737 return error; 1738 do { 1739 dist++; 1740 oleaf = (struct gfs2_leaf *)obh->b_data; 1741 bn = be64_to_cpu(oleaf->lf_next); 1742 if (!bn) 1743 break; 1744 brelse(obh); 1745 error = get_leaf(ip, bn, &obh); 1746 if (error) 1747 return error; 1748 } while(1); 1749 1750 gfs2_trans_add_meta(ip->i_gl, obh); 1751 1752 leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth)); 1753 if (!leaf) { 1754 brelse(obh); 1755 return -ENOSPC; 1756 } 1757 leaf->lf_dist = cpu_to_be32(dist); 1758 oleaf->lf_next = cpu_to_be64(bh->b_blocknr); 1759 brelse(bh); 1760 brelse(obh); 1761 1762 error = gfs2_meta_inode_buffer(ip, &bh); 1763 if (error) 1764 return error; 1765 gfs2_trans_add_meta(ip->i_gl, bh); 1766 gfs2_add_inode_blocks(&ip->i_inode, 1); 1767 gfs2_dinode_out(ip, bh->b_data); 1768 brelse(bh); 1769 return 0; 1770 } 1771 1772 static u16 gfs2_inode_ra_len(const struct gfs2_inode *ip) 1773 { 1774 u64 where = ip->i_no_addr + 1; 1775 if (ip->i_eattr == where) 1776 return 1; 1777 return 0; 1778 } 1779 1780 /** 1781 * gfs2_dir_add - Add new filename into directory 1782 * @inode: The directory inode 1783 * @name: The new name 1784 * @nip: The GFS2 inode to be linked in to the directory 1785 * @da: The directory addition info 1786 * 1787 * If the call to gfs2_diradd_alloc_required resulted in there being 1788 * no need to allocate any new directory blocks, then it will contain 1789 * a pointer to the directory entry and the bh in which it resides. We 1790 * can use that without having to repeat the search. If there was no 1791 * free space, then we must now create more space. 
1792 * 1793 * Returns: 0 on success, error code on failure 1794 */ 1795 1796 int gfs2_dir_add(struct inode *inode, const struct qstr *name, 1797 const struct gfs2_inode *nip, struct gfs2_diradd *da) 1798 { 1799 struct gfs2_inode *ip = GFS2_I(inode); 1800 struct buffer_head *bh = da->bh; 1801 struct gfs2_dirent *dent = da->dent; 1802 struct timespec tv; 1803 struct gfs2_leaf *leaf; 1804 int error; 1805 1806 while(1) { 1807 if (da->bh == NULL) { 1808 dent = gfs2_dirent_search(inode, name, 1809 gfs2_dirent_find_space, &bh); 1810 } 1811 if (dent) { 1812 if (IS_ERR(dent)) 1813 return PTR_ERR(dent); 1814 dent = gfs2_init_dirent(inode, dent, name, bh); 1815 gfs2_inum_out(nip, dent); 1816 dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode)); 1817 dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip)); 1818 tv = CURRENT_TIME; 1819 if (ip->i_diskflags & GFS2_DIF_EXHASH) { 1820 leaf = (struct gfs2_leaf *)bh->b_data; 1821 be16_add_cpu(&leaf->lf_entries, 1); 1822 leaf->lf_nsec = cpu_to_be32(tv.tv_nsec); 1823 leaf->lf_sec = cpu_to_be64(tv.tv_sec); 1824 } 1825 da->dent = NULL; 1826 da->bh = NULL; 1827 brelse(bh); 1828 ip->i_entries++; 1829 ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv; 1830 if (S_ISDIR(nip->i_inode.i_mode)) 1831 inc_nlink(&ip->i_inode); 1832 mark_inode_dirty(inode); 1833 error = 0; 1834 break; 1835 } 1836 if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) { 1837 error = dir_make_exhash(inode); 1838 if (error) 1839 break; 1840 continue; 1841 } 1842 error = dir_split_leaf(inode, name); 1843 if (error == 0) 1844 continue; 1845 if (error < 0) 1846 break; 1847 if (ip->i_depth < GFS2_DIR_MAX_DEPTH) { 1848 error = dir_double_exhash(ip); 1849 if (error) 1850 break; 1851 error = dir_split_leaf(inode, name); 1852 if (error < 0) 1853 break; 1854 if (error == 0) 1855 continue; 1856 } 1857 error = dir_new_leaf(inode, name); 1858 if (!error) 1859 continue; 1860 error = -ENOSPC; 1861 break; 1862 } 1863 return error; 1864 } 1865 1866 1867 /** 1868 * gfs2_dir_del - Delete a directory entry 1869 * @dip: The GFS2 inode 1870 * @filename: The filename 1871 * 1872 * Returns: 0 on success, error code on failure 1873 */ 1874 1875 int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry) 1876 { 1877 const struct qstr *name = &dentry->d_name; 1878 struct gfs2_dirent *dent, *prev = NULL; 1879 struct buffer_head *bh; 1880 struct timespec tv = CURRENT_TIME; 1881 1882 /* Returns _either_ the entry (if its first in block) or the 1883 previous entry otherwise */ 1884 dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh); 1885 if (!dent) { 1886 gfs2_consist_inode(dip); 1887 return -EIO; 1888 } 1889 if (IS_ERR(dent)) { 1890 gfs2_consist_inode(dip); 1891 return PTR_ERR(dent); 1892 } 1893 /* If not first in block, adjust pointers accordingly */ 1894 if (gfs2_dirent_find(dent, name, NULL) == 0) { 1895 prev = dent; 1896 dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len)); 1897 } 1898 1899 dirent_del(dip, bh, prev, dent); 1900 if (dip->i_diskflags & GFS2_DIF_EXHASH) { 1901 struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data; 1902 u16 entries = be16_to_cpu(leaf->lf_entries); 1903 if (!entries) 1904 gfs2_consist_inode(dip); 1905 leaf->lf_entries = cpu_to_be16(--entries); 1906 leaf->lf_nsec = cpu_to_be32(tv.tv_nsec); 1907 leaf->lf_sec = cpu_to_be64(tv.tv_sec); 1908 } 1909 brelse(bh); 1910 1911 if (!dip->i_entries) 1912 gfs2_consist_inode(dip); 1913 dip->i_entries--; 1914 dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv; 1915 if (d_is_dir(dentry)) 1916 drop_nlink(&dip->i_inode); 
	mark_inode_dirty(&dip->i_inode);

	return 0;
}

/**
 * gfs2_dir_mvino - Change inode number of directory entry
 * @dip: The GFS2 directory inode
 * @filename: the name of the entry to update
 * @nip: the new GFS2 inode which the entry should point to
 * @new_type: the new directory entry type
 *
 * This routine changes the inode number of a directory entry. It's used
 * by rename to change ".." when a directory is moved.
 * Assumes a glock is held on dip.
 *
 * Returns: errno
 */

int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
		   const struct gfs2_inode *nip, unsigned int new_type)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int error;

	dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
	if (!dent) {
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	gfs2_trans_add_meta(dip->i_gl, bh);
	gfs2_inum_out(nip, dent);
	dent->de_type = cpu_to_be16(new_type);

	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		brelse(bh);
		error = gfs2_meta_inode_buffer(dip, &bh);
		if (error)
			return error;
		gfs2_trans_add_meta(dip->i_gl, bh);
	}

	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(dip, bh->b_data);
	brelse(bh);
	return 0;
}

/**
 * leaf_dealloc - Deallocate a directory leaf
 * @dip: the directory
 * @index: the hash table offset in the directory
 * @len: the number of pointers to this leaf
 * @leaf_no: the leaf number
 * @leaf_bh: buffer_head for the starting leaf
 * @last_dealloc: 1 if this is the final dealloc for the leaf, else 0
 *
 * Returns: errno
 */

static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, struct buffer_head *leaf_bh,
			int last_dealloc)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	u64 blk, nblk;
	unsigned int rg_blocks = 0, l_blocks = 0;
	char *ht;
	unsigned int x, size = len * sizeof(u64);
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
	if (ht == NULL)
		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
			       PAGE_KERNEL);
	if (!ht)
		return -ENOMEM;

	error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		goto out;

	/* Count the number of leaves */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_rlist;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		gfs2_rlist_add(dip, &rlist, blk);
		l_blocks++;
	}

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
	if (error)
		goto out_rg_gunlock;

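	/* Walk the leaf chain again, now that the transaction is open,
	   freeing each leaf block and dropping it from the inode's block
	   count. */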
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_end_trans;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		gfs2_free_meta(dip, blk, 1);
		gfs2_add_inode_blocks(&dip->i_inode, -1);
	}

	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {
		if (error >= 0)
			error = -EIO;
		goto out_end_trans;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_meta(dip->i_gl, dibh);
	/* On the last dealloc, make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.) */
	if (last_dealloc)
		dip->i_inode.i_mode = S_IFREG;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);

out_end_trans:
	gfs2_trans_end(sdp);
out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
	gfs2_quota_unhold(dip);
out:
	kvfree(ht);
	return error;
}

/**
 * gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
 * @dip: the directory
 *
 * Dealloc all on-disk directory leaves to FREEMETA state
 * Change on-disk inode type to "regular file"
 *
 * Returns: errno
 */

int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 hsize, len;
	u32 index = 0, next_index;
	__be64 *lp;
	u64 leaf_no;
	int error = 0, last;

	hsize = 1 << dip->i_depth;

	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	while (index < hsize) {
		leaf_no = be64_to_cpu(lp[index]);
		if (leaf_no) {
			error = get_leaf(dip, leaf_no, &bh);
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
			len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));

			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
			error = leaf_dealloc(dip, index, len, leaf_no, bh,
					     last);
			brelse(bh);
			if (error)
				goto out;
			index = next_index;
		} else
			index++;
	}

	if (index != hsize) {
		gfs2_consist_inode(dip);
		error = -EIO;
	}

out:

	return error;
}

/**
 * gfs2_diradd_alloc_required - find if adding entry will require an allocation
 * @inode: the directory being written to
 * @name: the filename that's going to be added
 * @da: The structure to return dir alloc info
 *
 * Returns: 0 if ok, -ve on error
 */

int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
			       struct gfs2_diradd *da)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const unsigned int extra = sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf);
	struct gfs2_dirent *dent;
	struct buffer_head *bh;

	da->nr_blocks = 0;
	da->bh = NULL;
	da->dent = NULL;

	dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
	if (!dent) {
		da->nr_blocks = sdp->sd_max_dirres;
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH) &&
		    (GFS2_DIRENT_SIZE(name->len) < extra))
			da->nr_blocks = 1;
		return 0;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	if (da->save_loc) {
		da->bh = bh;
		da->dent = dent;
	} else {
		brelse(bh);
	}
	return 0;
}