/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "ops_file.h"
#include "util.h"
#include "log.h"

#define BFITNOENT ((u32)~0)

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to set
 * @new_state: the new state of the block
 *
 */

static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
			unsigned int buflen, u32 block,
			unsigned char new_state)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	if (valid_change[new_state * 4 + cur_state]) {
		*byte ^= cur_state << bit;
		*byte |= new_state << bit;
	} else
		gfs2_consist_rgrpd(rgd);
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to read
 *
 * Returns: the two-bit allocation state of the block
 */

static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
				  unsigned int buflen, u32 block)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	return cur_state;
}
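
/*
 * Illustrative sketch only (not part of the GFS2 code): decoding a block's
 * two-bit allocation state from a bitmap byte, using the same arithmetic as
 * gfs2_setbit()/gfs2_testbit() above.  The helper name and the example byte
 * value are assumptions made up for the illustration.
 */
static inline unsigned char example_decode_state(const unsigned char *buffer,
						 u32 block)
{
	/* Each byte holds GFS2_NBBY (4) states of GFS2_BIT_SIZE (2) bits each */
	const unsigned char byte = buffer[block / GFS2_NBBY];
	unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	/* e.g. buffer[0] == 0xC4 (11000100b): block 0 is free (0), block 1 is
	   used data (1), block 2 is free (0), block 3 is used metadata (3) */
	return (byte >> bit) & GFS2_BIT_MASK;
}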

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
 *       bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures.
 *
 * Returns: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		       unsigned int buflen, u32 goal,
		       unsigned char old_state)
{
	unsigned char *byte, *end, alloc;
	u32 blk = goal;
	unsigned int bit;

	byte = buffer + (goal / GFS2_NBBY);
	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;
	alloc = (old_state & 1) ? 0 : 0x55;

	while (byte < end) {
		if ((*byte & 0x55) == alloc) {
			blk += (8 - bit) >> 1;

			bit = 0;
			byte++;

			continue;
		}

		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
			return blk;

		bit += GFS2_BIT_SIZE;
		if (bit >= 8) {
			bit = 0;
			byte++;
		}

		blk++;
	}

	return BFITNOENT;
}
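
/*
 * Illustrative sketch only (not part of GFS2): why gfs2_bitfit() can skip a
 * whole bitmap byte at a time.  Bit 0 of every two-bit pair is the
 * "allocated" bit, so masking a byte with 0x55 (01010101b) keeps just those
 * four bits.  When searching for a free state the byte can be skipped if the
 * masked value is 0x55 (all four blocks allocated); when searching for an
 * allocated state it can be skipped if the masked value is 0 (all four blocks
 * free).  The helper name below is invented purely to show the test.
 */
static inline int example_byte_has_candidate(unsigned char byte,
					     unsigned char old_state)
{
	unsigned char alloc = (old_state & 1) ? 0 : 0x55;

	/* e.g. byte == 0x5D (01011101b): masked value is 0x55, so a search for
	   GFS2_BLKST_FREE skips this byte, while a search for GFS2_BLKST_USED
	   must examine its four bit-pairs individually */
	return (byte & 0x55) != alloc;
}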

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
			 unsigned int buflen, unsigned char state)
{
	unsigned char *byte = buffer;
	unsigned char *end = buffer + buflen;
	unsigned char state1 = state << 2;
	unsigned char state2 = state << 4;
	unsigned char state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_ri.ri_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_rg.rg_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_rg.rg_free);
		return;
	}

	tmp = rgd->rd_ri.ri_data -
	      rgd->rd_rg.rg_free -
	      rgd->rd_rg.rg_dinodes;
	if (count[1] + count[2] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[3] != rgd->rd_rg.rg_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[3], rgd->rd_rg.rg_dinodes);
		return;
	}

	if (count[2] > count[3]) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "unlinked inodes > inodes: %u\n",
			       count[2]);
		return;
	}

}

static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block)
{
	u64 first = ri->ri_data0;
	u64 last = first + ri->ri_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
		if (rgrp_contains_block(&rgd->rd_ri, blk)) {
			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
			spin_unlock(&sdp->sd_rindex_spin);
			return rgd;
		}
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
	return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: A RG
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
		return NULL;
	return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
}

static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_ri.ri_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(&rgd->rd_ri);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
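
/*
 * Illustrative sketch only (not part of GFS2): the byte layout that
 * compute_bitstructs() produces.  The header block holds
 * sb_bsize - sizeof(struct gfs2_rgrp) bitmap bytes after the rgrp header,
 * every following bitmap block holds sb_bsize - sizeof(struct gfs2_meta_header)
 * bytes after its metadata header, and the last block takes whatever remains
 * of ri_bitbytes.  The helper name below is invented for the example.
 */
static inline u32 example_bitmap_bytes_in_block(struct gfs2_sbd *sdp,
						u32 ri_bitbytes, u32 x,
						u32 length)
{
	u32 per_block = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	u32 in_header = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);

	if (x + 1 == length)
		/* last (or only) block: the remainder of ri_bitbytes */
		return ri_bitbytes - (x ? in_header + (x - 1) * per_block : 0);

	return x ? per_block : in_header;
}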

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct gfs2_rgrpd *rgd;
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	u64 junk = ip->i_di.di_size;
	int error;

	if (do_div(junk, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (!error)
			break;
		if (error != sizeof(struct gfs2_rindex)) {
			if (error > 0)
				error = -EIO;
			goto fail;
		}

		rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
		error = -ENOMEM;
		if (!rgd)
			goto fail;

		mutex_init(&rgd->rd_mutex);
		lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
		rgd->rd_sbd = sdp;

		list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
		list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

		gfs2_rindex_in(&rgd->rd_ri, buf);
		error = compute_bitstructs(rgd);
		if (error)
			goto fail;

		error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
				       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
		if (error)
			goto fail;

		rgd->rd_gl->gl_object = rgd;
		rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	}

	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;

fail:
	clear_rgrpdi(sdp);
	return error;
}

/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		mutex_lock(&sdp->sd_rindex_mutex);
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}
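
/*
 * Illustrative sketch only (not part of GFS2): the "check, lock, re-check"
 * pattern used by gfs2_rindex_hold() above.  The version number is compared
 * once without the mutex to keep the common case cheap, and again with the
 * mutex held so that only one task re-reads the rindex when it really has
 * changed.  The helper name and the latest_vn parameter are invented for the
 * example.
 */
static inline int example_refresh_if_stale(struct gfs2_sbd *sdp,
					   struct gfs2_inode *ip,
					   u64 latest_vn)
{
	int error = 0;

	if (sdp->sd_rindex_vn != latest_vn) {		/* cheap, unlocked check */
		mutex_lock(&sdp->sd_rindex_mutex);
		if (sdp->sd_rindex_vn != latest_vn)	/* re-check under the mutex */
			error = gfs2_ri_update(ip);	/* updates sd_rindex_vn on success */
		mutex_unlock(&sdp->sd_rindex_mutex);
	}
	return error;
}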

/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_bh_put() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_ri.ri_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	mutex_lock(&rgd->rd_mutex);

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		mutex_unlock(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
							    GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (rgd->rd_rg_vn != gl->gl_vn) {
		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_rg_vn = gl->gl_vn;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	mutex_unlock(&rgd->rd_mutex);

	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	mutex_unlock(&rgd->rd_mutex);

	return error;
}

void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int x, length = rgd->rd_ri.ri_length;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	if (--rgd->rd_bh_count) {
		spin_unlock(&sdp->sd_rindex_spin);
		return;
	}

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}

	spin_unlock(&sdp->sd_rindex_spin);
}
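
/*
 * Illustrative sketch only (not part of GFS2): the reference-counted pairing
 * that the two functions above expect.  Every successful gfs2_rgrp_bh_get()
 * (or gfs2_rgrp_bh_hold()) must be matched by a gfs2_rgrp_bh_put(); the
 * bitmap buffers are only released when the last reference is dropped.  The
 * helper name is invented for the example.
 */
static inline int example_with_rgrp_bitmaps(struct gfs2_rgrpd *rgd)
{
	int error = gfs2_rgrp_bh_get(rgd);	/* takes a reference, reads bitmaps */
	if (error)
		return error;

	/* ... examine rgd->rd_bits[] / rgd->rd_rg here ... */

	gfs2_rgrp_bh_put(rgd);			/* drops the reference */
	return 0;
}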

void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	unsigned int length = rgd->rd_ri.ri_length;
	unsigned int x;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (!bi->bi_clone)
			continue;
		memcpy(bi->bi_clone + bi->bi_offset,
		       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_alloc
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;

	/* FIXME: Should assert that the correct locks are held here... */
	memset(al, 0, sizeof(*al));
	return al;
}

/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @al: the struct gfs2_alloc structure describing the reservation
 *
 * If there's room for the requested blocks to be allocated from the RG:
 * Sets the @al_rgd field in @al.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */

static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int ret = 0;

	if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC)
		return 0;

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_free_clone >= al->al_requested) {
		al->al_rgd = rgd;
		ret = 1;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return ret;
}

/**
 * recent_rgrp_first - get first RG from "recent" list
 * @sdp: The GFS2 superblock
 * @rglast: address of the rgrp used last
 *
 * Returns: The first rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
					    u64 rglast)
{
	struct gfs2_rgrpd *rgd = NULL;

	spin_lock(&sdp->sd_rindex_spin);

	if (list_empty(&sdp->sd_rindex_recent_list))
		goto out;

	if (!rglast)
		goto first;

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd->rd_ri.ri_addr == rglast)
			goto out;
	}

first:
	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
			 rd_recent);
out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: if set, remove the current rgrp from the recent list
 *
 * Returns: The next rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
					   int remove)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_recent_list;

	list_for_each_entry(rgd, head, rd_recent) {
		if (rgd == cur_rgd) {
			if (cur_rgd->rd_recent.next != head)
				rgd = list_entry(cur_rgd->rd_recent.next,
						 struct gfs2_rgrpd, rd_recent);
			else
				rgd = NULL;

			if (remove)
				list_del(&cur_rgd->rd_recent);

			goto out;
		}
	}

	rgd = NULL;
	if (!list_empty(head))
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
	struct gfs2_rgrpd *rgd;
	unsigned int count = 0;
	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd == new_rgd)
			goto out;

		if (++count >= max)
			goto out;
	}
	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

out:
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
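
/*
 * Illustrative sketch only (not part of GFS2): the starting point chosen by
 * forward_rgrp_get() above.  Each journal (node) begins its full-list scan at
 * a different offset so that nodes tend to allocate from different resource
 * groups.  For example, with 100 resource groups and 4 journals, journal 0
 * starts at rgrp 0, journal 1 at rgrp 25, journal 2 at rgrp 50 and journal 3
 * at rgrp 75.  The helper name is invented for the example.
 */
static inline unsigned int example_forward_start(unsigned int rgrps,
						 unsigned int jid,
						 unsigned int journals)
{
	return (rgrps >= journals) ? rgrps * jid / journals : 0;
}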

/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 *
 * Try to acquire an rgrp in a way which avoids contending with others.
 * The chosen rgrp is recorded in the inode's struct gfs2_alloc (al_rgd).
 *
 * Returns: errno
 */

static int get_local_rgrp(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = &ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error;

	/* Try recently successful rgrps */

	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

	while (rgd) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
					   LM_FLAG_TRY, &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			rgd = recent_rgrp_next(rgd, 1);
			break;

		case GLR_TRYFAILED:
			rgd = recent_rgrp_next(rgd, 0);
			break;

		default:
			return error;
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return error;
		}

		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		if (rgd == begin) {
			if (++loops >= 3)
				return -ENOSPC;
			if (!skipped)
				loops++;
			flags = 0;
			if (loops == 2)
				gfs2_log_flush(sdp, NULL);
		}
	}

out:
	ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;

	if (begin) {
		recent_rgrp_add(rgd);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return 0;
}

/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	int error;

	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		return error;

	error = get_local_rgrp(ip);
	if (error) {
		gfs2_glock_dq_uninit(&al->al_ri_gh);
		return error;
	}

	al->al_file = file;
	al->al_line = line;

	return 0;
}
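
/*
 * Illustrative sketch only (not part of GFS2): the reservation life cycle the
 * functions in this file expect.  A caller fills in al_requested via
 * gfs2_alloc_get(), reserves a resource group, allocates from it, and then
 * drops the reservation with gfs2_inplace_release().  Real callers normally
 * go through the gfs2_inplace_reserve() wrapper rather than the _i variant,
 * and also build a transaction around the allocation; both are omitted here
 * to keep the sketch short, and the helper name is invented for the example.
 */
static inline int example_reserve_one_block(struct gfs2_inode *ip, u64 *blkno)
{
	struct gfs2_alloc *al = gfs2_alloc_get(ip);
	int error;

	al->al_requested = 1;				/* one data block */
	error = gfs2_inplace_reserve_i(ip, __FILE__, __LINE__);
	if (error)
		return error;

	*blkno = gfs2_alloc_data(ip);			/* defined later in this file */
	gfs2_inplace_release(ip);
	return 0;
}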

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			     "al_file = %s, al_line = %u\n",
			     al->al_alloced, al->al_requested, al->al_file,
			     al->al_line);

	al->al_rgd = NULL;
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	gfs2_glock_dq_uninit(&al->al_ri_gh);
}

/**
 * gfs2_get_block_type - Determine the allocation type of a block in an RG
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_ri.ri_length;
	rgrp_block = block - rgd->rd_ri.ri_data0;

	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);
	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}

/**
 * rgblk_search - find a block in @old_state, change allocation
 *                state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated
 */

static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_ri.ri_length;
	u32 blk = 0;
	unsigned int buf, x;

	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able to wrap around
	   and search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		if (bi->bi_clone)
			blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		else
			blk = gfs2_bitfit(rgd,
					  bi->bi_bh->b_data + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
		blk = 0;

	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
		    bi->bi_len, blk, new_state);
	if (bi->bi_clone)
		gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
			    bi->bi_len, blk, new_state);

	return bi->bi_start * GFS2_NBBY + blk;
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_ri.ri_length;

	rgrp_blk = bstart - rgd->rd_ri.ri_data0;

	while (blen--) {
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}
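
/*
 * Illustrative sketch only (not part of GFS2): the block-number conversions
 * used on either side of rgblk_search() and rgblk_free() above.  The bitmap
 * code works with rgrp-relative block numbers, the rest of the filesystem
 * works with absolute block numbers, and ri_data0 is the first data block of
 * the resource group.  The helper names are invented for the example.
 */
static inline u64 example_rgrp_to_fs_block(const struct gfs2_rgrpd *rgd,
					   u32 rgrp_blk)
{
	return rgd->rd_ri.ri_data0 + rgrp_blk;	/* as in the allocators below */
}

static inline u32 example_fs_to_rgrp_block(const struct gfs2_rgrpd *rgd,
					   u64 block)
{
	return block - rgd->rd_ri.ri_data0;	/* as in rgblk_free() above */
}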

/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Returns: the allocated block
 */

u64 gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Returns: the allocated block
 */

u64 gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 * @generation: the generation number of the new dinode is returned here
 *
 * Returns: the block allocated
 */

u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 blk;
	u64 block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;
	*generation = rgd->rd_rg.rg_igeneration++;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_num.no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_trans_add_rg(rgd);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}


void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	rgd = gfs2_blk2rgrpd(sdp, block);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 * @flags: the modifier flags for the holder structures
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
		      int flags)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, flags,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
	}
}
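
/*
 * Illustrative sketch only (not part of GFS2): how the rlist helpers above
 * fit together for an operation that touches blocks in several resource
 * groups.  The caller collects the rgrps with gfs2_rlist_add(), builds the
 * holder array with gfs2_rlist_alloc(), acquires the glocks through those
 * holders (not shown), and finally tears everything down with
 * gfs2_rlist_free().  The helper name and the block array are assumptions
 * made up for the example.
 */
static inline void example_build_rlist(struct gfs2_sbd *sdp,
				       struct gfs2_rgrp_list *rlist,
				       const u64 *blocks, unsigned int nr)
{
	unsigned int x;

	for (x = 0; x < nr; x++)
		gfs2_rlist_add(sdp, rlist, blocks[x]);	/* deduplicates rgrps */

	gfs2_rlist_alloc(rlist, LM_ST_EXCLUSIVE, 0);	/* one holder per rgrp */

	/* ... enqueue the holders and do the multi-rgrp work here ... */

	gfs2_rlist_free(rlist);				/* uninit holders, free arrays */
}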