/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "ops_file.h"
#include "util.h"
#include "log.h"
#include "inode.h"

#define BFITNOENT ((u32)~0)

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state);

/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to set
 * @new_state: the new state of the block
 *
 */

static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
			unsigned int buflen, u32 block,
			unsigned char new_state)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	if (valid_change[new_state * 4 + cur_state]) {
		*byte ^= cur_state << bit;
		*byte |= new_state << bit;
	} else
		gfs2_consist_rgrpd(rgd);
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to read
 *
 * Returns: the allocation state of @block
 */

static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
				  unsigned int buflen, u32 block)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	return cur_state;
}
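/*
 * Worked example of the bit-pair packing used by gfs2_setbit(),
 * gfs2_testbit() and gfs2_bitfit() (assuming GFS2_BIT_SIZE == 2 and
 * GFS2_BIT_MASK == 3 as defined in <linux/gfs2_ondisk.h>, alongside
 * GFS2_NBBY == 4):
 *
 *	block 5  ->  byte = buffer + 5 / 4 = buffer + 1
 *	             bit  = (5 % 4) * 2    = 2
 *
 * so block 5 occupies bits 2-3 of the second byte. The valid_change[]
 * table above is indexed by (new_state * 4 + cur_state); for example,
 * FREE(0) -> DINODE(3) is entry 12 and allowed, while USED(1) -> DINODE(3)
 * is entry 13 and rejected via gfs2_consist_rgrpd().
 *
 * gfs2_bitfit() below also has a byte-at-a-time fast path: a byte in which
 * the low bit of every pair (mask 0x55) differs from the low bit of
 * @old_state cannot contain a match, so the whole byte is skipped.
 */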
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
 *             bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		       unsigned int buflen, u32 goal,
		       unsigned char old_state)
{
	unsigned char *byte, *end, alloc;
	u32 blk = goal;
	unsigned int bit;

	byte = buffer + (goal / GFS2_NBBY);
	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;
	alloc = (old_state & 1) ? 0 : 0x55;

	while (byte < end) {
		if ((*byte & 0x55) == alloc) {
			blk += (8 - bit) >> 1;

			bit = 0;
			byte++;

			continue;
		}

		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
			return blk;

		bit += GFS2_BIT_SIZE;
		if (bit >= 8) {
			bit = 0;
			byte++;
		}

		blk++;
	}

	return BFITNOENT;
}

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
			 unsigned int buflen, unsigned char state)
{
	unsigned char *byte = buffer;
	unsigned char *end = buffer + buflen;
	unsigned char state1 = state << 2;
	unsigned char state2 = state << 4;
	unsigned char state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_rg.rg_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_rg.rg_free);
		return;
	}

	tmp = rgd->rd_data -
		rgd->rd_rg.rg_free -
		rgd->rd_rg.rg_dinodes;
	if (count[1] + count[2] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[3] != rgd->rd_rg.rg_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[3], rgd->rd_rg.rg_dinodes);
		return;
	}

	if (count[2] > count[3]) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "unlinked inodes > inodes: %u\n",
			       count[2]);
		return;
	}

}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}
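/*
 * The lookup below keeps sd_rindex_mru_list in most-recently-used order:
 * a hit is moved to the head of the list, so repeated lookups that land in
 * the same resource group (presumably the common case for sequential
 * allocation and deallocation) only scan a handful of entries.
 */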
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
		if (rgrp_contains_block(rgd, blk)) {
			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
			spin_unlock(&sdp->sd_rindex_spin);
			return rgd;
		}
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
	return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: A RG
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
		return NULL;
	return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
}

static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 * Returns: the number of data blocks in the filesystem, as given by the rindex
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	int error, rgrps;

	mutex_lock(&sdp->sd_rindex_mutex);
	file_ra_state_init(&ra_state, inode->i_mapping);
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size)
			break;
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	mutex_unlock(&sdp->sd_rindex_mutex);
	return total_data;
}

static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rindex *str = buf;

	rgd->rd_addr = be64_to_cpu(str->ri_addr);
	rgd->rd_length = be32_to_cpu(str->ri_length);
	rgd->rd_data0 = be64_to_cpu(str->ri_data0);
	rgd->rd_data = be32_to_cpu(str->ri_data);
	rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes);
}
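/*
 * The rindex inode is read as a flat array of fixed-size entries, so entry n
 * starts at byte offset n * sizeof(struct gfs2_rindex). read_rindex_entry()
 * below uses sdp->sd_rgrps (the number of entries read so far) to compute
 * that offset, and gfs2_ri_update() derives the expected entry count from
 * the inode size in the same way.
 */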
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: pointer to the rindex inode
 * @ra_state: the readahead state for the rindex inode
 *
 * Returns: 0 on success, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip,
			     struct file_ra_state *ra_state)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	char buf[sizeof(struct gfs2_rindex)];
	int error;
	struct gfs2_rgrpd *rgd;

	error = gfs2_internal_read(ip, ra_state, buf, &pos,
				   sizeof(struct gfs2_rindex));
	if (!error)
		return 0;
	if (error != sizeof(struct gfs2_rindex)) {
		if (error > 0)
			error = -EIO;
		return error;
	}

	rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	mutex_init(&rgd->rd_mutex);
	lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
	rgd->rd_sbd = sdp;

	list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
	list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

	gfs2_rindex_in(rgd, buf);
	error = compute_bitstructs(rgd);
	if (error)
		return error;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		return error;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	rgd->rd_flags |= GFS2_RDF_CHECK;
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	u64 rgrp_count = ip->i_di.di_size;
	int error;

	if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			clear_rgrpdi(sdp);
			return error;
		}
	}

	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;
}

/**
 * gfs2_ri_update_special - Pull in a new resource index from the disk
 *
 * This is a special version that's safe to call from gfs2_inplace_reserve_i.
 * In this case we know that we don't have any resource groups in memory yet.
 *
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */
static int gfs2_ri_update_special(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	int error;

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		/* Ignore partials */
		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
		    ip->i_di.di_size)
			break;
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			clear_rgrpdi(sdp);
			return error;
		}
	}

	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;
}
/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		mutex_lock(&sdp->sd_rindex_mutex);
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rg->rg_flags = be32_to_cpu(str->rg_flags);
	rg->rg_free = be32_to_cpu(str->rg_free);
	rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
	rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rg->rg_flags);
	str->rg_free = cpu_to_be32(rg->rg_free);
	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rg->rg_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
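/*
 * gfs2_rgrp_bh_get() below submits reads for all of the rgrp's header and
 * bitmap blocks first and only then waits for them, so the requests can be
 * serviced together. The metatype check expects the first block to be the
 * resource group header (GFS2_METATYPE_RG) and every later block to be a
 * bitmap continuation block (GFS2_METATYPE_RB).
 */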
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_bh_put() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	mutex_lock(&rgd->rd_mutex);

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		mutex_unlock(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (rgd->rd_rg_vn != gl->gl_vn) {
		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_rg_vn = gl->gl_vn;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	mutex_unlock(&rgd->rd_mutex);

	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	mutex_unlock(&rgd->rd_mutex);

	return error;
}

void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd describing the RG whose bitmaps are released
 *
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int x, length = rgd->rd_length;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	if (--rgd->rd_bh_count) {
		spin_unlock(&sdp->sd_rindex_spin);
		return;
	}

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}

	spin_unlock(&sdp->sd_rindex_spin);
}

void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	unsigned int length = rgd->rd_length;
	unsigned int x;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (!bi->bi_clone)
			continue;
		memcpy(bi->bi_clone + bi->bi_offset,
		       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_alloc
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;

	/* FIXME: Should assert that the correct locks are held here... */
	memset(al, 0, sizeof(*al));
	return al;
}
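/*
 * How the "clone" bitmaps fit together: when blocks are freed, rgblk_free()
 * first copies the affected bitmap block into bi_clone and then clears the
 * bits only in the real bitmap. rgblk_search() searches bi_clone when it
 * exists, so blocks freed in the current log cycle are not handed back out
 * until gfs2_rgrp_repolish_clones() (whose caller is outside this file)
 * copies the on-disk bitmap over the clone again. rd_free_clone is the
 * matching count of blocks that are safe to allocate, which is what
 * try_rgrp_fit() tests against the reservation size.
 */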
/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @al: the struct gfs2_alloc structure describing the reservation
 *
 * If there's room for the requested blocks to be allocated from the RG:
 * Sets the al_rgd field in @al.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */

static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int ret = 0;

	if (rgd->rd_rg.rg_flags & GFS2_RGF_NOALLOC)
		return 0;

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_free_clone >= al->al_requested) {
		al->al_rgd = rgd;
		ret = 1;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return ret;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: skip unlinked blocks at or below this block number
 *
 * Returns: The inode, if one has been found
 */

static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
{
	struct inode *inode;
	u32 goal = 0;
	u64 no_addr;

	for(;;) {
		goal = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
				    GFS2_BLKST_UNLINKED);
		if (goal == 0)
			return NULL;
		no_addr = goal + rgd->rd_data0;
		if (no_addr <= *last_unlinked)
			continue;
		*last_unlinked = no_addr;
		inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
					  no_addr, -1);
		if (!IS_ERR(inode))
			return inode;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return NULL;
}

/**
 * recent_rgrp_first - get first RG from "recent" list
 * @sdp: The GFS2 superblock
 * @rglast: address of the rgrp used last
 *
 * Returns: The first rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
					    u64 rglast)
{
	struct gfs2_rgrpd *rgd = NULL;

	spin_lock(&sdp->sd_rindex_spin);

	if (list_empty(&sdp->sd_rindex_recent_list))
		goto out;

	if (!rglast)
		goto first;

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd->rd_addr == rglast)
			goto out;
	}

first:
	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
			 rd_recent);
out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: take @cur_rgd off the recent list as we move past it
 *
 * Returns: The next rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
					   int remove)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_recent_list;

	list_for_each_entry(rgd, head, rd_recent) {
		if (rgd == cur_rgd) {
			if (cur_rgd->rd_recent.next != head)
				rgd = list_entry(cur_rgd->rd_recent.next,
						 struct gfs2_rgrpd, rd_recent);
			else
				rgd = NULL;

			if (remove)
				list_del(&cur_rgd->rd_recent);

			goto out;
		}
	}

	rgd = NULL;
	if (!list_empty(head))
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
	struct gfs2_rgrpd *rgd;
	unsigned int count = 0;
	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd == new_rgd)
			goto out;

		if (++count >= max)
			goto out;
	}
	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

out:
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}
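/*
 * get_local_rgrp() below picks a resource group in two passes. It first
 * walks the per-superblock "recent" list, starting at the rgrp this inode
 * last allocated from, taking each rgrp glock with LM_FLAG_TRY so contended
 * groups are skipped rather than waited on. If nothing fits, it falls back
 * to the full rindex list, starting at the "forward" rgrp; forward_rgrp_get()
 * above seeds that pointer at sd_rgrps * jd_jid / journals, so nodes with
 * different journal ids start their scans in different parts of the
 * filesystem. After one full pass LM_FLAG_TRY is dropped and the scan
 * becomes blocking.
 */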
/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 * @last_unlinked: the last unlinked block number seen, updated as we go
 *
 * Try to acquire an rgrp in a way which avoids contending with others.
 *
 * Returns: NULL if an rgrp was reserved, an unlinked inode that should be
 *          released, or an ERR_PTR on failure
 */

static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
{
	struct inode *inode = NULL;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = &ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error;

	/* Try recently successful rgrps */

	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

	while (rgd) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
					   LM_FLAG_TRY, &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			if (rgd->rd_flags & GFS2_RDF_CHECK)
				inode = try_rgrp_unlink(rgd, last_unlinked);
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			if (inode)
				return inode;
			rgd = recent_rgrp_next(rgd, 1);
			break;

		case GLR_TRYFAILED:
			rgd = recent_rgrp_next(rgd, 0);
			break;

		default:
			return ERR_PTR(error);
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			if (rgd->rd_flags & GFS2_RDF_CHECK)
				inode = try_rgrp_unlink(rgd, last_unlinked);
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			if (inode)
				return inode;
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return ERR_PTR(error);
		}

		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		if (rgd == begin) {
			if (++loops >= 3)
				return ERR_PTR(-ENOSPC);
			if (!skipped)
				loops++;
			flags = 0;
			if (loops == 2)
				gfs2_log_flush(sdp, NULL);
		}
	}

out:
	ip->i_last_rg_alloc = rgd->rd_addr;

	if (begin) {
		recent_rgrp_add(rgd);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return NULL;
}

/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: the calling source file (for debugging)
 * @line: the calling source line (for debugging)
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct inode *inode;
	int error = 0;
	u64 last_unlinked = 0;

	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

try_again:
	/* We need to hold the rindex unless the inode we're using is
	   the rindex itself, in which case it's already held. */
	if (ip != GFS2_I(sdp->sd_rindex))
		error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
		error = gfs2_ri_update_special(ip);

	if (error)
		return error;

	inode = get_local_rgrp(ip, &last_unlinked);
	if (inode) {
		if (ip != GFS2_I(sdp->sd_rindex))
			gfs2_glock_dq_uninit(&al->al_ri_gh);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		iput(inode);
		gfs2_log_flush(sdp, NULL);
		goto try_again;
	}

	al->al_file = file;
	al->al_line = line;

	return 0;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			"al_file = %s, al_line = %u\n",
			al->al_alloced, al->al_requested, al->al_file,
			al->al_line);

	al->al_rgd = NULL;
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	if (ip != GFS2_I(sdp->sd_rindex))
		gfs2_glock_dq_uninit(&al->al_ri_gh);
}

/**
 * gfs2_get_block_type - Determine the allocation state of a block in an RG
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_length;
	rgrp_block = block - rgd->rd_data0;

	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);
	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}
/**
 * rgblk_search - find a block in @old_state, change allocation
 *                state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated
 */

static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 blk = 0;
	unsigned int buf, x;

	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able to wrap around
	   and search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		if (bi->bi_clone)
			blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		else
			blk = gfs2_bitfit(rgd,
					  bi->bi_bh->b_data + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	if (old_state != new_state) {
		gfs2_assert_withdraw(rgd->rd_sbd, blk != BFITNOENT);

		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, blk, new_state);
		if (bi->bi_clone)
			gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
				    bi->bi_len, blk, new_state);
	}

	return (blk == BFITNOENT) ? 0 : (bi->bi_start * GFS2_NBBY) + blk;
}
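/*
 * Note that when @old_state == @new_state (the GFS2_BLKST_UNLINKED scan done
 * by try_rgrp_unlink()) no bits are changed and nothing is added to the
 * transaction; rgblk_search() is then only a search. A failed search folds
 * BFITNOENT into a return value of 0, which is the "nothing found" value
 * that try_rgrp_unlink() tests for.
 */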
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_length;

	rgrp_blk = bstart - rgd->rd_data0;

	while (blen--) {
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}

/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Returns: the allocated block
 */

u64 gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	if (rgrp_contains_block(rgd, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	block = rgd->rd_data0 + blk;
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
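/*
 * gfs2_alloc_meta() below mirrors gfs2_alloc_data() above, but tracks its
 * goal in di_goal_meta/rd_last_alloc_meta and also calls
 * gfs2_trans_add_unrevoke(), presumably so that any revoke still pending in
 * the log for a previously freed metadata block is cancelled before the
 * block is reused. gfs2_alloc_di() does the same for a new dinode, and in
 * addition bumps rg_dinodes and hands back the next inode generation number.
 */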
/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Returns: the allocated block
 */

u64 gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	if (rgrp_contains_block(rgd, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 * @generation: set to the new dinode's generation number
 *
 * Returns: the block allocated
 */

u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 blk;
	u64 block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;
	*generation = rgd->rd_rg.rg_igeneration++;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_trans_add_rg(rgd);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}


void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	rgd = gfs2_blk2rgrpd(sdp, block);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 * @flags: the modifier flags for the holder structures
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
		      int flags)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, flags,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
	}
}