/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * localalloc.c
 *
 * Node local data allocation
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

#define OCFS2_LOCAL_ALLOC(dinode)	(&((dinode)->id2.i_lab))

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);

static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 *numbits,
					     struct ocfs2_alloc_reservation *resv);

static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);

static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh);

static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh);

static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac);

static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode);

/*
 * ocfs2_la_default_mb() - determine a default size, in megabytes, of
 * the local alloc.
 *
 * Generally, we'd like to pick as large a local alloc as
 * possible. Performance on large workloads tends to scale
 * proportionally to la size. In addition to that, the reservations
 * code functions more efficiently as it can reserve more windows for
 * write.
 *
 * Some things work against us when trying to choose a large local alloc:
 *
 * - We need to ensure our sizing is picked to leave enough space in
 *   group descriptors for other allocations (such as block groups,
 *   etc). Picking default sizes which are a multiple of 4 could help
 *   - block groups are allocated in 2mb and 4mb chunks.
 *
 * - Likewise, we don't want to starve other nodes of bits on small
 *   file systems. This can easily be taken care of by limiting our
 *   default to a reasonable size (256M) on larger cluster sizes.
 *
 * - Some file systems can't support very large sizes - 4k and 8k in
 *   particular are limited to less than 128 and 256 megabytes respectively.
 *
 * The following reference table shows group descriptor and local
 * alloc maximums at various cluster sizes (4k blocksize)
 *
 * csize: 4K	group: 126M	la: 121M
 * csize: 8K	group: 252M	la: 243M
 * csize: 16K	group: 504M	la: 486M
 * csize: 32K	group: 1008M	la: 972M
 * csize: 64K	group: 2016M	la: 1944M
 * csize: 128K	group: 4032M	la: 3888M
 * csize: 256K	group: 8064M	la: 7776M
 * csize: 512K	group: 16128M	la: 15552M
 * csize: 1024K	group: 32256M	la: 31104M
 */
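/*
 * Rough worked example of the sizing below, for illustration: with a
 * 4K blocksize and 16K clustersize the cluster group is 504M.
 * Subtracting 16M for other allocations leaves 488M, which is still
 * above OCFS2_LA_MAX_DEFAULT_MB but not enough for two full 256M
 * windows, so the value is halved once to a 244M default - two such
 * windows then cover the 504M group fairly evenly.
 */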
#define OCFS2_LA_MAX_DEFAULT_MB	256
#define OCFS2_LA_OLD_DEFAULT	8
unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
{
	unsigned int la_mb;
	unsigned int gd_mb;
	unsigned int la_max_mb;
	unsigned int megs_per_slot;
	struct super_block *sb = osb->sb;

	gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
		8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));

	/*
	 * This takes care of file systems with very small group
	 * descriptors - 512 byte blocksize at cluster sizes lower
	 * than 16K and also 1k blocksize with 4k cluster size.
	 */
	if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
	    || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
		return OCFS2_LA_OLD_DEFAULT;

	/*
	 * Leave enough room for some block groups and make the final
	 * value we work from a multiple of 4.
	 */
	gd_mb -= 16;
	gd_mb &= 0xFFFFFFFB;

	la_mb = gd_mb;

	/*
	 * Keep window sizes down to a reasonable default
	 */
	if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
		/*
		 * Some clustersize / blocksize combinations will have
		 * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
		 * default size, but get poor distribution when
		 * limited to exactly 256 megabytes.
		 *
		 * As an example, 16K clustersize at 4K blocksize
		 * gives us a cluster group size of 504M. Paring the
		 * local alloc size down to 256 however, would give us
		 * only one window and around 200MB left in the
		 * cluster group. Instead, find the first size below
		 * 256 which would give us an even distribution.
		 *
		 * Larger cluster group sizes actually work out pretty
		 * well when pared to 256, so we don't have to do this
		 * for any group that fits more than two
		 * OCFS2_LA_MAX_DEFAULT_MB windows.
		 */
		if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
			la_mb = 256;
		else {
			unsigned int gd_mult = gd_mb;

			while (gd_mult > 256)
				gd_mult = gd_mult >> 1;

			la_mb = gd_mult;
		}
	}

	megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
	megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
	/* Too many nodes, too few disk clusters. */
	if (megs_per_slot < la_mb)
		la_mb = megs_per_slot;

	/* We can't store more bits than we can in a block. */
	la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
						ocfs2_local_alloc_size(sb) * 8);
	if (la_mb > la_max_mb)
		la_mb = la_max_mb;

	return la_mb;
}
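
/*
 * Apply the mount-time local alloc size request (in megabytes). A
 * request of -1 means "no preference" and picks the computed default;
 * anything larger than what a single local alloc block can track is
 * clamped to that maximum. Both the default and the working window
 * size are stored in clusters.
 */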
void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
{
	struct super_block *sb = osb->sb;
	unsigned int la_default_mb = ocfs2_la_default_mb(osb);
	unsigned int la_max_mb;

	la_max_mb = ocfs2_clusters_to_megabytes(sb,
						ocfs2_local_alloc_size(sb) * 8);

	trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);

	if (requested_mb == -1) {
		/* No user request - use defaults */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_default_mb);
	} else if (requested_mb > la_max_mb) {
		/* Request is too big, we give the maximum available */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_max_mb);
	} else {
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, requested_mb);
	}

	osb->local_alloc_bits = osb->local_alloc_default_bits;
}

static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
	return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
		osb->local_alloc_state == OCFS2_LA_ENABLED);
}

void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
				      unsigned int num_clusters)
{
	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
	    osb->local_alloc_state == OCFS2_LA_THROTTLED)
		if (num_clusters >= osb->local_alloc_default_bits) {
			cancel_delayed_work(&osb->la_enable_wq);
			osb->local_alloc_state = OCFS2_LA_ENABLED;
		}
	spin_unlock(&osb->osb_lock);
}

void ocfs2_la_enable_worker(struct work_struct *work)
{
	struct ocfs2_super *osb =
		container_of(work, struct ocfs2_super,
			     la_enable_wq.work);
	spin_lock(&osb->osb_lock);
	osb->local_alloc_state = OCFS2_LA_ENABLED;
	spin_unlock(&osb->osb_lock);
}

/*
 * Tell us whether a given allocation should use the local alloc
 * file. Otherwise, it has to go to the main bitmap.
 *
 * This function does semi-dirty reads of local alloc size and state!
 * This is ok however, as the values are re-checked once under mutex.
 */
int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
{
	int ret = 0;
	int la_bits;

	spin_lock(&osb->osb_lock);
	la_bits = osb->local_alloc_bits;

	if (!ocfs2_la_state_enabled(osb))
		goto bail;

	/* la_bits should be at least twice the size (in clusters) of
	 * a new block group. We want to be sure block group
	 * allocations go through the local alloc, so allow an
	 * allocation to take up to half the bitmap. */
	if (bits > (la_bits / 2))
		goto bail;

	ret = 1;
bail:
	trace_ocfs2_alloc_should_use_local(
		(unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
	spin_unlock(&osb->osb_lock);
	return ret;
}
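
/*
 * Load this slot's local alloc file at mount time: read its inode
 * block, sanity check the flags and bitmap size, and make it the
 * active window (osb->local_alloc_bh). The file should already have
 * been recovered - if any counts or bits are still set we complain
 * loudly but carry on.
 */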
int ocfs2_load_local_alloc(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_dinode *alloc = NULL;
	struct buffer_head *alloc_bh = NULL;
	u32 num_used;
	struct inode *inode = NULL;
	struct ocfs2_local_alloc *la;

	if (osb->local_alloc_bits == 0)
		goto bail;

	if (osb->local_alloc_bits >= osb->bitmap_cpg) {
		mlog(ML_NOTICE, "Requested local alloc window %d is larger "
		     "than max possible %u. Using defaults.\n",
		     osb->local_alloc_bits, (osb->bitmap_cpg - 1));
		osb->local_alloc_bits =
			ocfs2_megabytes_to_clusters(osb->sb,
						    ocfs2_la_default_mb(osb));
	}

	/* read the alloc off disk */
	inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	if (!(le32_to_cpu(alloc->i_flags) &
	      (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
		mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		status = -EINVAL;
		goto bail;
	}

	if ((la->la_size == 0) ||
	    (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
		mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
		     le16_to_cpu(la->la_size));
		status = -EINVAL;
		goto bail;
	}

	/* do a little verification. */
	num_used = ocfs2_local_alloc_count_bits(alloc);

	/* hopefully the local alloc has always been recovered before
	 * we load it. */
	if (num_used
	    || alloc->id1.bitmap1.i_used
	    || alloc->id1.bitmap1.i_total
	    || la->la_bm_off)
		mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
		     "found = %u, set = %u, taken = %u, off = %u\n",
		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
		     le32_to_cpu(alloc->id1.bitmap1.i_total),
		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);

	osb->local_alloc_bh = alloc_bh;
	osb->local_alloc_state = OCFS2_LA_ENABLED;

bail:
	if (status < 0)
		brelse(alloc_bh);
	if (inode)
		iput(inode);

	trace_ocfs2_load_local_alloc(osb->local_alloc_bits);

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Return any unused bits to the main bitmap and write out a clean
 * local_alloc.
 *
 * Note that osb->local_alloc_bh is brelse'd and NULL'd out on the
 * way out.
 */
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
{
	int status;
	handle_t *handle;
	struct inode *local_alloc_inode = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_dinode *alloc = NULL;

	cancel_delayed_work(&osb->la_enable_wq);
	flush_workqueue(ocfs2_wq);

	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
		goto out;

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto out;
	}

	osb->local_alloc_state = OCFS2_LA_DISABLED;

	ocfs2_resmap_uninit(&osb->osb_la_resmap);

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	/* WINDOW_MOVE_CREDITS is a bit heavy... */
	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		handle = NULL;
		goto out_unlock;
	}

	bh = osb->local_alloc_bh;
	alloc = (struct ocfs2_dinode *) bh->b_data;

	alloc_copy = kmalloc(bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		goto out_commit;
	}
	memcpy(alloc_copy, alloc, bh->b_size);

	status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
					 bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, bh);

	brelse(bh);
	osb->local_alloc_bh = NULL;
	osb->local_alloc_state = OCFS2_LA_UNUSED;

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	if (local_alloc_inode)
		iput(local_alloc_inode);

	kfree(alloc_copy);
}

/*
 * We want to free the bitmap bits outside of any recovery context as
 * we'll need a cluster lock to do so, but we must clear the local
 * alloc before giving up the recovered node's journal. To solve this,
 * we kmalloc a copy of the local alloc before it's changed, for the
 * caller to process with ocfs2_complete_local_alloc_recovery().
 */
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
				     int slot_num,
				     struct ocfs2_dinode **alloc_copy)
{
	int status = 0;
	struct buffer_head *alloc_bh = NULL;
	struct inode *inode = NULL;
	struct ocfs2_dinode *alloc;

	trace_ocfs2_begin_local_alloc_recovery(slot_num);

	*alloc_copy = NULL;

	inode = ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	mutex_lock(&inode->i_mutex);

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
	if (!(*alloc_copy)) {
		status = -ENOMEM;
		goto bail;
	}
	memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	ocfs2_clear_local_alloc(alloc);

	ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
	status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

bail:
	if (status < 0) {
		kfree(*alloc_copy);
		*alloc_copy = NULL;
	}

	brelse(alloc_bh);

	if (inode) {
		mutex_unlock(&inode->i_mutex);
		iput(inode);
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Step 2: By now, we've completed the journal recovery, we've stamped
 * a clean local alloc on disk and dropped the node out of the
 * recovery map. DLM locks will no longer stall, so let's clear out
 * the main bitmap.
 */
int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
					struct ocfs2_dinode *alloc)
{
	int status;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto out_unlock;
	}

	/* we want the bitmap change to be recorded on disk asap */
	handle->h_sync = 1;

	status = ocfs2_sync_local_to_main(osb, handle, alloc,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

out_unlock:
	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);

	brelse(main_bm_bh);

	iput(main_bm_inode);

out:
	if (!status)
		ocfs2_init_steal_slots(osb);
	if (status)
		mlog_errno(status);
	return status;
}

/*
 * make sure we've got at least bits_wanted contiguous bits in the
 * local alloc. You lose them when you drop i_mutex.
 *
 * We will add ourselves to the transaction passed in, but may start
 * our own in order to shift windows.
 */
int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
				   u32 bits_wanted,
				   struct ocfs2_alloc_context *ac)
{
	int status;
	struct ocfs2_dinode *alloc;
	struct inode *local_alloc_inode;
	unsigned int free_bits;

	BUG_ON(!ac);

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto bail;
	}

	mutex_lock(&local_alloc_inode->i_mutex);

	/*
	 * We must double check state and allocator bits because
	 * another process may have changed them while holding i_mutex.
	 */
	spin_lock(&osb->osb_lock);
	if (!ocfs2_la_state_enabled(osb) ||
	    (bits_wanted > osb->local_alloc_bits)) {
		spin_unlock(&osb->osb_lock);
		status = -ENOSPC;
		goto bail;
	}
	spin_unlock(&osb->osb_lock);

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

#ifdef CONFIG_OCFS2_DEBUG_FS
	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
	    ocfs2_local_alloc_count_bits(alloc)) {
		ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
			    (unsigned long long)le64_to_cpu(alloc->i_blkno),
			    le32_to_cpu(alloc->id1.bitmap1.i_used),
			    ocfs2_local_alloc_count_bits(alloc));
		status = -EIO;
		goto bail;
	}
#endif

	free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
		le32_to_cpu(alloc->id1.bitmap1.i_used);
	if (bits_wanted > free_bits) {
		/* uhoh, window change time. */
		status =
			ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}

		/*
		 * Under certain conditions, the window slide code
		 * might have reduced the number of bits available or
		 * disabled the local alloc entirely. Re-check
		 * here and return -ENOSPC if necessary.
		 */
		status = -ENOSPC;
		if (!ocfs2_la_state_enabled(osb))
			goto bail;

		free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
			le32_to_cpu(alloc->id1.bitmap1.i_used);
		if (bits_wanted > free_bits)
			goto bail;
	}

	ac->ac_inode = local_alloc_inode;
	/* We should never use localalloc from another slot */
	ac->ac_alloc_slot = osb->slot_num;
	ac->ac_which = OCFS2_AC_USE_LOCAL;
	get_bh(osb->local_alloc_bh);
	ac->ac_bh = osb->local_alloc_bh;
	status = 0;
bail:
	if (status < 0 && local_alloc_inode) {
		mutex_unlock(&local_alloc_inode->i_mutex);
		iput(local_alloc_inode);
	}

	trace_ocfs2_reserve_local_alloc_bits(
		(unsigned long long)ac->ac_max_block,
		bits_wanted, osb->slot_num, status);

	if (status)
		mlog_errno(status);
	return status;
}

int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
						  ac->ac_resv);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	*num_bits = bits_wanted;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
				  bits_wanted);

	while (bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	if (status)
		mlog_errno(status);
	return status;
}
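
/*
 * Give previously claimed bits back to the current window: clear
 * num_bits bits starting at main-bitmap cluster offset bit_off in the
 * local bitmap and decrement the used count, all under the caller's
 * transaction.
 */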
int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
				handle_t *handle,
				struct ocfs2_alloc_context *ac,
				u32 bit_off,
				u32 num_bits)
{
	int status, start;
	u32 clear_bits;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	bitmap = la->la_bitmap;
	start = bit_off - le32_to_cpu(la->la_bm_off);
	clear_bits = num_bits;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	while (clear_bits--)
		ocfs2_clear_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	return status;
}

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
	u32 count;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));

	trace_ocfs2_local_alloc_count_bits(count);
	return count;
}

static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
				struct ocfs2_dinode *alloc,
				u32 *numbits,
				struct ocfs2_alloc_reservation *resv)
{
	int numfound = 0, bitoff, left, startoff, lastzero;
	int local_resv = 0;
	struct ocfs2_alloc_reservation r;
	void *bitmap = NULL;
	struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;

	if (!alloc->id1.bitmap1.i_total) {
		bitoff = -1;
		goto bail;
	}

	if (!resv) {
		local_resv = 1;
		ocfs2_resv_init_once(&r);
		ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
		resv = &r;
	}

	numfound = *numbits;
	if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
		if (numfound < *numbits)
			*numbits = numfound;
		goto bail;
	}

	/*
	 * Code error. While reservations are enabled, local
	 * allocation should _always_ go through them.
	 */
	BUG_ON(osb->osb_resv_level != 0);

	/*
	 * Reservations are disabled. Handle this the old way.
	 */

	bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;

	numfound = bitoff = startoff = 0;
	lastzero = -1;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
		if (bitoff == left) {
			/* mlog(0, "bitoff (%d) == left", bitoff); */
			break;
		}
		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
		   "numfound = %d\n", bitoff, startoff, numfound);*/

		/* Ok, we found a zero bit... is it contig. or do we
		 * start over? */
		if (bitoff == startoff) {
			/* we found a zero */
			numfound++;
			startoff++;
		} else {
			/* got a zero after some ones */
			numfound = 1;
			startoff = bitoff+1;
		}
		/* we got everything we needed */
		if (numfound == *numbits) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);

	if (numfound == *numbits)
		bitoff = startoff - numfound;
	else
		bitoff = -1;

bail:
	if (local_resv)
		ocfs2_resv_discard(resmap, resv);

	trace_ocfs2_local_alloc_find_clear_bits(*numbits,
		le32_to_cpu(alloc->id1.bitmap1.i_total),
		bitoff, numfound);

	return bitoff;
}

static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
	int i;

	alloc->id1.bitmap1.i_total = 0;
	alloc->id1.bitmap1.i_used = 0;
	la->la_bm_off = 0;
	for (i = 0; i < le16_to_cpu(la->la_size); i++)
		la->la_bitmap[i] = 0;
}

#if 0
/* turn this on and uncomment below to aid debugging window shifts. */
static void ocfs2_verify_zero_bits(unsigned long *bitmap,
				   unsigned int start,
				   unsigned int count)
{
	unsigned int tmp = count;

	while (tmp--) {
		if (ocfs2_test_bit(start + tmp, bitmap)) {
			printk("ocfs2_verify_zero_bits: start = %u, count = "
			       "%u\n", start, count);
			printk("ocfs2_verify_zero_bits: bit %u is set!",
			       start + tmp);
			BUG();
		}
	}
}
#endif

/*
 * sync the local alloc to main bitmap.
 *
 * assumes you've already locked the main bitmap -- the bitmap inode
 * passed is used for caching.
 */
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh)
{
	int status = 0;
	int bit_off, left, count, start;
	u64 la_start_blk;
	u64 blkno;
	void *bitmap;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_sync_local_to_main(
		le32_to_cpu(alloc->id1.bitmap1.i_total),
		le32_to_cpu(alloc->id1.bitmap1.i_used));

	if (!alloc->id1.bitmap1.i_total) {
		goto bail;
	}

	if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
	    le32_to_cpu(alloc->id1.bitmap1.i_total)) {
		goto bail;
	}

	la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
						le32_to_cpu(la->la_bm_off));
	bitmap = la->la_bitmap;
	start = count = bit_off = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);

	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
	       != -1) {
		if ((bit_off < left) && (bit_off == start)) {
			count++;
			start++;
			continue;
		}
		if (count) {
			blkno = la_start_blk +
				ocfs2_clusters_to_blocks(osb->sb,
							 start - count);

			trace_ocfs2_sync_local_to_main_free(
			     count, start - count,
			     (unsigned long long)la_start_blk,
			     (unsigned long long)blkno);

			status = ocfs2_release_clusters(handle,
							main_bm_inode,
							main_bm_bh, blkno,
							count);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		if (bit_off >= left)
			break;
		count = 1;
		start = bit_off + 1;
	}

bail:
	if (status)
		mlog_errno(status);
	return status;
}

enum ocfs2_la_event {
	OCFS2_LA_EVENT_SLIDE,		/* Normal window slide. */
	OCFS2_LA_EVENT_FRAGMENTED,	/* The global bitmap has
					 * enough bits theoretically
					 * free, but a contiguous
					 * allocation could not be
					 * found. */
	OCFS2_LA_EVENT_ENOSPC,		/* Global bitmap doesn't have
					 * enough bits free to satisfy
					 * our request. */
};
#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
/*
 * Given an event, calculate the size of our next local alloc window.
 *
 * This should always be called under i_mutex of the local alloc inode
 * so that local alloc disabling doesn't race with processes trying to
 * use the allocator.
 *
 * Returns the state which the local alloc was left in. This value can
 * be ignored by some paths.
 */
static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
				  enum ocfs2_la_event event)
{
	unsigned int bits;
	int state;

	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
		WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
		goto out_unlock;
	}

	/*
	 * ENOSPC and fragmentation are treated similarly for now.
	 */
	if (event == OCFS2_LA_EVENT_ENOSPC ||
	    event == OCFS2_LA_EVENT_FRAGMENTED) {
		/*
		 * We ran out of contiguous space in the primary
		 * bitmap. Drastically reduce the number of bits used
		 * by local alloc until we have to disable it.
		 */
		bits = osb->local_alloc_bits >> 1;
		if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
			/*
			 * By setting state to THROTTLED, we'll keep
			 * the number of local alloc bits used down
			 * until an event occurs which would give us
			 * reason to assume the bitmap situation might
			 * have changed.
			 */
			osb->local_alloc_state = OCFS2_LA_THROTTLED;
			osb->local_alloc_bits = bits;
		} else {
			osb->local_alloc_state = OCFS2_LA_DISABLED;
		}
		queue_delayed_work(ocfs2_wq, &osb->la_enable_wq,
				   OCFS2_LA_ENABLE_INTERVAL);
		goto out_unlock;
	}

	/*
	 * Don't increase the size of the local alloc window until we
	 * know we might be able to fulfill the request. Otherwise, we
	 * risk bouncing around the global bitmap during periods of
	 * low space.
	 */
	if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
		osb->local_alloc_bits = osb->local_alloc_default_bits;

out_unlock:
	state = osb->local_alloc_state;
	spin_unlock(&osb->osb_lock);

	return state;
}

static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh)
{
	int status;

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

retry_enospc:
	(*ac)->ac_bits_wanted = osb->local_alloc_bits;
	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
	if (status == -ENOSPC) {
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ocfs2_free_ac_resource(*ac);
		memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
		goto retry_enospc;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*bitmap_inode = (*ac)->ac_inode;
	igrab(*bitmap_inode);
	*bitmap_bh = (*ac)->ac_bh;
	get_bh(*bitmap_bh);
	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * The caller is expected to already hold the main bitmap lock - it is
 * taken on our behalf when the new window is reserved (see
 * ocfs2_local_alloc_reserve_for_window()).
 */
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac)
{
	int status = 0;
	u32 cluster_off, cluster_count;
	struct ocfs2_dinode *alloc = NULL;
	struct ocfs2_local_alloc *la;

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_local_alloc_new_window(
		le32_to_cpu(alloc->id1.bitmap1.i_total),
		osb->local_alloc_bits);

	/* Instruct the allocation code to try the most recently used
	 * cluster group. We'll re-record the group used this pass
	 * below. */
	ac->ac_last_group = osb->la_last_gd;

	/* we used the generic suballoc reserve function, but we set
	 * everything up nicely, so there's no reason why we can't use
	 * the more specific cluster api to claim bits. */
	status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
				      &cluster_off, &cluster_count);
	if (status == -ENOSPC) {
retry_enospc:
		/*
		 * Note: We could also try syncing the journal here to
		 * allow use of any free bits which the current
		 * transaction can't give us access to. --Mark
		 */
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ac->ac_bits_wanted = osb->local_alloc_bits;
		status = ocfs2_claim_clusters(handle, ac,
					      osb->local_alloc_bits,
					      &cluster_off,
					      &cluster_count);
		if (status == -ENOSPC)
			goto retry_enospc;
		/*
		 * We only shrunk the *minimum* number of bits in our
		 * request - it's entirely possible that the allocator
		 * might give us more than we asked for.
		 */
		if (status == 0) {
			spin_lock(&osb->osb_lock);
			osb->local_alloc_bits = cluster_count;
			spin_unlock(&osb->osb_lock);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	osb->la_last_gd = ac->ac_last_group;

	la->la_bm_off = cpu_to_le32(cluster_off);
	alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
	/* just in case... In the future when we find space ourselves,
	 * we don't have to get all contiguous -- but we'll have to
	 * set all previously used bits in bitmap and update
	 * la_bits_set before setting the bits in the main bitmap. */
	alloc->id1.bitmap1.i_used = 0;
	memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
	       le16_to_cpu(la->la_size));

	ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
			     OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);

	trace_ocfs2_local_alloc_new_window_result(
		OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
		le32_to_cpu(alloc->id1.bitmap1.i_total));

bail:
	if (status)
		mlog_errno(status);
	return status;
}
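
/*
 * Slide the local alloc window: hand any clusters left unused in the
 * current window back to the main bitmap, then claim a fresh window
 * from it.
 */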
/* Note that we do *NOT* lock the local alloc inode here as
 * it's been locked already for us. */
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode)
{
	int status = 0;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	handle_t *handle = NULL;
	struct ocfs2_dinode *alloc;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_alloc_context *ac = NULL;

	ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);

	/* This will lock the main bitmap for us. */
	status = ocfs2_local_alloc_reserve_for_window(osb,
						      &ac,
						      &main_bm_inode,
						      &main_bm_bh);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

	/* We want to clear the local alloc before doing anything
	 * else, so that if we error later during this operation,
	 * local alloc shutdown won't try to double free main bitmap
	 * bits. Make a copy so the sync function knows which bits to
	 * free. */
	alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_local_alloc_new_window(osb, handle, ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	atomic_inc(&osb->alloc_stats.moves);

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	brelse(main_bm_bh);

	if (main_bm_inode)
		iput(main_bm_inode);

	kfree(alloc_copy);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (status)
		mlog_errno(status);
	return status;
}