// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Tino Reichardt, 2012
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_lock.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
#include "jfs_discard.h"

/*
 *	SERIALIZATION of the Block Allocation Map.
 *
 *	the working state of the block allocation map is accessed in
 *	two directions:
 *
 *	1) allocation and free requests that start at the dmap
 *	   level and move up through the dmap control pages (i.e.
 *	   the vast majority of requests).
 *
 *	2) allocation requests that start at dmap control page
 *	   level and work down towards the dmaps.
 *
 *	the serialization scheme used here is as follows.
 *
 *	requests which start at the bottom are serialized against each
 *	other through buffers, and each request holds onto its buffers
 *	as it works its way up from a single dmap to the required level
 *	of dmap control page.
 *	requests that start at the top are serialized against each other
 *	and against requests that start from the bottom by the multiple
 *	read/single write inode lock of the bmap inode.  requests starting
 *	at the top take this lock in write mode while requests starting
 *	at the bottom take the lock in read mode.  a single top-down
 *	request may proceed exclusively while multiple bottom-up requests
 *	may proceed simultaneously (under the protection of busy buffers).
 *
 *	in addition to information found in dmaps and dmap control pages,
 *	the working state of the block allocation map also includes read/
 *	write information maintained in the bmap descriptor (i.e. total
 *	free block count, allocation group level free block counts).
 *	a single exclusive lock (BMAP_LOCK) is used to guard this information
 *	in the face of multiple bottom-up requests.
 *	(lock ordering: IREAD_LOCK, BMAP_LOCK);
 *
 *	accesses to the persistent state of the block allocation map (limited
 *	to the persistent bitmaps in dmaps) are guarded by (busy) buffers.
 */

#define BMAP_LOCK_INIT(bmp)	mutex_init(&bmp->db_bmaplock)
#define BMAP_LOCK(bmp)		mutex_lock(&bmp->db_bmaplock)
#define BMAP_UNLOCK(bmp)	mutex_unlock(&bmp->db_bmaplock)
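
/*
 * Illustrative sketch (not part of the driver, not compiled): the locking
 * discipline described above, shown for a hypothetical bottom-up request.
 * The helper name is made up for this example; only IREAD_LOCK/IREAD_UNLOCK,
 * BMAP_LOCK/BMAP_UNLOCK and the RDWRLOCK_DMAP subclass come from this file
 * and its headers.
 */
#if 0
static void example_bottom_up_request(struct inode *ipbmap, struct bmap *bmp)
{
	/* bottom-up requests take the bmap inode lock in read mode, so
	 * many of them may run concurrently; top-down requests take the
	 * same lock in write mode and run exclusively.
	 */
	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* ... walk up from the dmap, holding the metapage buffers ... */

	/* summary counters in the bmap descriptor (db_nfree, db_agfree[])
	 * are guarded by BMAP_LOCK; note the ordering IREAD_LOCK, BMAP_LOCK.
	 */
	BMAP_LOCK(bmp);
	/* ... update db_nfree / db_agfree[] ... */
	BMAP_UNLOCK(bmp);

	IREAD_UNLOCK(ipbmap);
}
#endif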

/*
 * forward references
 */
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
			int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static int dbBackSplit(dmtree_t * tp, int leafno);
static int dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
		    int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks, int l2nb, s64 * results);
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
			  int l2nb, s64 * results);
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
		     s64 * results);
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
		      s64 * results);
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks);
static int dbMaxBud(u8 * cp);
static int blkstol2(s64 nb);

static int cntlz(u32 value);
static int cnttz(u32 word);

static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
			 int nblocks);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);

/*
 *	buddy table
 *
 * table used for determining buddy sizes within characters of
 * dmap bitmap words.  the characters themselves serve as indexes
 * into the table, with the table elements yielding the maximum
 * binary buddy of free bits within the character.
 */
static const s8 budtab[256] = {
	3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
};
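
/*
 * Illustrative sketch (not part of the driver, not compiled): how a byte of
 * a dmap bitmap word indexes budtab.  A clear bit means the block is free,
 * and budtab[] yields the log2 size of the largest aligned binary buddy of
 * free bits inside that byte, or -1 when nothing is free.  The values below
 * follow directly from the table above.
 */
#if 0
static void example_budtab_lookup(void)
{
	/* 0x00: all eight bits free -> buddy of 2^3 blocks */
	assert(budtab[0x00] == 3);
	/* 0x0f: upper nibble free -> aligned buddy of 2^2 blocks */
	assert(budtab[0x0f] == 2);
	/* 0xf0: lower nibble free -> still an aligned buddy of 2^2 blocks */
	assert(budtab[0xf0] == 2);
	/* 0xff: nothing free -> -1 (NOFREE) */
	assert(budtab[0xff] == -1);
}
#endif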

/*
 * NAME:	dbMount()
 *
 * FUNCTION:	initialize the block allocation map.
 *
 *		memory is allocated for the in-core bmap descriptor and
 *		the in-core descriptor is initialized from disk.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOMEM	- insufficient memory
 *	-EIO	- i/o error
 *	-EINVAL	- wrong bmap data
 */
int dbMount(struct inode *ipbmap)
{
	struct bmap *bmp;
	struct dbmap_disk *dbmp_le;
	struct metapage *mp;
	int i, err;

	/*
	 * allocate/initialize the in-memory bmap descriptor
	 */
	/* allocate memory for the in-memory bmap descriptor */
	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
	if (bmp == NULL)
		return -ENOMEM;

	/* read the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		err = -EIO;
		goto err_kfree_bmp;
	}

	/* copy the on-disk bmap descriptor to its in-memory version. */
	dbmp_le = (struct dbmap_disk *) mp->data;
	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
	if (!bmp->db_numag) {
		err = -EINVAL;
		goto err_release_metapage;
	}

	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
	    bmp->db_agl2size < 0) {
		err = -EINVAL;
		goto err_release_metapage;
	}

	if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
		err = -EINVAL;
		goto err_release_metapage;
	}

	for (i = 0; i < MAXAG; i++)
		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;

	/* release the buffer. */
	release_metapage(mp);

	/* bind the bmap inode and the bmap descriptor to each other.
	 */
	bmp->db_ipbmap = ipbmap;
	JFS_SBI(ipbmap->i_sb)->bmap = bmp;

	memset(bmp->db_active, 0, sizeof(bmp->db_active));

	/*
	 * allocate/initialize the bmap lock
	 */
	BMAP_LOCK_INIT(bmp);

	return (0);

err_release_metapage:
	release_metapage(mp);
err_kfree_bmp:
	kfree(bmp);
	return err;
}


/*
 * NAME:	dbUnmount()
 *
 * FUNCTION:	terminate the block allocation map in preparation for
 *		file system unmount.
 *
 *		the in-core bmap descriptor is written to disk and
 *		the memory for this descriptor is freed.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbUnmount(struct inode *ipbmap, int mounterror)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	if (!(mounterror || isReadOnly(ipbmap)))
		dbSync(ipbmap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipbmap->i_mapping, 0);

	/* free the memory for the in-memory bmap. */
	kfree(bmp);

	return (0);
}

/*
 *	dbSync()
 */
int dbSync(struct inode *ipbmap)
{
	struct dbmap_disk *dbmp_le;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	struct metapage *mp;
	int i;

	/*
	 * write bmap global control page
	 */
	/* get the buffer for the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		jfs_err("dbSync: read_metapage failed!");
		return -EIO;
	}
	/* copy the in-memory version of the bmap to the on-disk version */
	dbmp_le = (struct dbmap_disk *) mp->data;
	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
	for (i = 0; i < MAXAG; i++)
		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;

	/* write the buffer */
	write_metapage(mp);

	/*
	 * write out dirty pages of bmap
	 */
	filemap_write_and_wait(ipbmap->i_mapping);

	diWriteSpecial(ipbmap, 0);

	return (0);
}

/*
 * NAME:	dbFree()
 *
 * FUNCTION:	free the specified block range from the working block
 *		allocation map.
 *
 *		the blocks will be freed from the working map one dmap
 *		at a time.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- starting block number to be freed.
 *	nblocks	- number of blocks to be freed.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
	struct metapage *mp;
	struct dmap *dp;
	int nb, rc;
	s64 lblkno, rem;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct super_block *sb = ipbmap->i_sb;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* block to be freed better be within the mapsize. */
	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
		IREAD_UNLOCK(ipbmap);
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ip->i_sb, "block to be freed is outside the map\n");
		return -EIO;
	}

	/*
	 * TRIM the blocks, when mounted with discard option
	 */
	if (JFS_SBI(sb)->flag & JFS_DISCARD)
		if (JFS_SBI(sb)->minblks_trim <= nblocks)
			jfs_issue_discard(ipbmap, blkno, nblocks);

	/*
	 * free the blocks a dmap at a time.
	 */
	mp = NULL;
	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* release previous dmap if any */
		if (mp)
			write_metapage(mp);

		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			IREAD_UNLOCK(ipbmap);
			return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the number of blocks to be freed from
		 * this dmap.
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		/* free the blocks. */
		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
			jfs_error(ip->i_sb, "error in block map\n");
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}
	}

	/* write the last buffer. */
	if (mp)
		write_metapage(mp);

	IREAD_UNLOCK(ipbmap);

	return (0);
}

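
/*
 * Illustrative sketch (not part of the driver, not compiled): how the
 * "one dmap at a time" loop in dbFree() carves a block range at dmap
 * boundaries.  The numbers assume the usual BPERDMAP of 8192 blocks per
 * dmap; the helper below exists only for this example.
 */
#if 0
static void example_dmap_split(void)
{
	s64 blkno = 8000;	/* start 192 blocks before a dmap boundary */
	s64 rem = 1000;		/* total blocks to free */
	int nb;

	/* first pass: only up to the end of the current dmap */
	nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
	/* nb == 192, leaving rem == 808 for the next dmap */

	rem -= nb;
	blkno += nb;

	/* second pass: blkno is now dmap aligned, so the rest fits */
	nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
	/* nb == 808 */
}
#endif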

/*
 * NAME:	dbUpdatePMap()
 *
 * FUNCTION:	update the allocation state (free or allocate) of the
 *		specified block range in the persistent block allocation map.
 *
 *		the blocks will be updated in the persistent map one
 *		dmap at a time.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *	free	- 'true' if block range is to be freed from the persistent
 *		  map; 'false' if it is to be allocated.
 *	blkno	- starting block number of the range.
 *	nblocks	- number of contiguous blocks in the range.
 *	tblk	- transaction block;
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error
 */
int
dbUpdatePMap(struct inode *ipbmap,
	     int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
	int nblks, dbitno, wbitno, rbits;
	int word, nbits, nwords;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	s64 lblkno, rem, lastlblkno;
	u32 mask;
	struct dmap *dp;
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
		return -EIO;
	}

	/* compute delta of transaction lsn from log syncpt */
	lsn = tblk->lsn;
	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
	logdiff(difft, lsn, log);

	/*
	 * update the block state a dmap at a time.
	 */
	mp = NULL;
	lastlblkno = 0;
	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		if (lblkno != lastlblkno) {
			if (mp)
				write_metapage(mp);

			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
			if (mp == NULL)
				return -EIO;
			metapage_wait_for_io(mp);
		}
		dp = (struct dmap *) mp->data;

		/* determine the bit number and word within the dmap of
		 * the starting block.  also determine how many blocks
		 * are to be updated within this dmap.
		 */
		dbitno = blkno & (BPERDMAP - 1);
		word = dbitno >> L2DBWORD;
		nblks = min(rem, (s64)BPERDMAP - dbitno);

		/* update the bits of the dmap words.  the first and last
		 * words may only have a subset of their bits updated.  if
		 * this is the case, we'll work against that word (i.e.
		 * partial first and/or last) only in a single pass.  a
		 * single pass will also be used to update all words that
		 * are to have all their bits updated.
		 */
		for (rbits = nblks; rbits > 0;
		     rbits -= nbits, dbitno += nbits) {
			/* determine the bit number within the word and
			 * the number of bits within the word.
			 */
			wbitno = dbitno & (DBWORD - 1);
			nbits = min(rbits, DBWORD - wbitno);

			/* check if only part of the word is to be updated. */
			if (nbits < DBWORD) {
				/* update (free or allocate) the bits
				 * in this word.
				 */
				mask = (ONES << (DBWORD - nbits) >> wbitno);
				if (free)
					dp->pmap[word] &= cpu_to_le32(~mask);
				else
					dp->pmap[word] |= cpu_to_le32(mask);

				word += 1;
			} else {
				/* one or more words are to have all
				 * their bits updated.  determine how
				 * many words and how many bits.
				 */
				nwords = rbits >> L2DBWORD;
				nbits = nwords << L2DBWORD;

				/* update (free or allocate) the bits
				 * in these words.
				 */
				if (free)
					memset(&dp->pmap[word], 0,
					       nwords * 4);
				else
					memset(&dp->pmap[word], (int) ONES,
					       nwords * 4);

				word += nwords;
			}
		}

		/*
		 * update dmap lsn
		 */
		if (lblkno == lastlblkno)
			continue;

		lastlblkno = lblkno;

		LOGSYNC_LOCK(log, flags);
		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move bp after tblock in logsync list */
				list_move(&mp->synclist, &tblk->synclist);
			}

			/* inherit younger/larger clsn */
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
		} else {
			mp->log = log;
			mp->lsn = lsn;

			/* insert bp after tblock in logsync list */
			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
		}
		LOGSYNC_UNLOCK(log, flags);
	}

	/* write the last buffer. */
	if (mp)
		write_metapage(mp);

	return (0);
}

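
/*
 * Illustrative sketch (not part of the driver, not compiled): the partial
 * word mask used in dbUpdatePMap().  DBWORD is 32 and ONES is a word of all
 * ones, so (ONES << (DBWORD - nbits) >> wbitno) selects nbits bits starting
 * at bit position wbitno, counting from the most significant bit.  The
 * helper is hypothetical and exists only to show the arithmetic.
 */
#if 0
static void example_pmap_mask(void)
{
	int wbitno = 5;		/* start 5 bits into the word */
	int nbits = 3;		/* update 3 bits */
	u32 mask;

	mask = (ONES << (DBWORD - nbits) >> wbitno);
	/* ONES << 29 == 0xe0000000; shifted right by 5 -> 0x07000000,
	 * i.e. bits 5, 6 and 7 of the word (MSB first) are selected.
	 */
	assert(mask == 0x07000000);
}
#endif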

/*
 * NAME:	dbNextAG()
 *
 * FUNCTION:	find the preferred allocation group for new allocations.
 *
 *		Within the allocation groups, we maintain a preferred
 *		allocation group which consists of a group with at least
 *		average free space.  It is the preferred group that we target
 *		new inode allocation towards.  The tie-in between inode
 *		allocation and block allocation occurs as we allocate the
 *		first (data) block of an inode and specify the inode (block)
 *		as the allocation hint for this block.
 *
 *		We try to avoid having more than one open file growing in
 *		an allocation group, as this will lead to fragmentation.
 *		This differs from the old OS/2 method of trying to keep
 *		empty ags around for large allocations.
 *
 * PARAMETERS:
 *	ipbmap	- pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *	the preferred allocation group number.
 */
int dbNextAG(struct inode *ipbmap)
{
	s64 avgfree;
	int agpref;
	s64 hwm = 0;
	int i;
	int next_best = -1;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	BMAP_LOCK(bmp);

	/* determine the average number of free blocks within the ags. */
	avgfree = (u32)bmp->db_nfree / bmp->db_numag;

	/*
	 * if the current preferred ag does not have an active allocator
	 * and has at least average freespace, return it
	 */
	agpref = bmp->db_agpref;
	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
	    (bmp->db_agfree[agpref] >= avgfree))
		goto unlock;

	/* From the last preferred ag, find the next one with at least
	 * average free space.
	 */
	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
		if (agpref == bmp->db_numag)
			agpref = 0;

		if (atomic_read(&bmp->db_active[agpref]))
			/* open file is currently growing in this ag */
			continue;
		if (bmp->db_agfree[agpref] >= avgfree) {
			/* Return this one */
			bmp->db_agpref = agpref;
			goto unlock;
		} else if (bmp->db_agfree[agpref] > hwm) {
			/* Less than avg. freespace, but best so far */
			hwm = bmp->db_agfree[agpref];
			next_best = agpref;
		}
	}

	/*
	 * If no inactive ag was found with average freespace, use the
	 * next best
	 */
	if (next_best != -1)
		bmp->db_agpref = next_best;
	/* else leave db_agpref unchanged */
unlock:
	BMAP_UNLOCK(bmp);

	/* return the preferred group.
	 */
	return (bmp->db_agpref);
}

/*
 * NAME:	dbAlloc()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous free
 *		blocks from the working allocation block map.
 *
 *		the block allocation policy uses hints and a multi-step
 *		approach.
 *
 *		for allocation requests smaller than the number of blocks
 *		per dmap, we first try to allocate the new blocks
 *		immediately following the hint.  if these blocks are not
 *		available, we try to allocate blocks near the hint.  if
 *		no blocks near the hint are available, we next try to
 *		allocate within the same dmap as contains the hint.
 *
 *		if no blocks are available in the dmap or the allocation
 *		request is larger than the dmap size, we try to allocate
 *		within the same allocation group as contains the hint.  if
 *		this does not succeed, we finally try to allocate anywhere
 *		within the aggregate.
 *
 *		we also try to allocate anywhere within the aggregate
 *		for allocation requests larger than the allocation group
 *		size or requests that specify no hint value.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	hint	- allocation hint.
 *	nblocks	- number of contiguous blocks in the range.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated contiguous range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
	int rc, agno;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp;
	struct metapage *mp;
	s64 lblkno, blkno;
	struct dmap *dp;
	int l2nb;
	s64 mapSize;
	int writers;

	/* assert that nblocks is valid */
	assert(nblocks > 0);

	/* get the log2 number of blocks to be allocated.
	 * if the number of blocks is not a log2 multiple,
	 * it will be rounded up to the next log2 multiple.
	 */
	l2nb = BLKSTOL2(nblocks);

	bmp = JFS_SBI(ip->i_sb)->bmap;

	mapSize = bmp->db_mapsize;

	/* the hint should be within the map */
	if (hint >= mapSize) {
		jfs_error(ip->i_sb, "the hint is outside the map\n");
		return -EIO;
	}

	/* if the number of blocks to be allocated is greater than the
	 * allocation group size, try to allocate anywhere.
	 */
	if (l2nb > bmp->db_agl2size) {
		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

		rc = dbAllocAny(bmp, nblocks, l2nb, results);

		goto write_unlock;
	}

	/*
	 * If no hint, let dbNextAG recommend an allocation group
	 */
	if (hint == 0)
		goto pref_ag;

	/* we would like to allocate close to the hint.  adjust the
	 * hint to the block following the hint since the allocators
	 * will start looking for free space starting at this point.
	 */
	blkno = hint + 1;

	if (blkno >= bmp->db_mapsize)
		goto pref_ag;

	agno = blkno >> bmp->db_agl2size;

	/* check if blkno crosses over into a new allocation group.
	 * if so, check if we should allow allocations within this
	 * allocation group.
	 */
	if ((blkno & (bmp->db_agsize - 1)) == 0)
		/* check if the AG is currently being written to.
		 * if so, call dbNextAG() to find a non-busy
		 * AG with sufficient free space.
		 */
		if (atomic_read(&bmp->db_active[agno]))
			goto pref_ag;

	/* check if the allocation request size can be satisfied from a
	 * single dmap.  if so, try to allocate from the dmap containing
	 * the hint using a tiered strategy.
	 */
	if (nblocks <= BPERDMAP) {
		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

		/* get the buffer for the dmap containing the hint.
		 */
		rc = -EIO;
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			goto read_unlock;

		dp = (struct dmap *) mp->data;

		/* first, try to satisfy the allocation request with the
		 * blocks beginning at the hint.
		 */
		if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
		    != -ENOSPC) {
			if (rc == 0) {
				*results = blkno;
				mark_metapage_dirty(mp);
			}

			release_metapage(mp);
			goto read_unlock;
		}

		writers = atomic_read(&bmp->db_active[agno]);
		if ((writers > 1) ||
		    ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
			/*
			 * Someone else is writing in this allocation
			 * group.  To avoid fragmenting, try another ag
			 */
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			goto pref_ag;
		}

		/* next, try to satisfy the allocation request with blocks
		 * near the hint.
		 */
		if ((rc =
		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		/* try to satisfy the allocation request with blocks within
		 * the same dmap as the hint.
		 */
		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0)
				mark_metapage_dirty(mp);

			release_metapage(mp);
			goto read_unlock;
		}

		release_metapage(mp);
		IREAD_UNLOCK(ipbmap);
	}

	/* try to satisfy the allocation request with blocks within
	 * the same allocation group as the hint.
	 */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
		goto write_unlock;

	IWRITE_UNLOCK(ipbmap);

pref_ag:
	/*
	 * Let dbNextAG recommend a preferred allocation group
	 */
	agno = dbNextAG(ipbmap);
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* Try to allocate within this allocation group.  if that fails, try to
	 * allocate anywhere in the map.
	 */
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
		rc = dbAllocAny(bmp, nblocks, l2nb, results);

write_unlock:
	IWRITE_UNLOCK(ipbmap);

	return (rc);

read_unlock:
	IREAD_UNLOCK(ipbmap);

	return (rc);
}

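
/*
 * Illustrative sketch (not part of the driver, not compiled): how a caller
 * might use dbAlloc() with a locality hint.  Passing the last block of an
 * existing extent as the hint asks the allocator to start looking right
 * after that extent; a hint of 0 lets dbNextAG() pick the allocation group.
 * The helper and the "prev_last_block" parameter are hypothetical.
 */
#if 0
static int example_alloc_near(struct inode *ip, s64 prev_last_block,
			      s64 nblocks, s64 *blkno)
{
	/* note that dbAlloc() rounds the request up to a power of two for
	 * its search (BLKSTOL2), e.g. a request for 24 blocks is searched
	 * for as a run of at least 2^5 = 32 free blocks, though only the
	 * 24 requested blocks are allocated.
	 */
	return dbAlloc(ip, prev_last_block, nblocks, blkno);
}
#endif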

/*
 * NAME:	dbReAlloc()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.  if these
 *		blocks are not available, this routine will attempt to
 *		allocate a new set of contiguous blocks large enough
 *		to cover the existing allocation plus the additional
 *		number of blocks required.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *	results	- on successful return, set to the starting block number
 *		  of the existing allocation if the existing allocation
 *		  was extended in place or to a newly allocated contiguous
 *		  range if the existing allocation could not be extended
 *		  in place.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int
dbReAlloc(struct inode *ip,
	  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
{
	int rc;

	/* try to extend the allocation in place.
	 */
	if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
		*results = blkno;
		return (0);
	} else {
		if (rc != -ENOSPC)
			return (rc);
	}

	/* could not extend the allocation in place, so allocate a
	 * new set of blocks for the entire request (i.e. try to get
	 * a range of contiguous blocks large enough to cover the
	 * existing allocation plus the additional blocks.)
	 */
	return (dbAlloc
		(ip, blkno + nblocks - 1, addnblocks + nblocks, results));
}


/*
 * NAME:	dbExtend()
 *
 * FUNCTION:	attempt to extend a current allocation by a specified
 *		number of blocks.
 *
 *		this routine attempts to satisfy the allocation request
 *		by first trying to extend the existing allocation in
 *		place by allocating the additional blocks as the blocks
 *		immediately following the current allocation.
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode requiring allocation.
 *	blkno	- starting block of the current allocation.
 *	nblocks	- number of contiguous blocks within the current
 *		  allocation.
 *	addnblocks - number of blocks to add to the allocation.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
{
	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
	s64 lblkno, lastblkno, extblkno;
	uint rel_block;
	struct metapage *mp;
	struct dmap *dp;
	int rc;
	struct inode *ipbmap = sbi->ipbmap;
	struct bmap *bmp;

	/*
	 * We don't want a non-aligned extent to cross a page boundary
	 */
	if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
	    (rel_block + nblocks + addnblocks > sbi->nbperpage))
		return -ENOSPC;

	/* get the last block of the current allocation */
	lastblkno = blkno + nblocks - 1;

	/* determine the block number of the block following
	 * the existing allocation.
	 */
	extblkno = lastblkno + 1;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* better be within the file system */
	bmp = sbi->bmap;
	if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
		IREAD_UNLOCK(ipbmap);
		jfs_error(ip->i_sb, "the block is outside the filesystem\n");
		return -EIO;
	}

	/* we'll attempt to extend the current allocation in place by
	 * allocating the additional blocks as the blocks immediately
	 * following the current allocation.  we only try to extend the
	 * current allocation in place if the number of additional blocks
	 * can fit into a dmap, the last block of the current allocation
	 * is not the last block of the file system, and the start of the
	 * inplace extension is not on an allocation group boundary.
	 */
	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
	    (extblkno & (bmp->db_agsize - 1)) == 0) {
		IREAD_UNLOCK(ipbmap);
		return -ENOSPC;
	}

	/* get the buffer for the dmap containing the first block
	 * of the extension.
	 */
	lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL) {
		IREAD_UNLOCK(ipbmap);
		return -EIO;
	}

	dp = (struct dmap *) mp->data;

	/* try to allocate the blocks immediately following the
	 * current allocation.
	 */
	rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);

	IREAD_UNLOCK(ipbmap);

	/* were we successful ? */
	if (rc == 0)
		write_metapage(mp);
	else
		/* we were not successful */
		release_metapage(mp);

	return (rc);
}

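
/*
 * Illustrative sketch (not part of the driver, not compiled): the page
 * boundary check at the top of dbExtend().  With the common 4K page and
 * 4K block size, nbperpage is 1 and rel_block is always 0, so the check
 * never rejects anything; with 512-byte blocks, nbperpage is 8 and an
 * extent that starts mid-page may not be extended past that page.  The
 * numbers below assume nbperpage == 8 and exist only for this example.
 */
#if 0
static int example_extend_check(s64 blkno, s64 nblocks, s64 addnblocks)
{
	uint nbperpage = 8;	/* hypothetical: 512-byte blocks, 4K pages */
	uint rel_block = blkno & (nbperpage - 1);

	/* e.g. blkno = 10 -> rel_block = 2; extending a 3-block extent
	 * by 4 blocks gives 2 + 3 + 4 = 9 > 8, so refuse.
	 */
	if (rel_block && (rel_block + nblocks + addnblocks > nbperpage))
		return -ENOSPC;
	return 0;
}
#endif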

/*
 * NAME:	dbAllocNext()
 *
 * FUNCTION:	attempt to allocate the blocks of the specified block
 *		range within a dmap.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- starting block number of the range.
 *	nblocks	- number of contiguous free blocks of the range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
 */
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks)
{
	int dbitno, word, rembits, nb, nwords, wbitno, nw;
	int l2size;
	s8 *leaf;
	u32 mask;

	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
		return -EIO;
	}

	/* pick up a pointer to the leaves of the dmap tree.
	 */
	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);

	/* determine the bit number and word within the dmap of the
	 * starting block.
	 */
	dbitno = blkno & (BPERDMAP - 1);
	word = dbitno >> L2DBWORD;

	/* check if the specified block range is contained within
	 * this dmap.
	 */
	if (dbitno + nblocks > BPERDMAP)
		return -ENOSPC;

	/* check if the starting leaf indicates that anything
	 * is free.
	 */
	if (leaf[word] == NOFREE)
		return -ENOSPC;

	/* check the dmap's words corresponding to the block range to see
	 * if the block range is free.  not all bits of the first and
	 * last words may be contained within the block range.  if this
	 * is the case, we'll work against those words (i.e. partial first
	 * and/or last) on an individual basis (a single pass) and examine
	 * the actual bits to determine if they are free.  a single pass
	 * will be used for all dmap words fully contained within the
	 * specified range.  within this pass, the leaves of the dmap
	 * tree will be examined to determine if the blocks are free.  a
	 * single leaf may describe the free space of multiple dmap
	 * words, so we may visit only a subset of the actual leaves
	 * corresponding to the dmap words of the block range.
	 */
	for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
		/* determine the bit number within the word and
		 * the number of bits within the word.
		 */
		wbitno = dbitno & (DBWORD - 1);
		nb = min(rembits, DBWORD - wbitno);

		/* check if only part of the word is to be examined.
		 */
		if (nb < DBWORD) {
			/* check if the bits are free.
			 */
			mask = (ONES << (DBWORD - nb) >> wbitno);
			if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
				return -ENOSPC;

			word += 1;
		} else {
			/* one or more dmap words are fully contained
			 * within the block range.  determine how many
			 * words and how many bits.
			 */
			nwords = rembits >> L2DBWORD;
			nb = nwords << L2DBWORD;

			/* now examine the appropriate leaves to determine
			 * if the blocks are free.
			 */
			while (nwords > 0) {
				/* does the leaf describe any free space ?
				 */
				if (leaf[word] < BUDMIN)
					return -ENOSPC;

				/* determine the l2 number of bits provided
				 * by this leaf.
				 */
				l2size =
				    min_t(int, leaf[word], NLSTOL2BSZ(nwords));

				/* determine how many words were handled.
				 */
				nw = BUDSIZE(l2size, BUDMIN);

				nwords -= nw;
				word += nw;
			}
		}
	}

	/* allocate the blocks.
	 */
	return (dbAllocDmap(bmp, dp, blkno, nblocks));
}


/*
 * NAME:	dbAllocNear()
 *
 * FUNCTION:	attempt to allocate a number of contiguous free blocks near
 *		a specified block (hint) within a dmap.
 *
 *		starting with the dmap leaf that covers the hint, we'll
 *		check the next four contiguous leaves for sufficient free
 *		space.  if sufficient free space is found, we'll allocate
 *		the desired free space.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	dp	- pointer to dmap.
 *	blkno	- block number to allocate near.
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocNear(struct bmap * bmp,
	    struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
{
	int word, lword, rc;
	s8 *leaf;

	if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
		return -EIO;
	}

	leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);

	/* determine the word within the dmap that holds the hint
	 * (i.e. blkno).  also, determine the last word in the dmap
	 * that we'll include in our examination.
	 */
	word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
	lword = min(word + 4, LPERDMAP);

	/* examine the leaves for sufficient free space.
	 */
	for (; word < lword; word++) {
		/* does the leaf describe sufficient free space ?
		 */
		if (leaf[word] < l2nb)
			continue;

		/* determine the block number within the file system
		 * of the first block described by this dmap word.
		 */
		blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);

		/* if not all bits of the dmap word are free, get the
		 * starting bit number within the dmap word of the required
		 * string of free bits and adjust the block number with the
		 * value.
		 */
		if (leaf[word] < BUDMIN)
			blkno +=
			    dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);

		/* allocate the blocks.
		 */
		if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
			*results = blkno;

		return (rc);
	}

	return -ENOSPC;
}


/*
 * NAME:	dbAllocAG()
 *
 * FUNCTION:	attempt to allocate the specified number of contiguous
 *		free blocks within the specified allocation group.
 *
 *		unless the allocation group size is equal to the number
 *		of blocks per dmap, the dmap control pages will be used to
 *		find the required free space, if available.  we start the
 *		search at the highest dmap control page level which
 *		distinctly describes the allocation group's free space
 *		(i.e. the highest level at which the allocation group's
 *		free space is not mixed in with that of any other group).
 *		in addition, we start the search within this level at a
 *		height of the dmapctl dmtree at which the nodes distinctly
 *		describe the allocation group's free space.  at this height,
 *		the allocation group's free space may be represented by one
 *		or two sub-trees, depending on the allocation group size.
 *		we search the top nodes of these subtrees left to right for
 *		sufficient free space.  if sufficient free space is found,
 *		the subtree is searched to find the leftmost leaf that
 *		has free space.  once we have made it to the leaf, we
 *		move the search to the next lower level dmap control page
 *		corresponding to this leaf.  we continue down the dmap control
 *		pages until we find the dmap that contains or starts the
 *		sufficient free space and we allocate at this dmap.
 *
 *		if the allocation group size is equal to the dmap size,
 *		we'll start at the dmap corresponding to the allocation
 *		group and attempt the allocation at this level.
 *
 *		the dmap control page search is also not performed if the
 *		allocation group is completely free and we go to the first
 *		dmap of the allocation group to do the allocation.  this is
 *		done because the allocation group may be part (not the first
 *		part) of a larger binary buddy system, causing the dmap
 *		control pages to indicate no free space (NOFREE) within
 *		the allocation group.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	agno	- allocation group number.
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * note: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
	struct metapage *mp;
	struct dmapctl *dcp;
	int rc, ti, i, k, m, n, agperlev;
	s64 blkno, lblkno;
	int budmin;

	/* allocation request should not be for more than the
	 * allocation group size.
	 */
	if (l2nb > bmp->db_agl2size) {
		jfs_error(bmp->db_ipbmap->i_sb,
			  "allocation request is larger than the allocation group size\n");
		return -EIO;
	}

	/* determine the starting block number of the allocation
	 * group.
	 */
	blkno = (s64) agno << bmp->db_agl2size;

	/* check if the allocation group size is the minimum allocation
	 * group size or if the allocation group is completely free. if
	 * the allocation group size is the minimum size of BPERDMAP (i.e.
	 * 1 dmap), there is no need to search the dmap control page (below)
	 * that fully describes the allocation group since the allocation
	 * group is already fully described by a dmap.  in this case, we
	 * just call dbAllocCtl() to search the dmap tree and allocate the
	 * required space if available.
	 *
	 * if the allocation group is completely free, dbAllocCtl() is
	 * also called to allocate the required space.  this is done for
	 * two reasons.  first, it makes no sense searching the dmap control
	 * pages for free space when we know that free space exists.  second,
	 * the dmap control pages may indicate that the allocation group
	 * has no free space if the allocation group is part (not the first
	 * part) of a larger binary buddy system.
	 */
	if (bmp->db_agsize == BPERDMAP
	    || bmp->db_agfree[agno] == bmp->db_agsize) {
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		if ((rc == -ENOSPC) &&
		    (bmp->db_agfree[agno] == bmp->db_agsize)) {
			printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
			       (unsigned long long) blkno,
			       (unsigned long long) nblocks);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "dbAllocCtl failed in free AG\n");
		}
		return (rc);
	}

	/* the buffer for the dmap control page that fully describes the
	 * allocation group.
	 */
	lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
	mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL)
		return -EIO;
	dcp = (struct dmapctl *) mp->data;
	budmin = dcp->budmin;

	if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
		jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
		release_metapage(mp);
		return -EIO;
	}

	/* search the subtree(s) of the dmap control page that describes
	 * the allocation group, looking for sufficient free space.  to begin,
	 * determine how many allocation groups are represented in a dmap
	 * control page at the control page level (i.e. L0, L1, L2) that
	 * fully describes an allocation group.  next, determine the starting
	 * tree index of this allocation group within the control page.
	 */
	agperlev =
	    (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
	ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));

	/* dmap control page trees fan-out by 4 and a single allocation
	 * group may be described by 1 or 2 subtrees within the ag level
	 * dmap control page, depending upon the ag size.  examine the ag's
	 * subtrees for sufficient free space, starting with the leftmost
	 * subtree.
	 */
	for (i = 0; i < bmp->db_agwidth; i++, ti++) {
		/* is there sufficient free space ?
		 */
		if (l2nb > dcp->stree[ti])
			continue;

		/* sufficient free space found in a subtree.  now search down
		 * the subtree to find the leftmost leaf that describes this
		 * free space.
		 */
		for (k = bmp->db_agheight; k > 0; k--) {
			for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
				if (l2nb <= dcp->stree[m + n]) {
					ti = m + n;
					break;
				}
			}
			if (n == 4) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "failed descending stree\n");
				release_metapage(mp);
				return -EIO;
			}
		}

		/* determine the block number within the file system
		 * that corresponds to this leaf.
		 */
		if (bmp->db_aglevel == 2)
			blkno = 0;
		else if (bmp->db_aglevel == 1)
			blkno &= ~(MAXL1SIZE - 1);
		else		/* bmp->db_aglevel == 0 */
			blkno &= ~(MAXL0SIZE - 1);

		blkno +=
		    ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;

		/* release the buffer in preparation for going down
		 * the next level of dmap control pages.
		 */
		release_metapage(mp);

		/* check if we need to continue to search down the lower
		 * level dmap control pages.  we need to if the number of
		 * blocks required is less than the maximum number of blocks
		 * described at the next lower level.
		 */
		if (l2nb < budmin) {

			/* search the lower level dmap control pages to get
			 * the starting block number of the dmap that
			 * contains or starts off the free space.
			 */
			if ((rc =
			     dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
				       &blkno))) {
				if (rc == -ENOSPC) {
					jfs_error(bmp->db_ipbmap->i_sb,
						  "control page inconsistent\n");
					return -EIO;
				}
				return (rc);
			}
		}

		/* allocate the blocks.
		 */
		rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
		if (rc == -ENOSPC) {
			jfs_error(bmp->db_ipbmap->i_sb,
				  "unable to allocate blocks\n");
			rc = -EIO;
		}
		return (rc);
	}

	/* no space in the allocation group.  release the buffer and
	 * return -ENOSPC.
	 */
	release_metapage(mp);

	return -ENOSPC;
}


/*
 * NAME:	dbAllocAny()
 *
 * FUNCTION:	attempt to allocate the specified number of contiguous
 *		free blocks anywhere in the file system.
 *
 *		dbAllocAny() attempts to find sufficient free space by
 *		searching down the dmap control pages, starting with the
 *		highest level (i.e. L0, L1, L2) control page.  if free space
 *		large enough to satisfy the desired free space is found, the
 *		desired free space is allocated.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	nblocks	- actual number of contiguous free blocks desired.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
{
	int rc;
	s64 blkno = 0;

	/* starting with the top level dmap control page, search
	 * down the dmap control levels for sufficient free space.
	 * if free space is found, dbFindCtl() returns the starting
	 * block number of the dmap that contains or starts off the
	 * range of free space.
	 */
	if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
		return (rc);

	/* allocate the blocks.
	 */
	rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
	if (rc == -ENOSPC) {
		jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");
		return -EIO;
	}
	return (rc);
}


/*
 * NAME:	dbDiscardAG()
 *
 * FUNCTION:	attempt to discard (TRIM) all free blocks of a specific AG
 *
 * algorithm:
 *	1) allocate blocks, as large as possible and save them
 *	   while holding IWRITE_LOCK on ipbmap
 *	2) trim all these saved block/length values
 *	3) mark the blocks free again
 *
 * benefit:
 *	- we work only on one ag at a time, minimizing how long we
 *	  need to lock ipbmap
 *	- reading / writing the fs is possible most of the time, even
 *	  while trimming
 *
 * downside:
 *	- we write twice to the dmapctl and dmap pages
 *	- but for me, this seems the best way, better ideas?
 *	  /TR 2012
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode
 *	agno	- ag to trim
 *	minlen	- minimum value of contiguous blocks
 *
 * RETURN VALUES:
 *	s64	- actual number of blocks trimmed
 */
s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	s64 nblocks, blkno;
	u64 trimmed = 0;
	int rc, l2nb;
	struct super_block *sb = ipbmap->i_sb;

	struct range2trim {
		u64 blkno;
		u64 nblocks;
	} *totrim, *tt;

	/* max blkno / nblocks pairs to trim */
	int count = 0, range_cnt;
	u64 max_ranges;

	/* prevent others from writing new stuff here, while trimming */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

	nblocks = bmp->db_agfree[agno];
	max_ranges = nblocks;
	do_div(max_ranges, minlen);
	range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
	totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
	if (totrim == NULL) {
		jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
		IWRITE_UNLOCK(ipbmap);
		return 0;
	}

	tt = totrim;
	while (nblocks >= minlen) {
		l2nb = BLKSTOL2(nblocks);

		/* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
		rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
		if (rc == 0) {
			tt->blkno = blkno;
			tt->nblocks = nblocks;
			tt++; count++;

			/* the whole ag is free, trim now */
			if (bmp->db_agfree[agno] == 0)
				break;

			/* give a hint for the next while */
			nblocks = bmp->db_agfree[agno];
			continue;
		} else if (rc == -ENOSPC) {
			/* search for next smaller log2 block */
			l2nb = BLKSTOL2(nblocks) - 1;
			nblocks = 1LL << l2nb;
		} else {
			/* Trim any already allocated blocks */
			jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
			break;
		}

		/* check, if our trim array is full */
		if (unlikely(count >= range_cnt - 1))
			break;
	}
	IWRITE_UNLOCK(ipbmap);

	tt->nblocks = 0;	/* mark the current end */
	for (tt = totrim; tt->nblocks != 0; tt++) {
		/* when mounted with online discard, dbFree() will
		 * call jfs_issue_discard() itself */
		if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
			jfs_issue_discard(ip, tt->blkno, tt->nblocks);
		dbFree(ip, tt->blkno, tt->nblocks);
		trimmed += tt->nblocks;
	}
	kfree(totrim);

	return trimmed;
}

/*
 * NAME:	dbFindCtl()
 *
 * FUNCTION:	starting at a specified dmap control page level and block
 *		number, search down the dmap control levels for a range of
 *		contiguous free blocks large enough to satisfy an allocation
 *		request for the specified number of free blocks.
 *
 *		if sufficient contiguous free blocks are found, this routine
 *		returns the starting block number within a dmap page that
 *		contains or starts a range of contiguous free blocks that
 *		is sufficient in size.
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	level	- starting dmap control page level.
 *	l2nb	- log2 number of contiguous free blocks desired.
 *	*blkno	- on entry, starting block number for conducting the search.
 *		  on successful return, the first block within a dmap page
 *		  that contains or starts a range of contiguous free blocks.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
	int rc, leafidx, lev;
	s64 b, lblkno;
	struct dmapctl *dcp;
	int budmin;
	struct metapage *mp;

	/* starting at the specified dmap control page level and block
	 * number, search down the dmap control levels for the starting
	 * block number of a dmap page that contains or starts off
	 * sufficient free blocks.
	 */
	for (lev = level, b = *blkno; lev >= 0; lev--) {
		/* get the buffer of the dmap control page for the block
		 * number and level (i.e. L0, L1, L2).
		 */
		lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dcp = (struct dmapctl *) mp->data;
		budmin = dcp->budmin;

		if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
			jfs_error(bmp->db_ipbmap->i_sb,
				  "Corrupt dmapctl page\n");
			release_metapage(mp);
			return -EIO;
		}

		/* search the tree within the dmap control page for
		 * sufficient free space.  if sufficient free space is found,
		 * dbFindLeaf() returns the index of the leaf at which
		 * free space was found.
		 */
		rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);

		/* release the buffer.
		 */
		release_metapage(mp);

		/* space found ?
		 */
		if (rc) {
			if (lev != level) {
				jfs_error(bmp->db_ipbmap->i_sb,
					  "dmap inconsistent\n");
				return -EIO;
			}
			return -ENOSPC;
		}

		/* adjust the block number to reflect the location within
		 * the dmap control page (i.e. the leaf) at which free
		 * space was found.
		 */
		b += (((s64) leafidx) << budmin);

		/* we stop the search at this dmap control page level if
		 * the number of blocks required is greater than or equal
		 * to the maximum number of blocks described at the next
		 * (lower) level.
		 */
		if (l2nb >= budmin)
			break;
	}

	*blkno = b;
	return (0);
}


/*
 * NAME:	dbAllocCtl()
 *
 * FUNCTION:	attempt to allocate a specified number of contiguous
 *		blocks starting within a specific dmap.
 *
 *		this routine is called by higher level routines that search
 *		the dmap control pages above the actual dmaps for contiguous
 *		free space.  the results of successful searches by these
 *		routines are the starting block numbers within dmaps, with
 *		the dmaps themselves containing the desired contiguous free
 *		space or starting a contiguous free space of desired size
 *		that is made up of the blocks of one or more dmaps.  these
 *		calls should not fail due to insufficient resources.
 *
 *		this routine is called in some cases where it is not known
 *		whether it will fail due to insufficient resources.  more
 *		specifically, this occurs when allocating from an allocation
 *		group whose size is equal to the number of blocks per dmap.
 *		in this case, the dmap control pages are not examined prior
 *		to calling this routine (to save pathlength) and the call
 *		might fail.
 *
 *		for a request size that fits within a dmap, this routine relies
 *		upon the dmap's dmtree to find the requested contiguous free
 *		space.
 *		for request sizes that are larger than a dmap, the
 *		requested free space will start at the first block of the
 *		first dmap (i.e. blkno).
 *
 * PARAMETERS:
 *	bmp	- pointer to bmap descriptor
 *	nblocks	- actual number of contiguous free blocks to allocate.
 *	l2nb	- log2 number of contiguous free blocks to allocate.
 *	blkno	- starting block number of the dmap to start the allocation
 *		  from.
 *	results	- on successful return, set to the starting block number
 *		  of the newly allocated range.
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 *
 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
 */
static int
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
	int rc, nb;
	s64 b, lblkno, n;
	struct metapage *mp;
	struct dmap *dp;

	/* check if the allocation request is confined to a single dmap.
	 */
	if (l2nb <= L2BPERDMAP) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL)
			return -EIO;
		dp = (struct dmap *) mp->data;

		/* try to allocate the blocks.
		 */
		rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
		if (rc == 0)
			mark_metapage_dirty(mp);

		release_metapage(mp);

		return (rc);
	}

	/* allocation request involving multiple dmaps. it must start on
	 * a dmap boundary.
	 */
	assert((blkno & (BPERDMAP - 1)) == 0);

	/* allocate the blocks dmap by dmap.
	 */
	for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
		/* get the buffer for the dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			rc = -EIO;
			goto backout;
		}
		dp = (struct dmap *) mp->data;

		/* the dmap better be all free.
		 */
		if (dp->tree.stree[ROOT] != L2BPERDMAP) {
			release_metapage(mp);
			jfs_error(bmp->db_ipbmap->i_sb,
				  "the dmap is not all free\n");
			rc = -EIO;
			goto backout;
		}

		/* determine how many blocks to allocate from this dmap.
		 */
		nb = min_t(s64, n, BPERDMAP);

		/* allocate the blocks from the dmap.
		 */
		if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
			release_metapage(mp);
			goto backout;
		}

		/* write the buffer.
		 */
		write_metapage(mp);
	}

	/* set the results (starting block number) and return.
	 */
	*results = blkno;
	return (0);

	/* something failed in handling an allocation request involving
	 * multiple dmaps.  we'll try to clean up by backing out any
	 * allocation that has already happened for this request.  if
	 * we fail in backing out the allocation, we'll mark the file
	 * system to indicate that blocks have been leaked.
	 */
backout:

	/* try to backout the allocations dmap by dmap.
	 */
	for (n = nblocks - n, b = blkno; n > 0;
	     n -= BPERDMAP, b += BPERDMAP) {
		/* get the buffer for this dmap.
		 */
		lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
		mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			/* could not back out.  mark the file system
			 * to indicate that we have leaked blocks.
			 */
1886 */ 1887 jfs_error(bmp->db_ipbmap->i_sb, 1888 "I/O Error: Block Leakage\n"); 1889 continue; 1890 } 1891 dp = (struct dmap *) mp->data; 1892 1893 /* free the blocks is this dmap. 1894 */ 1895 if (dbFreeDmap(bmp, dp, b, BPERDMAP)) { 1896 /* could not back out. mark the file system 1897 * to indicate that we have leaked blocks. 1898 */ 1899 release_metapage(mp); 1900 jfs_error(bmp->db_ipbmap->i_sb, "Block Leakage\n"); 1901 continue; 1902 } 1903 1904 /* write the buffer. 1905 */ 1906 write_metapage(mp); 1907 } 1908 1909 return (rc); 1910 } 1911 1912 1913 /* 1914 * NAME: dbAllocDmapLev() 1915 * 1916 * FUNCTION: attempt to allocate a specified number of contiguous blocks 1917 * from a specified dmap. 1918 * 1919 * this routine checks if the contiguous blocks are available. 1920 * if so, nblocks of blocks are allocated; otherwise, ENOSPC is 1921 * returned. 1922 * 1923 * PARAMETERS: 1924 * mp - pointer to bmap descriptor 1925 * dp - pointer to dmap to attempt to allocate blocks from. 1926 * l2nb - log2 number of contiguous block desired. 1927 * nblocks - actual number of contiguous block desired. 1928 * results - on successful return, set to the starting block number 1929 * of the newly allocated range. 1930 * 1931 * RETURN VALUES: 1932 * 0 - success 1933 * -ENOSPC - insufficient disk resources 1934 * -EIO - i/o error 1935 * 1936 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or 1937 * IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit; 1938 */ 1939 static int 1940 dbAllocDmapLev(struct bmap * bmp, 1941 struct dmap * dp, int nblocks, int l2nb, s64 * results) 1942 { 1943 s64 blkno; 1944 int leafidx, rc; 1945 1946 /* can't be more than a dmaps worth of blocks */ 1947 assert(l2nb <= L2BPERDMAP); 1948 1949 /* search the tree within the dmap page for sufficient 1950 * free space. if sufficient free space is found, dbFindLeaf() 1951 * returns the index of the leaf at which free space was found. 1952 */ 1953 if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx)) 1954 return -ENOSPC; 1955 1956 /* determine the block number within the file system corresponding 1957 * to the leaf at which free space was found. 1958 */ 1959 blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD); 1960 1961 /* if not all bits of the dmap word are free, get the starting 1962 * bit number within the dmap word of the required string of free 1963 * bits and adjust the block number with this value. 1964 */ 1965 if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN) 1966 blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb); 1967 1968 /* allocate the blocks */ 1969 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0) 1970 *results = blkno; 1971 1972 return (rc); 1973 } 1974 1975 1976 /* 1977 * NAME: dbAllocDmap() 1978 * 1979 * FUNCTION: adjust the disk allocation map to reflect the allocation 1980 * of a specified block range within a dmap. 1981 * 1982 * this routine allocates the specified blocks from the dmap 1983 * through a call to dbAllocBits(). if the allocation of the 1984 * block range causes the maximum string of free blocks within 1985 * the dmap to change (i.e. the value of the root of the dmap's 1986 * dmtree), this routine will cause this change to be reflected 1987 * up through the appropriate levels of the dmap control pages 1988 * by a call to dbAdjCtl() for the L0 dmap control page that 1989 * covers this dmap. 1990 * 1991 * PARAMETERS: 1992 * bmp - pointer to bmap descriptor 1993 * dp - pointer to dmap to allocate the block range from. 
1994 * blkno - starting block number of the block to be allocated. 1995 * nblocks - number of blocks to be allocated. 1996 * 1997 * RETURN VALUES: 1998 * 0 - success 1999 * -EIO - i/o error 2000 * 2001 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2002 */ 2003 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, 2004 int nblocks) 2005 { 2006 s8 oldroot; 2007 int rc; 2008 2009 /* save the current value of the root (i.e. maximum free string) 2010 * of the dmap tree. 2011 */ 2012 oldroot = dp->tree.stree[ROOT]; 2013 2014 /* allocate the specified (blocks) bits */ 2015 dbAllocBits(bmp, dp, blkno, nblocks); 2016 2017 /* if the root has not changed, done. */ 2018 if (dp->tree.stree[ROOT] == oldroot) 2019 return (0); 2020 2021 /* root changed. bubble the change up to the dmap control pages. 2022 * if the adjustment of the upper level control pages fails, 2023 * backout the bit allocation (thus making everything consistent). 2024 */ 2025 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0))) 2026 dbFreeBits(bmp, dp, blkno, nblocks); 2027 2028 return (rc); 2029 } 2030 2031 2032 /* 2033 * NAME: dbFreeDmap() 2034 * 2035 * FUNCTION: adjust the disk allocation map to reflect the allocation 2036 * of a specified block range within a dmap. 2037 * 2038 * this routine frees the specified blocks from the dmap through 2039 * a call to dbFreeBits(). if the deallocation of the block range 2040 * causes the maximum string of free blocks within the dmap to 2041 * change (i.e. the value of the root of the dmap's dmtree), this 2042 * routine will cause this change to be reflected up through the 2043 * appropriate levels of the dmap control pages by a call to 2044 * dbAdjCtl() for the L0 dmap control page that covers this dmap. 2045 * 2046 * PARAMETERS: 2047 * bmp - pointer to bmap descriptor 2048 * dp - pointer to dmap to free the block range from. 2049 * blkno - starting block number of the block to be freed. 2050 * nblocks - number of blocks to be freed. 2051 * 2052 * RETURN VALUES: 2053 * 0 - success 2054 * -EIO - i/o error 2055 * 2056 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2057 */ 2058 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno, 2059 int nblocks) 2060 { 2061 s8 oldroot; 2062 int rc = 0, word; 2063 2064 /* save the current value of the root (i.e. maximum free string) 2065 * of the dmap tree. 2066 */ 2067 oldroot = dp->tree.stree[ROOT]; 2068 2069 /* free the specified (blocks) bits */ 2070 rc = dbFreeBits(bmp, dp, blkno, nblocks); 2071 2072 /* if error or the root has not changed, done. */ 2073 if (rc || (dp->tree.stree[ROOT] == oldroot)) 2074 return (rc); 2075 2076 /* root changed. bubble the change up to the dmap control pages. 2077 * if the adjustment of the upper level control pages fails, 2078 * backout the deallocation. 2079 */ 2080 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) { 2081 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD; 2082 2083 /* as part of backing out the deallocation, we will have 2084 * to back split the dmap tree if the deallocation caused 2085 * the freed blocks to become part of a larger binary buddy 2086 * system. 2087 */ 2088 if (dp->tree.stree[word] == NOFREE) 2089 dbBackSplit((dmtree_t *) & dp->tree, word); 2090 2091 dbAllocBits(bmp, dp, blkno, nblocks); 2092 } 2093 2094 return (rc); 2095 } 2096 2097 2098 /* 2099 * NAME: dbAllocBits() 2100 * 2101 * FUNCTION: allocate a specified block range from a dmap. 
2102 * 2103 * this routine updates the dmap to reflect the working 2104 * state allocation of the specified block range. it directly 2105 * updates the bits of the working map and causes the adjustment 2106 * of the binary buddy system described by the dmap's dmtree 2107 * leaves to reflect the bits allocated. it also causes the 2108 * dmap's dmtree, as a whole, to reflect the allocated range. 2109 * 2110 * PARAMETERS: 2111 * bmp - pointer to bmap descriptor 2112 * dp - pointer to dmap to allocate bits from. 2113 * blkno - starting block number of the bits to be allocated. 2114 * nblocks - number of bits to be allocated. 2115 * 2116 * RETURN VALUES: none 2117 * 2118 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2119 */ 2120 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 2121 int nblocks) 2122 { 2123 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno; 2124 dmtree_t *tp = (dmtree_t *) & dp->tree; 2125 int size; 2126 s8 *leaf; 2127 2128 /* pick up a pointer to the leaves of the dmap tree */ 2129 leaf = dp->tree.stree + LEAFIND; 2130 2131 /* determine the bit number and word within the dmap of the 2132 * starting block. 2133 */ 2134 dbitno = blkno & (BPERDMAP - 1); 2135 word = dbitno >> L2DBWORD; 2136 2137 /* block range better be within the dmap */ 2138 assert(dbitno + nblocks <= BPERDMAP); 2139 2140 /* allocate the bits of the dmap's words corresponding to the block 2141 * range. not all bits of the first and last words may be contained 2142 * within the block range. if this is the case, we'll work against 2143 * those words (i.e. partial first and/or last) on an individual basis 2144 * (a single pass), allocating the bits of interest by hand and 2145 * updating the leaf corresponding to the dmap word. a single pass 2146 * will be used for all dmap words fully contained within the 2147 * specified range. within this pass, the bits of all fully contained 2148 * dmap words will be marked as free in a single shot and the leaves 2149 * will be updated. a single leaf may describe the free space of 2150 * multiple dmap words, so we may update only a subset of the actual 2151 * leaves corresponding to the dmap words of the block range. 2152 */ 2153 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 2154 /* determine the bit number within the word and 2155 * the number of bits within the word. 2156 */ 2157 wbitno = dbitno & (DBWORD - 1); 2158 nb = min(rembits, DBWORD - wbitno); 2159 2160 /* check if only part of a word is to be allocated. 2161 */ 2162 if (nb < DBWORD) { 2163 /* allocate (set to 1) the appropriate bits within 2164 * this dmap word. 2165 */ 2166 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb) 2167 >> wbitno); 2168 2169 /* update the leaf for this dmap word. in addition 2170 * to setting the leaf value to the binary buddy max 2171 * of the updated dmap word, dbSplit() will split 2172 * the binary system of the leaves if need be. 2173 */ 2174 dbSplit(tp, word, BUDMIN, 2175 dbMaxBud((u8 *) & dp->wmap[word])); 2176 2177 word += 1; 2178 } else { 2179 /* one or more dmap words are fully contained 2180 * within the block range. determine how many 2181 * words and allocate (set to 1) the bits of these 2182 * words. 2183 */ 2184 nwords = rembits >> L2DBWORD; 2185 memset(&dp->wmap[word], (int) ONES, nwords * 4); 2186 2187 /* determine how many bits. 2188 */ 2189 nb = nwords << L2DBWORD; 2190 2191 /* now update the appropriate leaves to reflect 2192 * the allocated words. 
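 *
 * (worked example, assuming BUDMIN == 5, i.e. one leaf per 32-bit
 * dmap word: say nwords == 8 and leaf[word] == 7, meaning this leaf
 * heads a free binary buddy of 2^7 blocks == 4 words.  size is then
 * 7, since the leaf covers fewer blocks than the 8 words being
 * allocated; dbSplit() sets that leaf to NOFREE, and
 * nw = BUDSIZE(7, BUDMIN) == 4, so this pass consumes 4 of the 8
 * words and the next pass starts at the following buddy.)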
2193 */ 2194 for (; nwords > 0; nwords -= nw) { 2195 if (leaf[word] < BUDMIN) { 2196 jfs_error(bmp->db_ipbmap->i_sb, 2197 "leaf page corrupt\n"); 2198 break; 2199 } 2200 2201 /* determine what the leaf value should be 2202 * updated to as the minimum of the l2 number 2203 * of bits being allocated and the l2 number 2204 * of bits currently described by this leaf. 2205 */ 2206 size = min_t(int, leaf[word], 2207 NLSTOL2BSZ(nwords)); 2208 2209 /* update the leaf to reflect the allocation. 2210 * in addition to setting the leaf value to 2211 * NOFREE, dbSplit() will split the binary 2212 * system of the leaves to reflect the current 2213 * allocation (size). 2214 */ 2215 dbSplit(tp, word, size, NOFREE); 2216 2217 /* get the number of dmap words handled */ 2218 nw = BUDSIZE(size, BUDMIN); 2219 word += nw; 2220 } 2221 } 2222 } 2223 2224 /* update the free count for this dmap */ 2225 le32_add_cpu(&dp->nfree, -nblocks); 2226 2227 BMAP_LOCK(bmp); 2228 2229 /* if this allocation group is completely free, 2230 * update the maximum allocation group number if this allocation 2231 * group is the new max. 2232 */ 2233 agno = blkno >> bmp->db_agl2size; 2234 if (agno > bmp->db_maxag) 2235 bmp->db_maxag = agno; 2236 2237 /* update the free count for the allocation group and map */ 2238 bmp->db_agfree[agno] -= nblocks; 2239 bmp->db_nfree -= nblocks; 2240 2241 BMAP_UNLOCK(bmp); 2242 } 2243 2244 2245 /* 2246 * NAME: dbFreeBits() 2247 * 2248 * FUNCTION: free a specified block range from a dmap. 2249 * 2250 * this routine updates the dmap to reflect the working 2251 * state allocation of the specified block range. it directly 2252 * updates the bits of the working map and causes the adjustment 2253 * of the binary buddy system described by the dmap's dmtree 2254 * leaves to reflect the bits freed. it also causes the dmap's 2255 * dmtree, as a whole, to reflect the deallocated range. 2256 * 2257 * PARAMETERS: 2258 * bmp - pointer to bmap descriptor 2259 * dp - pointer to dmap to free bits from. 2260 * blkno - starting block number of the bits to be freed. 2261 * nblocks - number of bits to be freed. 2262 * 2263 * RETURN VALUES: 0 for success 2264 * 2265 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2266 */ 2267 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno, 2268 int nblocks) 2269 { 2270 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno; 2271 dmtree_t *tp = (dmtree_t *) & dp->tree; 2272 int rc = 0; 2273 int size; 2274 2275 /* determine the bit number and word within the dmap of the 2276 * starting block. 2277 */ 2278 dbitno = blkno & (BPERDMAP - 1); 2279 word = dbitno >> L2DBWORD; 2280 2281 /* block range better be within the dmap. 2282 */ 2283 assert(dbitno + nblocks <= BPERDMAP); 2284 2285 /* free the bits of the dmaps words corresponding to the block range. 2286 * not all bits of the first and last words may be contained within 2287 * the block range. if this is the case, we'll work against those 2288 * words (i.e. partial first and/or last) on an individual basis 2289 * (a single pass), freeing the bits of interest by hand and updating 2290 * the leaf corresponding to the dmap word. a single pass will be used 2291 * for all dmap words fully contained within the specified range. 2292 * within this pass, the bits of all fully contained dmap words will 2293 * be marked as free in a single shot and the leaves will be updated. 
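 *
 * (illustrative example of the partial-word case handled below:
 * freeing nb == 10 bits at wbitno == 4 clears the word against the
 * mask ~(ONES << (DBWORD - 10) >> 4), i.e. ten zero bits starting at
 * bit 4 of the word counting from the most significant bit; the
 * word's leaf is then recomputed with dbMaxBud() and handed to
 * dbJoin().)
 *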
a 2294 * single leaf may describe the free space of multiple dmap words, 2295 * so we may update only a subset of the actual leaves corresponding 2296 * to the dmap words of the block range. 2297 * 2298 * dbJoin() is used to update leaf values and will join the binary 2299 * buddy system of the leaves if the new leaf values indicate this 2300 * should be done. 2301 */ 2302 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 2303 /* determine the bit number within the word and 2304 * the number of bits within the word. 2305 */ 2306 wbitno = dbitno & (DBWORD - 1); 2307 nb = min(rembits, DBWORD - wbitno); 2308 2309 /* check if only part of a word is to be freed. 2310 */ 2311 if (nb < DBWORD) { 2312 /* free (zero) the appropriate bits within this 2313 * dmap word. 2314 */ 2315 dp->wmap[word] &= 2316 cpu_to_le32(~(ONES << (DBWORD - nb) 2317 >> wbitno)); 2318 2319 /* update the leaf for this dmap word. 2320 */ 2321 rc = dbJoin(tp, word, 2322 dbMaxBud((u8 *) & dp->wmap[word])); 2323 if (rc) 2324 return rc; 2325 2326 word += 1; 2327 } else { 2328 /* one or more dmap words are fully contained 2329 * within the block range. determine how many 2330 * words and free (zero) the bits of these words. 2331 */ 2332 nwords = rembits >> L2DBWORD; 2333 memset(&dp->wmap[word], 0, nwords * 4); 2334 2335 /* determine how many bits. 2336 */ 2337 nb = nwords << L2DBWORD; 2338 2339 /* now update the appropriate leaves to reflect 2340 * the freed words. 2341 */ 2342 for (; nwords > 0; nwords -= nw) { 2343 /* determine what the leaf value should be 2344 * updated to as the minimum of the l2 number 2345 * of bits being freed and the l2 (max) number 2346 * of bits that can be described by this leaf. 2347 */ 2348 size = 2349 min(LITOL2BSZ 2350 (word, L2LPERDMAP, BUDMIN), 2351 NLSTOL2BSZ(nwords)); 2352 2353 /* update the leaf. 2354 */ 2355 rc = dbJoin(tp, word, size); 2356 if (rc) 2357 return rc; 2358 2359 /* get the number of dmap words handled. 2360 */ 2361 nw = BUDSIZE(size, BUDMIN); 2362 word += nw; 2363 } 2364 } 2365 } 2366 2367 /* update the free count for this dmap. 2368 */ 2369 le32_add_cpu(&dp->nfree, nblocks); 2370 2371 BMAP_LOCK(bmp); 2372 2373 /* update the free count for the allocation group and 2374 * map. 2375 */ 2376 agno = blkno >> bmp->db_agl2size; 2377 bmp->db_nfree += nblocks; 2378 bmp->db_agfree[agno] += nblocks; 2379 2380 /* check if this allocation group is not completely free and 2381 * if it is currently the maximum (rightmost) allocation group. 2382 * if so, establish the new maximum allocation group number by 2383 * searching left for the first allocation group with allocation. 2384 */ 2385 if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) || 2386 (agno == bmp->db_numag - 1 && 2387 bmp->db_agfree[agno] == (bmp-> db_mapsize & (BPERDMAP - 1)))) { 2388 while (bmp->db_maxag > 0) { 2389 bmp->db_maxag -= 1; 2390 if (bmp->db_agfree[bmp->db_maxag] != 2391 bmp->db_agsize) 2392 break; 2393 } 2394 2395 /* re-establish the allocation group preference if the 2396 * current preference is right of the maximum allocation 2397 * group. 2398 */ 2399 if (bmp->db_agpref > bmp->db_maxag) 2400 bmp->db_agpref = bmp->db_maxag; 2401 } 2402 2403 BMAP_UNLOCK(bmp); 2404 2405 return 0; 2406 } 2407 2408 2409 /* 2410 * NAME: dbAdjCtl() 2411 * 2412 * FUNCTION: adjust a dmap control page at a specified level to reflect 2413 * the change in a lower level dmap or dmap control page's 2414 * maximum string of free blocks (i.e. 
a change in the root
2415 * of the lower level object's dmtree) due to the allocation
2416 * or deallocation of a range of blocks within a single dmap.
2417 *
2418 * on entry, this routine is provided with the new value of
2419 * the lower level dmap or dmap control page root and the
2420 * starting block number of the block range whose allocation
2421 * or deallocation resulted in the root change. this range
2422 * is represented by a single leaf of the current dmapctl
2423 * and the leaf will be updated with this value, possibly
2424 * causing a binary buddy system within the leaves to be
2425 * split or joined. the update may also cause the dmapctl's
2426 * dmtree to be updated.
2427 *
2428 * if the adjustment of the dmap control page, itself, causes its
2429 * root to change, this change will be bubbled up to the next dmap
2430 * control level by a recursive call to this routine, specifying
2431 * the new root value and the next dmap control page level to
2432 * be adjusted.
2433 * PARAMETERS:
2434 * bmp - pointer to bmap descriptor
2435 * blkno - the first block of a block range within a dmap. it is
2436 * the allocation or deallocation of this block range that
2437 * requires the dmap control page to be adjusted.
2438 * newval - the new value of the lower level dmap or dmap control
2439 * page root.
2440 * alloc - 'true' if adjustment is due to an allocation.
2441 * level - current level of dmap control page (i.e. L0, L1, L2) to
2442 * be adjusted.
2443 *
2444 * RETURN VALUES:
2445 * 0 - success
2446 * -EIO - i/o error
2447 *
2448 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2449 */
2450 static int
2451 dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
2452 {
2453 struct metapage *mp;
2454 s8 oldroot;
2455 int oldval;
2456 s64 lblkno;
2457 struct dmapctl *dcp;
2458 int rc, leafno, ti;
2459
2460 /* get the buffer for the dmap control page for the specified
2461 * block number and control page level.
2462 */
2463 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
2464 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
2465 if (mp == NULL)
2466 return -EIO;
2467 dcp = (struct dmapctl *) mp->data;
2468
2469 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
2470 jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
2471 release_metapage(mp);
2472 return -EIO;
2473 }
2474
2475 /* determine the leaf number corresponding to the block and
2476 * the index within the dmap control tree.
2477 */
2478 leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
2479 ti = leafno + le32_to_cpu(dcp->leafidx);
2480
2481 /* save the current leaf value and the current root level (i.e.
2482 * maximum l2 free string described by this dmapctl).
2483 */
2484 oldval = dcp->stree[ti];
2485 oldroot = dcp->stree[ROOT];
2486
2487 /* check if this is a control page update for an allocation.
2488 * if so, update the leaf to reflect the new leaf value using
2489 * dbSplit(); otherwise (deallocation), use dbJoin() to update
2490 * the leaf with the new value. in addition to updating the
2491 * leaf, dbSplit() will also split the binary buddy system of
2492 * the leaves, if required, and bubble new values within the
2493 * dmapctl tree, if required. similarly, dbJoin() will join
2494 * the binary buddy system of leaves and bubble new values up
2495 * the dmapctl tree as required by the new leaf value.
2496 */
2497 if (alloc) {
2498 /* check if we are in the middle of a binary buddy
2499 * system.
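 * (in the code below this case shows up as oldval == NOFREE: the
 * leaf does not head its own buddy, its free space being accounted
 * for by a leaf to its left.)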
this happens when we are performing the 2500 * first allocation out of an allocation group that 2501 * is part (not the first part) of a larger binary 2502 * buddy system. if we are in the middle, back split 2503 * the system prior to calling dbSplit() which assumes 2504 * that it is at the front of a binary buddy system. 2505 */ 2506 if (oldval == NOFREE) { 2507 rc = dbBackSplit((dmtree_t *) dcp, leafno); 2508 if (rc) { 2509 release_metapage(mp); 2510 return rc; 2511 } 2512 oldval = dcp->stree[ti]; 2513 } 2514 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval); 2515 } else { 2516 rc = dbJoin((dmtree_t *) dcp, leafno, newval); 2517 if (rc) { 2518 release_metapage(mp); 2519 return rc; 2520 } 2521 } 2522 2523 /* check if the root of the current dmap control page changed due 2524 * to the update and if the current dmap control page is not at 2525 * the current top level (i.e. L0, L1, L2) of the map. if so (i.e. 2526 * root changed and this is not the top level), call this routine 2527 * again (recursion) for the next higher level of the mapping to 2528 * reflect the change in root for the current dmap control page. 2529 */ 2530 if (dcp->stree[ROOT] != oldroot) { 2531 /* are we below the top level of the map. if so, 2532 * bubble the root up to the next higher level. 2533 */ 2534 if (level < bmp->db_maxlevel) { 2535 /* bubble up the new root of this dmap control page to 2536 * the next level. 2537 */ 2538 if ((rc = 2539 dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc, 2540 level + 1))) { 2541 /* something went wrong in bubbling up the new 2542 * root value, so backout the changes to the 2543 * current dmap control page. 2544 */ 2545 if (alloc) { 2546 dbJoin((dmtree_t *) dcp, leafno, 2547 oldval); 2548 } else { 2549 /* the dbJoin() above might have 2550 * caused a larger binary buddy system 2551 * to form and we may now be in the 2552 * middle of it. if this is the case, 2553 * back split the buddies. 2554 */ 2555 if (dcp->stree[ti] == NOFREE) 2556 dbBackSplit((dmtree_t *) 2557 dcp, leafno); 2558 dbSplit((dmtree_t *) dcp, leafno, 2559 dcp->budmin, oldval); 2560 } 2561 2562 /* release the buffer and return the error. 2563 */ 2564 release_metapage(mp); 2565 return (rc); 2566 } 2567 } else { 2568 /* we're at the top level of the map. update 2569 * the bmap control page to reflect the size 2570 * of the maximum free buddy system. 2571 */ 2572 assert(level == bmp->db_maxlevel); 2573 if (bmp->db_maxfreebud != oldroot) { 2574 jfs_error(bmp->db_ipbmap->i_sb, 2575 "the maximum free buddy is not the old root\n"); 2576 } 2577 bmp->db_maxfreebud = dcp->stree[ROOT]; 2578 } 2579 } 2580 2581 /* write the buffer. 2582 */ 2583 write_metapage(mp); 2584 2585 return (0); 2586 } 2587 2588 2589 /* 2590 * NAME: dbSplit() 2591 * 2592 * FUNCTION: update the leaf of a dmtree with a new value, splitting 2593 * the leaf from the binary buddy system of the dmtree's 2594 * leaves, as required. 2595 * 2596 * PARAMETERS: 2597 * tp - pointer to the tree containing the leaf. 2598 * leafno - the number of the leaf to be updated. 2599 * splitsz - the size the binary buddy system starting at the leaf 2600 * must be split to, specified as the log2 number of blocks. 2601 * newval - the new value for the leaf. 
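 *
 * (worked example for a dmap-level tree, where dmt_budmin is
 * BUDMIN == 5: a leaf with value 8 heads a free buddy of 2^8 blocks
 * spanning eight leaves.  splitting it down to splitsz == 6 gives
 * its right-half buddy (leafno ^ 4) the value 7, then the right half
 * of the remaining half (leafno ^ 2) the value 6, and finally the
 * leaf itself is set to newval by dbAdjTree().)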
2602 * 2603 * RETURN VALUES: none 2604 * 2605 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2606 */ 2607 static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval) 2608 { 2609 int budsz; 2610 int cursz; 2611 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2612 2613 /* check if the leaf needs to be split. 2614 */ 2615 if (leaf[leafno] > tp->dmt_budmin) { 2616 /* the split occurs by cutting the buddy system in half 2617 * at the specified leaf until we reach the specified 2618 * size. pick up the starting split size (current size 2619 * - 1 in l2) and the corresponding buddy size. 2620 */ 2621 cursz = leaf[leafno] - 1; 2622 budsz = BUDSIZE(cursz, tp->dmt_budmin); 2623 2624 /* split until we reach the specified size. 2625 */ 2626 while (cursz >= splitsz) { 2627 /* update the buddy's leaf with its new value. 2628 */ 2629 dbAdjTree(tp, leafno ^ budsz, cursz); 2630 2631 /* on to the next size and buddy. 2632 */ 2633 cursz -= 1; 2634 budsz >>= 1; 2635 } 2636 } 2637 2638 /* adjust the dmap tree to reflect the specified leaf's new 2639 * value. 2640 */ 2641 dbAdjTree(tp, leafno, newval); 2642 } 2643 2644 2645 /* 2646 * NAME: dbBackSplit() 2647 * 2648 * FUNCTION: back split the binary buddy system of dmtree leaves 2649 * that hold a specified leaf until the specified leaf 2650 * starts its own binary buddy system. 2651 * 2652 * the allocators typically perform allocations at the start 2653 * of binary buddy systems and dbSplit() is used to accomplish 2654 * any required splits. in some cases, however, allocation 2655 * may occur in the middle of a binary system and requires a 2656 * back split, with the split proceeding out from the middle of 2657 * the system (less efficient) rather than the start of the 2658 * system (more efficient). the cases in which a back split 2659 * is required are rare and are limited to the first allocation 2660 * within an allocation group which is a part (not first part) 2661 * of a larger binary buddy system and a few exception cases 2662 * in which a previous join operation must be backed out. 2663 * 2664 * PARAMETERS: 2665 * tp - pointer to the tree containing the leaf. 2666 * leafno - the number of the leaf to be updated. 2667 * 2668 * RETURN VALUES: none 2669 * 2670 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit; 2671 */ 2672 static int dbBackSplit(dmtree_t * tp, int leafno) 2673 { 2674 int budsz, bud, w, bsz, size; 2675 int cursz; 2676 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2677 2678 /* leaf should be part (not first part) of a binary 2679 * buddy system. 2680 */ 2681 assert(leaf[leafno] == NOFREE); 2682 2683 /* the back split is accomplished by iteratively finding the leaf 2684 * that starts the buddy system that contains the specified leaf and 2685 * splitting that system in two. this iteration continues until 2686 * the specified leaf becomes the start of a buddy system. 2687 * 2688 * determine maximum possible l2 size for the specified leaf. 2689 */ 2690 size = 2691 LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs), 2692 tp->dmt_budmin); 2693 2694 /* determine the number of leaves covered by this size. this 2695 * is the buddy size that we will start with as we search for 2696 * the buddy system that contains the specified leaf. 2697 */ 2698 budsz = BUDSIZE(size, tp->dmt_budmin); 2699 2700 /* back split. 2701 */ 2702 while (leaf[leafno] == NOFREE) { 2703 /* find the leftmost buddy leaf. 
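 * (the buddy of leaf w at buddy size bsz is w ^ bsz; starting
 * from the specified leaf, the loop keeps the smaller, i.e.
 * leftmost, index of each pair and doubles bsz until it finds a
 * buddy whose leaf is not NOFREE; that leaf starts the buddy
 * system containing leafno, and dbSplit() then cuts that system
 * in half.)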
2704 */ 2705 for (w = leafno, bsz = budsz;; bsz <<= 1, 2706 w = (w < bud) ? w : bud) { 2707 if (bsz >= le32_to_cpu(tp->dmt_nleafs)) { 2708 jfs_err("JFS: block map error in dbBackSplit"); 2709 return -EIO; 2710 } 2711 2712 /* determine the buddy. 2713 */ 2714 bud = w ^ bsz; 2715 2716 /* check if this buddy is the start of the system. 2717 */ 2718 if (leaf[bud] != NOFREE) { 2719 /* split the leaf at the start of the 2720 * system in two. 2721 */ 2722 cursz = leaf[bud] - 1; 2723 dbSplit(tp, bud, cursz, cursz); 2724 break; 2725 } 2726 } 2727 } 2728 2729 if (leaf[leafno] != size) { 2730 jfs_err("JFS: wrong leaf value in dbBackSplit"); 2731 return -EIO; 2732 } 2733 return 0; 2734 } 2735 2736 2737 /* 2738 * NAME: dbJoin() 2739 * 2740 * FUNCTION: update the leaf of a dmtree with a new value, joining 2741 * the leaf with other leaves of the dmtree into a multi-leaf 2742 * binary buddy system, as required. 2743 * 2744 * PARAMETERS: 2745 * tp - pointer to the tree containing the leaf. 2746 * leafno - the number of the leaf to be updated. 2747 * newval - the new value for the leaf. 2748 * 2749 * RETURN VALUES: none 2750 */ 2751 static int dbJoin(dmtree_t * tp, int leafno, int newval) 2752 { 2753 int budsz, buddy; 2754 s8 *leaf; 2755 2756 /* can the new leaf value require a join with other leaves ? 2757 */ 2758 if (newval >= tp->dmt_budmin) { 2759 /* pickup a pointer to the leaves of the tree. 2760 */ 2761 leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx); 2762 2763 /* try to join the specified leaf into a large binary 2764 * buddy system. the join proceeds by attempting to join 2765 * the specified leafno with its buddy (leaf) at new value. 2766 * if the join occurs, we attempt to join the left leaf 2767 * of the joined buddies with its buddy at new value + 1. 2768 * we continue to join until we find a buddy that cannot be 2769 * joined (does not have a value equal to the size of the 2770 * last join) or until all leaves have been joined into a 2771 * single system. 2772 * 2773 * get the buddy size (number of words covered) of 2774 * the new value. 2775 */ 2776 budsz = BUDSIZE(newval, tp->dmt_budmin); 2777 2778 /* try to join. 2779 */ 2780 while (budsz < le32_to_cpu(tp->dmt_nleafs)) { 2781 /* get the buddy leaf. 2782 */ 2783 buddy = leafno ^ budsz; 2784 2785 /* if the leaf's new value is greater than its 2786 * buddy's value, we join no more. 2787 */ 2788 if (newval > leaf[buddy]) 2789 break; 2790 2791 /* It shouldn't be less */ 2792 if (newval < leaf[buddy]) 2793 return -EIO; 2794 2795 /* check which (leafno or buddy) is the left buddy. 2796 * the left buddy gets to claim the blocks resulting 2797 * from the join while the right gets to claim none. 2798 * the left buddy is also eligible to participate in 2799 * a join at the next higher level while the right 2800 * is not. 2801 * 2802 */ 2803 if (leafno < buddy) { 2804 /* leafno is the left buddy. 2805 */ 2806 dbAdjTree(tp, buddy, NOFREE); 2807 } else { 2808 /* buddy is the left buddy and becomes 2809 * leafno. 2810 */ 2811 dbAdjTree(tp, leafno, NOFREE); 2812 leafno = buddy; 2813 } 2814 2815 /* on to try the next join. 2816 */ 2817 newval += 1; 2818 budsz <<= 1; 2819 } 2820 } 2821 2822 /* update the leaf value. 2823 */ 2824 dbAdjTree(tp, leafno, newval); 2825 2826 return 0; 2827 } 2828 2829 2830 /* 2831 * NAME: dbAdjTree() 2832 * 2833 * FUNCTION: update a leaf of a dmtree with a new value, adjusting 2834 * the dmtree, as required, to reflect the new leaf value. 2835 * the combination of any buddies must already be done before 2836 * this is called. 
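 *
 * (illustrative note on the index arithmetic used below: the dmtree
 * is stored as an array in which node 0 is the ROOT, nodes 1..4 are
 * its children, nodes 5..8 the children of node 1, and so on.  for a
 * node at index lp, ((lp - 1) & ~0x03) + 1 is the first node of its
 * four-node group and (lp - 1) >> 2 is the group's parent; node 7,
 * for example, belongs to group 5..8, whose parent is node 1.)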
2837 * 2838 * PARAMETERS: 2839 * tp - pointer to the tree to be adjusted. 2840 * leafno - the number of the leaf to be updated. 2841 * newval - the new value for the leaf. 2842 * 2843 * RETURN VALUES: none 2844 */ 2845 static void dbAdjTree(dmtree_t * tp, int leafno, int newval) 2846 { 2847 int lp, pp, k; 2848 int max; 2849 2850 /* pick up the index of the leaf for this leafno. 2851 */ 2852 lp = leafno + le32_to_cpu(tp->dmt_leafidx); 2853 2854 /* is the current value the same as the old value ? if so, 2855 * there is nothing to do. 2856 */ 2857 if (tp->dmt_stree[lp] == newval) 2858 return; 2859 2860 /* set the new value. 2861 */ 2862 tp->dmt_stree[lp] = newval; 2863 2864 /* bubble the new value up the tree as required. 2865 */ 2866 for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) { 2867 /* get the index of the first leaf of the 4 leaf 2868 * group containing the specified leaf (leafno). 2869 */ 2870 lp = ((lp - 1) & ~0x03) + 1; 2871 2872 /* get the index of the parent of this 4 leaf group. 2873 */ 2874 pp = (lp - 1) >> 2; 2875 2876 /* determine the maximum of the 4 leaves. 2877 */ 2878 max = TREEMAX(&tp->dmt_stree[lp]); 2879 2880 /* if the maximum of the 4 is the same as the 2881 * parent's value, we're done. 2882 */ 2883 if (tp->dmt_stree[pp] == max) 2884 break; 2885 2886 /* parent gets new value. 2887 */ 2888 tp->dmt_stree[pp] = max; 2889 2890 /* parent becomes leaf for next go-round. 2891 */ 2892 lp = pp; 2893 } 2894 } 2895 2896 2897 /* 2898 * NAME: dbFindLeaf() 2899 * 2900 * FUNCTION: search a dmtree_t for sufficient free blocks, returning 2901 * the index of a leaf describing the free blocks if 2902 * sufficient free blocks are found. 2903 * 2904 * the search starts at the top of the dmtree_t tree and 2905 * proceeds down the tree to the leftmost leaf with sufficient 2906 * free space. 2907 * 2908 * PARAMETERS: 2909 * tp - pointer to the tree to be searched. 2910 * l2nb - log2 number of free blocks to search for. 2911 * leafidx - return pointer to be set to the index of the leaf 2912 * describing at least l2nb free blocks if sufficient 2913 * free blocks are found. 2914 * 2915 * RETURN VALUES: 2916 * 0 - success 2917 * -ENOSPC - insufficient free blocks. 2918 */ 2919 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx) 2920 { 2921 int ti, n = 0, k, x = 0; 2922 2923 /* first check the root of the tree to see if there is 2924 * sufficient free space. 2925 */ 2926 if (l2nb > tp->dmt_stree[ROOT]) 2927 return -ENOSPC; 2928 2929 /* sufficient free space available. now search down the tree 2930 * starting at the next level for the leftmost leaf that 2931 * describes sufficient free space. 2932 */ 2933 for (k = le32_to_cpu(tp->dmt_height), ti = 1; 2934 k > 0; k--, ti = ((ti + n) << 2) + 1) { 2935 /* search the four nodes at this level, starting from 2936 * the left. 2937 */ 2938 for (x = ti, n = 0; n < 4; n++) { 2939 /* sufficient free space found. move to the next 2940 * level (or quit if this is the last level). 2941 */ 2942 if (l2nb <= tp->dmt_stree[x + n]) 2943 break; 2944 } 2945 2946 /* better have found something since the higher 2947 * levels of the tree said it was here. 2948 */ 2949 assert(n < 4); 2950 } 2951 2952 /* set the return to the leftmost leaf describing sufficient 2953 * free space. 2954 */ 2955 *leafidx = x + n - le32_to_cpu(tp->dmt_leafidx); 2956 2957 return (0); 2958 } 2959 2960 2961 /* 2962 * NAME: dbFindBits() 2963 * 2964 * FUNCTION: find a specified number of binary buddy free bits within a 2965 * dmap bitmap word value. 
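 * (for example, with l2nb == 2 the routine scans for four adjacent
 * free bits at an offset that is a multiple of four, working from
 * the most significant end of the word.)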
2966 * 2967 * this routine searches the bitmap value for (1 << l2nb) free 2968 * bits at (1 << l2nb) alignments within the value. 2969 * 2970 * PARAMETERS: 2971 * word - dmap bitmap word value. 2972 * l2nb - number of free bits specified as a log2 number. 2973 * 2974 * RETURN VALUES: 2975 * starting bit number of free bits. 2976 */ 2977 static int dbFindBits(u32 word, int l2nb) 2978 { 2979 int bitno, nb; 2980 u32 mask; 2981 2982 /* get the number of bits. 2983 */ 2984 nb = 1 << l2nb; 2985 assert(nb <= DBWORD); 2986 2987 /* complement the word so we can use a mask (i.e. 0s represent 2988 * free bits) and compute the mask. 2989 */ 2990 word = ~word; 2991 mask = ONES << (DBWORD - nb); 2992 2993 /* scan the word for nb free bits at nb alignments. 2994 */ 2995 for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) { 2996 if ((mask & word) == mask) 2997 break; 2998 } 2999 3000 ASSERT(bitno < 32); 3001 3002 /* return the bit number. 3003 */ 3004 return (bitno); 3005 } 3006 3007 3008 /* 3009 * NAME: dbMaxBud(u8 *cp) 3010 * 3011 * FUNCTION: determine the largest binary buddy string of free 3012 * bits within 32-bits of the map. 3013 * 3014 * PARAMETERS: 3015 * cp - pointer to the 32-bit value. 3016 * 3017 * RETURN VALUES: 3018 * largest binary buddy of free bits within a dmap word. 3019 */ 3020 static int dbMaxBud(u8 * cp) 3021 { 3022 signed char tmp1, tmp2; 3023 3024 /* check if the wmap word is all free. if so, the 3025 * free buddy size is BUDMIN. 3026 */ 3027 if (*((uint *) cp) == 0) 3028 return (BUDMIN); 3029 3030 /* check if the wmap word is half free. if so, the 3031 * free buddy size is BUDMIN-1. 3032 */ 3033 if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0) 3034 return (BUDMIN - 1); 3035 3036 /* not all free or half free. determine the free buddy 3037 * size thru table lookup using quarters of the wmap word. 3038 */ 3039 tmp1 = max(budtab[cp[2]], budtab[cp[3]]); 3040 tmp2 = max(budtab[cp[0]], budtab[cp[1]]); 3041 return (max(tmp1, tmp2)); 3042 } 3043 3044 3045 /* 3046 * NAME: cnttz(uint word) 3047 * 3048 * FUNCTION: determine the number of trailing zeros within a 32-bit 3049 * value. 3050 * 3051 * PARAMETERS: 3052 * value - 32-bit value to be examined. 3053 * 3054 * RETURN VALUES: 3055 * count of trailing zeros 3056 */ 3057 static int cnttz(u32 word) 3058 { 3059 int n; 3060 3061 for (n = 0; n < 32; n++, word >>= 1) { 3062 if (word & 0x01) 3063 break; 3064 } 3065 3066 return (n); 3067 } 3068 3069 3070 /* 3071 * NAME: cntlz(u32 value) 3072 * 3073 * FUNCTION: determine the number of leading zeros within a 32-bit 3074 * value. 3075 * 3076 * PARAMETERS: 3077 * value - 32-bit value to be examined. 3078 * 3079 * RETURN VALUES: 3080 * count of leading zeros 3081 */ 3082 static int cntlz(u32 value) 3083 { 3084 int n; 3085 3086 for (n = 0; n < 32; n++, value <<= 1) { 3087 if (value & HIGHORDER) 3088 break; 3089 } 3090 return (n); 3091 } 3092 3093 3094 /* 3095 * NAME: blkstol2(s64 nb) 3096 * 3097 * FUNCTION: convert a block count to its log2 value. if the block 3098 * count is not a l2 multiple, it is rounded up to the next 3099 * larger l2 multiple. 3100 * 3101 * PARAMETERS: 3102 * nb - number of blocks 3103 * 3104 * RETURN VALUES: 3105 * log2 number of blocks 3106 */ 3107 static int blkstol2(s64 nb) 3108 { 3109 int l2nb; 3110 s64 mask; /* meant to be signed */ 3111 3112 mask = (s64) 1 << (64 - 1); 3113 3114 /* count the leading bits. 3115 */ 3116 for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) { 3117 /* leading bit found. 3118 */ 3119 if (nb & mask) { 3120 /* determine the l2 value. 
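 * (e.g. nb == 80: the highest set bit is bit 6, so the l2 value is
 * first computed as 6; because lower-order bits of nb are also set,
 * 80 not being a power of two, it is rounded up and blkstol2()
 * returns 7, i.e. 80 blocks are treated as a 2^7 == 128 block
 * request.)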
3121 */ 3122 l2nb = (64 - 1) - l2nb; 3123 3124 /* check if we need to round up. 3125 */ 3126 if (~mask & nb) 3127 l2nb++; 3128 3129 return (l2nb); 3130 } 3131 } 3132 assert(0); 3133 return 0; /* fix compiler warning */ 3134 } 3135 3136 3137 /* 3138 * NAME: dbAllocBottomUp() 3139 * 3140 * FUNCTION: alloc the specified block range from the working block 3141 * allocation map. 3142 * 3143 * the blocks will be alloc from the working map one dmap 3144 * at a time. 3145 * 3146 * PARAMETERS: 3147 * ip - pointer to in-core inode; 3148 * blkno - starting block number to be freed. 3149 * nblocks - number of blocks to be freed. 3150 * 3151 * RETURN VALUES: 3152 * 0 - success 3153 * -EIO - i/o error 3154 */ 3155 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks) 3156 { 3157 struct metapage *mp; 3158 struct dmap *dp; 3159 int nb, rc; 3160 s64 lblkno, rem; 3161 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 3162 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 3163 3164 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP); 3165 3166 /* block to be allocated better be within the mapsize. */ 3167 ASSERT(nblocks <= bmp->db_mapsize - blkno); 3168 3169 /* 3170 * allocate the blocks a dmap at a time. 3171 */ 3172 mp = NULL; 3173 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) { 3174 /* release previous dmap if any */ 3175 if (mp) { 3176 write_metapage(mp); 3177 } 3178 3179 /* get the buffer for the current dmap. */ 3180 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage); 3181 mp = read_metapage(ipbmap, lblkno, PSIZE, 0); 3182 if (mp == NULL) { 3183 IREAD_UNLOCK(ipbmap); 3184 return -EIO; 3185 } 3186 dp = (struct dmap *) mp->data; 3187 3188 /* determine the number of blocks to be allocated from 3189 * this dmap. 3190 */ 3191 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1))); 3192 3193 /* allocate the blocks. */ 3194 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) { 3195 release_metapage(mp); 3196 IREAD_UNLOCK(ipbmap); 3197 return (rc); 3198 } 3199 } 3200 3201 /* write the last buffer. */ 3202 write_metapage(mp); 3203 3204 IREAD_UNLOCK(ipbmap); 3205 3206 return (0); 3207 } 3208 3209 3210 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno, 3211 int nblocks) 3212 { 3213 int rc; 3214 int dbitno, word, rembits, nb, nwords, wbitno, agno; 3215 s8 oldroot; 3216 struct dmaptree *tp = (struct dmaptree *) & dp->tree; 3217 3218 /* save the current value of the root (i.e. maximum free string) 3219 * of the dmap tree. 3220 */ 3221 oldroot = tp->stree[ROOT]; 3222 3223 /* determine the bit number and word within the dmap of the 3224 * starting block. 3225 */ 3226 dbitno = blkno & (BPERDMAP - 1); 3227 word = dbitno >> L2DBWORD; 3228 3229 /* block range better be within the dmap */ 3230 assert(dbitno + nblocks <= BPERDMAP); 3231 3232 /* allocate the bits of the dmap's words corresponding to the block 3233 * range. not all bits of the first and last words may be contained 3234 * within the block range. if this is the case, we'll work against 3235 * those words (i.e. partial first and/or last) on an individual basis 3236 * (a single pass), allocating the bits of interest by hand and 3237 * updating the leaf corresponding to the dmap word. a single pass 3238 * will be used for all dmap words fully contained within the 3239 * specified range. within this pass, the bits of all fully contained 3240 * dmap words will be marked as free in a single shot and the leaves 3241 * will be updated. 
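 * (note: in this bottom-up variant the leaves are not adjusted word
 * by word; the bitmap words are simply set and the whole summary
 * tree is rebuilt afterwards by dbInitDmapTree().)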
a single leaf may describe the free space of 3242 * multiple dmap words, so we may update only a subset of the actual 3243 * leaves corresponding to the dmap words of the block range. 3244 */ 3245 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) { 3246 /* determine the bit number within the word and 3247 * the number of bits within the word. 3248 */ 3249 wbitno = dbitno & (DBWORD - 1); 3250 nb = min(rembits, DBWORD - wbitno); 3251 3252 /* check if only part of a word is to be allocated. 3253 */ 3254 if (nb < DBWORD) { 3255 /* allocate (set to 1) the appropriate bits within 3256 * this dmap word. 3257 */ 3258 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb) 3259 >> wbitno); 3260 3261 word++; 3262 } else { 3263 /* one or more dmap words are fully contained 3264 * within the block range. determine how many 3265 * words and allocate (set to 1) the bits of these 3266 * words. 3267 */ 3268 nwords = rembits >> L2DBWORD; 3269 memset(&dp->wmap[word], (int) ONES, nwords * 4); 3270 3271 /* determine how many bits */ 3272 nb = nwords << L2DBWORD; 3273 word += nwords; 3274 } 3275 } 3276 3277 /* update the free count for this dmap */ 3278 le32_add_cpu(&dp->nfree, -nblocks); 3279 3280 /* reconstruct summary tree */ 3281 dbInitDmapTree(dp); 3282 3283 BMAP_LOCK(bmp); 3284 3285 /* if this allocation group is completely free, 3286 * update the highest active allocation group number 3287 * if this allocation group is the new max. 3288 */ 3289 agno = blkno >> bmp->db_agl2size; 3290 if (agno > bmp->db_maxag) 3291 bmp->db_maxag = agno; 3292 3293 /* update the free count for the allocation group and map */ 3294 bmp->db_agfree[agno] -= nblocks; 3295 bmp->db_nfree -= nblocks; 3296 3297 BMAP_UNLOCK(bmp); 3298 3299 /* if the root has not changed, done. */ 3300 if (tp->stree[ROOT] == oldroot) 3301 return (0); 3302 3303 /* root changed. bubble the change up to the dmap control pages. 3304 * if the adjustment of the upper level control pages fails, 3305 * backout the bit allocation (thus making everything consistent). 3306 */ 3307 if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0))) 3308 dbFreeBits(bmp, dp, blkno, nblocks); 3309 3310 return (rc); 3311 } 3312 3313 3314 /* 3315 * NAME: dbExtendFS() 3316 * 3317 * FUNCTION: extend bmap from blkno for nblocks; 3318 * dbExtendFS() updates bmap ready for dbAllocBottomUp(); 3319 * 3320 * L2 3321 * | 3322 * L1---------------------------------L1 3323 * | | 3324 * L0---------L0---------L0 L0---------L0---------L0 3325 * | | | | | | 3326 * d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm; 3327 * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm 3328 * 3329 * <---old---><----------------------------extend-----------------------> 3330 */ 3331 int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks) 3332 { 3333 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb); 3334 int nbperpage = sbi->nbperpage; 3335 int i, i0 = true, j, j0 = true, k, n; 3336 s64 newsize; 3337 s64 p; 3338 struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL; 3339 struct dmapctl *l2dcp, *l1dcp, *l0dcp; 3340 struct dmap *dp; 3341 s8 *l0leaf, *l1leaf, *l2leaf; 3342 struct bmap *bmp = sbi->bmap; 3343 int agno, l2agsize, oldl2agsize; 3344 s64 ag_rem; 3345 3346 newsize = blkno + nblocks; 3347 3348 jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld", 3349 (long long) blkno, (long long) nblocks, (long long) newsize); 3350 3351 /* 3352 * initialize bmap control page. 3353 * 3354 * all the data in bmap control page should exclude 3355 * the mkfs hidden dmap page. 
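 *
 * (illustrative note on the AG reconfiguration done below: if the
 * new AG size is 2^2 times the old one, k == 4 and the free counts
 * of old AGs 0..3 are summed into new AG 0, old AGs 4..7 into new
 * AG 1, and so on; db_maxag is scaled down by the same factor k.)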
3356 */ 3357 3358 /* update mapsize */ 3359 bmp->db_mapsize = newsize; 3360 bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize); 3361 3362 /* compute new AG size */ 3363 l2agsize = dbGetL2AGSize(newsize); 3364 oldl2agsize = bmp->db_agl2size; 3365 3366 bmp->db_agl2size = l2agsize; 3367 bmp->db_agsize = 1 << l2agsize; 3368 3369 /* compute new number of AG */ 3370 agno = bmp->db_numag; 3371 bmp->db_numag = newsize >> l2agsize; 3372 bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0; 3373 3374 /* 3375 * reconfigure db_agfree[] 3376 * from old AG configuration to new AG configuration; 3377 * 3378 * coalesce contiguous k (newAGSize/oldAGSize) AGs; 3379 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn; 3380 * note: new AG size = old AG size * (2**x). 3381 */ 3382 if (l2agsize == oldl2agsize) 3383 goto extend; 3384 k = 1 << (l2agsize - oldl2agsize); 3385 ag_rem = bmp->db_agfree[0]; /* save agfree[0] */ 3386 for (i = 0, n = 0; i < agno; n++) { 3387 bmp->db_agfree[n] = 0; /* init collection point */ 3388 3389 /* coalesce contiguous k AGs; */ 3390 for (j = 0; j < k && i < agno; j++, i++) { 3391 /* merge AGi to AGn */ 3392 bmp->db_agfree[n] += bmp->db_agfree[i]; 3393 } 3394 } 3395 bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */ 3396 3397 for (; n < MAXAG; n++) 3398 bmp->db_agfree[n] = 0; 3399 3400 /* 3401 * update highest active ag number 3402 */ 3403 3404 bmp->db_maxag = bmp->db_maxag / k; 3405 3406 /* 3407 * extend bmap 3408 * 3409 * update bit maps and corresponding level control pages; 3410 * global control page db_nfree, db_agfree[agno], db_maxfreebud; 3411 */ 3412 extend: 3413 /* get L2 page */ 3414 p = BMAPBLKNO + nbperpage; /* L2 page */ 3415 l2mp = read_metapage(ipbmap, p, PSIZE, 0); 3416 if (!l2mp) { 3417 jfs_error(ipbmap->i_sb, "L2 page could not be read\n"); 3418 return -EIO; 3419 } 3420 l2dcp = (struct dmapctl *) l2mp->data; 3421 3422 /* compute start L1 */ 3423 k = blkno >> L2MAXL1SIZE; 3424 l2leaf = l2dcp->stree + CTLLEAFIND + k; 3425 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */ 3426 3427 /* 3428 * extend each L1 in L2 3429 */ 3430 for (; k < LPERCTL; k++, p += nbperpage) { 3431 /* get L1 page */ 3432 if (j0) { 3433 /* read in L1 page: (blkno & (MAXL1SIZE - 1)) */ 3434 l1mp = read_metapage(ipbmap, p, PSIZE, 0); 3435 if (l1mp == NULL) 3436 goto errout; 3437 l1dcp = (struct dmapctl *) l1mp->data; 3438 3439 /* compute start L0 */ 3440 j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE; 3441 l1leaf = l1dcp->stree + CTLLEAFIND + j; 3442 p = BLKTOL0(blkno, sbi->l2nbperpage); 3443 j0 = false; 3444 } else { 3445 /* assign/init L1 page */ 3446 l1mp = get_metapage(ipbmap, p, PSIZE, 0); 3447 if (l1mp == NULL) 3448 goto errout; 3449 3450 l1dcp = (struct dmapctl *) l1mp->data; 3451 3452 /* compute start L0 */ 3453 j = 0; 3454 l1leaf = l1dcp->stree + CTLLEAFIND; 3455 p += nbperpage; /* 1st L0 of L1.k */ 3456 } 3457 3458 /* 3459 * extend each L0 in L1 3460 */ 3461 for (; j < LPERCTL; j++) { 3462 /* get L0 page */ 3463 if (i0) { 3464 /* read in L0 page: (blkno & (MAXL0SIZE - 1)) */ 3465 3466 l0mp = read_metapage(ipbmap, p, PSIZE, 0); 3467 if (l0mp == NULL) 3468 goto errout; 3469 l0dcp = (struct dmapctl *) l0mp->data; 3470 3471 /* compute start dmap */ 3472 i = (blkno & (MAXL0SIZE - 1)) >> 3473 L2BPERDMAP; 3474 l0leaf = l0dcp->stree + CTLLEAFIND + i; 3475 p = BLKTODMAP(blkno, 3476 sbi->l2nbperpage); 3477 i0 = false; 3478 } else { 3479 /* assign/init L0 page */ 3480 l0mp = get_metapage(ipbmap, p, PSIZE, 0); 3481 if (l0mp == NULL) 3482 goto errout; 3483 3484 l0dcp = (struct 
dmapctl *) l0mp->data; 3485 3486 /* compute start dmap */ 3487 i = 0; 3488 l0leaf = l0dcp->stree + CTLLEAFIND; 3489 p += nbperpage; /* 1st dmap of L0.j */ 3490 } 3491 3492 /* 3493 * extend each dmap in L0 3494 */ 3495 for (; i < LPERCTL; i++) { 3496 /* 3497 * reconstruct the dmap page, and 3498 * initialize corresponding parent L0 leaf 3499 */ 3500 if ((n = blkno & (BPERDMAP - 1))) { 3501 /* read in dmap page: */ 3502 mp = read_metapage(ipbmap, p, 3503 PSIZE, 0); 3504 if (mp == NULL) 3505 goto errout; 3506 n = min(nblocks, (s64)BPERDMAP - n); 3507 } else { 3508 /* assign/init dmap page */ 3509 mp = read_metapage(ipbmap, p, 3510 PSIZE, 0); 3511 if (mp == NULL) 3512 goto errout; 3513 3514 n = min_t(s64, nblocks, BPERDMAP); 3515 } 3516 3517 dp = (struct dmap *) mp->data; 3518 *l0leaf = dbInitDmap(dp, blkno, n); 3519 3520 bmp->db_nfree += n; 3521 agno = le64_to_cpu(dp->start) >> l2agsize; 3522 bmp->db_agfree[agno] += n; 3523 3524 write_metapage(mp); 3525 3526 l0leaf++; 3527 p += nbperpage; 3528 3529 blkno += n; 3530 nblocks -= n; 3531 if (nblocks == 0) 3532 break; 3533 } /* for each dmap in a L0 */ 3534 3535 /* 3536 * build current L0 page from its leaves, and 3537 * initialize corresponding parent L1 leaf 3538 */ 3539 *l1leaf = dbInitDmapCtl(l0dcp, 0, ++i); 3540 write_metapage(l0mp); 3541 l0mp = NULL; 3542 3543 if (nblocks) 3544 l1leaf++; /* continue for next L0 */ 3545 else { 3546 /* more than 1 L0 ? */ 3547 if (j > 0) 3548 break; /* build L1 page */ 3549 else { 3550 /* summarize in global bmap page */ 3551 bmp->db_maxfreebud = *l1leaf; 3552 release_metapage(l1mp); 3553 release_metapage(l2mp); 3554 goto finalize; 3555 } 3556 } 3557 } /* for each L0 in a L1 */ 3558 3559 /* 3560 * build current L1 page from its leaves, and 3561 * initialize corresponding parent L2 leaf 3562 */ 3563 *l2leaf = dbInitDmapCtl(l1dcp, 1, ++j); 3564 write_metapage(l1mp); 3565 l1mp = NULL; 3566 3567 if (nblocks) 3568 l2leaf++; /* continue for next L1 */ 3569 else { 3570 /* more than 1 L1 ? */ 3571 if (k > 0) 3572 break; /* build L2 page */ 3573 else { 3574 /* summarize in global bmap page */ 3575 bmp->db_maxfreebud = *l2leaf; 3576 release_metapage(l2mp); 3577 goto finalize; 3578 } 3579 } 3580 } /* for each L1 in a L2 */ 3581 3582 jfs_error(ipbmap->i_sb, "function has not returned as expected\n"); 3583 errout: 3584 if (l0mp) 3585 release_metapage(l0mp); 3586 if (l1mp) 3587 release_metapage(l1mp); 3588 release_metapage(l2mp); 3589 return -EIO; 3590 3591 /* 3592 * finalize bmap control page 3593 */ 3594 finalize: 3595 3596 return 0; 3597 } 3598 3599 3600 /* 3601 * dbFinalizeBmap() 3602 */ 3603 void dbFinalizeBmap(struct inode *ipbmap) 3604 { 3605 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap; 3606 int actags, inactags, l2nl; 3607 s64 ag_rem, actfree, inactfree, avgfree; 3608 int i, n; 3609 3610 /* 3611 * finalize bmap control page 3612 */ 3613 //finalize: 3614 /* 3615 * compute db_agpref: preferred ag to allocate from 3616 * (the leftmost ag with average free space in it); 3617 */ 3618 //agpref: 3619 /* get the number of active ags and inactive ags */ 3620 actags = bmp->db_maxag + 1; 3621 inactags = bmp->db_numag - actags; 3622 ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */ 3623 3624 /* determine how many blocks are in the inactive allocation 3625 * groups. in doing this, we must account for the fact that 3626 * the rightmost group might be a partial group (i.e. file 3627 * system size is not a multiple of the group size). 3628 */ 3629 inactfree = (inactags && ag_rem) ? 
3630 ((inactags - 1) << bmp->db_agl2size) + ag_rem 3631 : inactags << bmp->db_agl2size; 3632 3633 /* determine how many free blocks are in the active 3634 * allocation groups plus the average number of free blocks 3635 * within the active ags. 3636 */ 3637 actfree = bmp->db_nfree - inactfree; 3638 avgfree = (u32) actfree / (u32) actags; 3639 3640 /* if the preferred allocation group has not average free space. 3641 * re-establish the preferred group as the leftmost 3642 * group with average free space. 3643 */ 3644 if (bmp->db_agfree[bmp->db_agpref] < avgfree) { 3645 for (bmp->db_agpref = 0; bmp->db_agpref < actags; 3646 bmp->db_agpref++) { 3647 if (bmp->db_agfree[bmp->db_agpref] >= avgfree) 3648 break; 3649 } 3650 if (bmp->db_agpref >= bmp->db_numag) { 3651 jfs_error(ipbmap->i_sb, 3652 "cannot find ag with average freespace\n"); 3653 } 3654 } 3655 3656 /* 3657 * compute db_aglevel, db_agheight, db_width, db_agstart: 3658 * an ag is covered in aglevel dmapctl summary tree, 3659 * at agheight level height (from leaf) with agwidth number of nodes 3660 * each, which starts at agstart index node of the smmary tree node 3661 * array; 3662 */ 3663 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); 3664 l2nl = 3665 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); 3666 bmp->db_agheight = l2nl >> 1; 3667 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1)); 3668 for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0; 3669 i--) { 3670 bmp->db_agstart += n; 3671 n <<= 2; 3672 } 3673 3674 } 3675 3676 3677 /* 3678 * NAME: dbInitDmap()/ujfs_idmap_page() 3679 * 3680 * FUNCTION: initialize working/persistent bitmap of the dmap page 3681 * for the specified number of blocks: 3682 * 3683 * at entry, the bitmaps had been initialized as free (ZEROS); 3684 * The number of blocks will only account for the actually 3685 * existing blocks. Blocks which don't actually exist in 3686 * the aggregate will be marked as allocated (ONES); 3687 * 3688 * PARAMETERS: 3689 * dp - pointer to page of map 3690 * nblocks - number of blocks this page 3691 * 3692 * RETURNS: NONE 3693 */ 3694 static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks) 3695 { 3696 int blkno, w, b, r, nw, nb, i; 3697 3698 /* starting block number within the dmap */ 3699 blkno = Blkno & (BPERDMAP - 1); 3700 3701 if (blkno == 0) { 3702 dp->nblocks = dp->nfree = cpu_to_le32(nblocks); 3703 dp->start = cpu_to_le64(Blkno); 3704 3705 if (nblocks == BPERDMAP) { 3706 memset(&dp->wmap[0], 0, LPERDMAP * 4); 3707 memset(&dp->pmap[0], 0, LPERDMAP * 4); 3708 goto initTree; 3709 } 3710 } else { 3711 le32_add_cpu(&dp->nblocks, nblocks); 3712 le32_add_cpu(&dp->nfree, nblocks); 3713 } 3714 3715 /* word number containing start block number */ 3716 w = blkno >> L2DBWORD; 3717 3718 /* 3719 * free the bits corresponding to the block range (ZEROS): 3720 * note: not all bits of the first and last words may be contained 3721 * within the block range. 3722 */ 3723 for (r = nblocks; r > 0; r -= nb, blkno += nb) { 3724 /* number of bits preceding range to be freed in the word */ 3725 b = blkno & (DBWORD - 1); 3726 /* number of bits to free in the word */ 3727 nb = min(r, DBWORD - b); 3728 3729 /* is partial word to be freed ? 
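 * (note: unlike dbFreeBits(), which adjusts only the working map,
 * this initialization clears the corresponding bits in both wmap
 * and pmap, the persistent map.)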
*/ 3730 if (nb < DBWORD) { 3731 /* free (set to 0) from the bitmap word */ 3732 dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb) 3733 >> b)); 3734 dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb) 3735 >> b)); 3736 3737 /* skip the word freed */ 3738 w++; 3739 } else { 3740 /* free (set to 0) contiguous bitmap words */ 3741 nw = r >> L2DBWORD; 3742 memset(&dp->wmap[w], 0, nw * 4); 3743 memset(&dp->pmap[w], 0, nw * 4); 3744 3745 /* skip the words freed */ 3746 nb = nw << L2DBWORD; 3747 w += nw; 3748 } 3749 } 3750 3751 /* 3752 * mark bits following the range to be freed (non-existing 3753 * blocks) as allocated (ONES) 3754 */ 3755 3756 if (blkno == BPERDMAP) 3757 goto initTree; 3758 3759 /* the first word beyond the end of existing blocks */ 3760 w = blkno >> L2DBWORD; 3761 3762 /* does nblocks fall on a 32-bit boundary ? */ 3763 b = blkno & (DBWORD - 1); 3764 if (b) { 3765 /* mark a partial word allocated */ 3766 dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b); 3767 w++; 3768 } 3769 3770 /* set the rest of the words in the page to allocated (ONES) */ 3771 for (i = w; i < LPERDMAP; i++) 3772 dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES); 3773 3774 /* 3775 * init tree 3776 */ 3777 initTree: 3778 return (dbInitDmapTree(dp)); 3779 } 3780 3781 3782 /* 3783 * NAME: dbInitDmapTree()/ujfs_complete_dmap() 3784 * 3785 * FUNCTION: initialize summary tree of the specified dmap: 3786 * 3787 * at entry, bitmap of the dmap has been initialized; 3788 * 3789 * PARAMETERS: 3790 * dp - dmap to complete 3791 * blkno - starting block number for this dmap 3792 * treemax - will be filled in with max free for this dmap 3793 * 3794 * RETURNS: max free string at the root of the tree 3795 */ 3796 static int dbInitDmapTree(struct dmap * dp) 3797 { 3798 struct dmaptree *tp; 3799 s8 *cp; 3800 int i; 3801 3802 /* init fixed info of tree */ 3803 tp = &dp->tree; 3804 tp->nleafs = cpu_to_le32(LPERDMAP); 3805 tp->l2nleafs = cpu_to_le32(L2LPERDMAP); 3806 tp->leafidx = cpu_to_le32(LEAFIND); 3807 tp->height = cpu_to_le32(4); 3808 tp->budmin = BUDMIN; 3809 3810 /* init each leaf from corresponding wmap word: 3811 * note: leaf is set to NOFREE(-1) if all blocks of corresponding 3812 * bitmap word are allocated. 3813 */ 3814 cp = tp->stree + le32_to_cpu(tp->leafidx); 3815 for (i = 0; i < LPERDMAP; i++) 3816 *cp++ = dbMaxBud((u8 *) & dp->wmap[i]); 3817 3818 /* build the dmap's binary buddy summary tree */ 3819 return (dbInitTree(tp)); 3820 } 3821 3822 3823 /* 3824 * NAME: dbInitTree()/ujfs_adjtree() 3825 * 3826 * FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl. 3827 * 3828 * at entry, the leaves of the tree has been initialized 3829 * from corresponding bitmap word or root of summary tree 3830 * of the child control page; 3831 * configure binary buddy system at the leaf level, then 3832 * bubble up the values of the leaf nodes up the tree. 

/*
 * NAME:	dbInitDmapTree()/ujfs_complete_dmap()
 *
 * FUNCTION:	initialize summary tree of the specified dmap:
 *
 *		at entry, the bitmap of the dmap has been initialized;
 *
 * PARAMETERS:
 *	dp	- dmap to complete
 *
 * RETURNS:	max free string at the root of the tree
 */
static int dbInitDmapTree(struct dmap * dp)
{
	struct dmaptree *tp;
	s8 *cp;
	int i;

	/* init fixed info of tree */
	tp = &dp->tree;
	tp->nleafs = cpu_to_le32(LPERDMAP);
	tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
	tp->leafidx = cpu_to_le32(LEAFIND);
	tp->height = cpu_to_le32(4);
	tp->budmin = BUDMIN;

	/* init each leaf from corresponding wmap word:
	 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
	 * bitmap word are allocated.
	 */
	cp = tp->stree + le32_to_cpu(tp->leafidx);
	for (i = 0; i < LPERDMAP; i++)
		*cp++ = dbMaxBud((u8 *) &dp->wmap[i]);

	/* build the dmap's binary buddy summary tree */
	return (dbInitTree(tp));
}


/*
 * NAME:	dbInitTree()/ujfs_adjtree()
 *
 * FUNCTION:	initialize binary buddy summary tree of a dmap or dmapctl.
 *
 *		at entry, the leaves of the tree have been initialized
 *		from the corresponding bitmap words or from the roots of
 *		the summary trees of the child control pages;
 *		configure the binary buddy system at the leaf level, then
 *		bubble the values of the leaf nodes up the tree.
 *
 * PARAMETERS:
 *	dtp	- pointer to the dmap or dmapctl summary tree
 *
 * RETURNS:	max free string at the root of the tree
 */
static int dbInitTree(struct dmaptree * dtp)
{
	int l2max, l2free, bsize, nextb, i;
	int child, parent, nparent;
	s8 *tp, *cp, *cp1;

	tp = dtp->stree;

	/* Determine the maximum free string possible for the leaves */
	l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;

	/*
	 * configure the leaf level into a binary buddy system
	 *
	 * Try to combine buddies starting with a buddy size of 1
	 * (i.e. two leaves). At a buddy size of 1 two buddy leaves
	 * can be combined if both buddies have a maximum free of budmin;
	 * the combination will result in the left-most buddy leaf having
	 * a maximum free of budmin+1.
	 * After processing all buddies for a given size, process buddies
	 * at the next higher buddy size (i.e. current size * 2) and
	 * the next maximum free (current free + 1).
	 * This continues until the maximum possible buddy combination
	 * yields maximum free.
	 */
	for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
	     l2free++, bsize = nextb) {
		/* get next buddy size == current buddy pair size */
		nextb = bsize << 1;

		/* scan each adjacent buddy pair at current buddy size */
		for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
		     i < le32_to_cpu(dtp->nleafs);
		     i += nextb, cp += nextb) {
			/* coalesce if both adjacent buddies are max free */
			if (*cp == l2free && *(cp + bsize) == l2free) {
				*cp = l2free + 1;	/* left take right */
				*(cp + bsize) = -1;	/* right give left */
			}
		}
	}

	/*
	 * bubble summary information of leaves up the tree.
	 *
	 * Starting at the leaf node level, the four nodes described by
	 * the higher level parent node are compared for a maximum free and
	 * this maximum becomes the value of the parent node.
	 * When all lower level nodes are processed in this fashion then
	 * move up to the next level (parent becomes a lower level node) and
	 * continue the process for that level.
	 */
	for (child = le32_to_cpu(dtp->leafidx),
	     nparent = le32_to_cpu(dtp->nleafs) >> 2;
	     nparent > 0; nparent >>= 2, child = parent) {
		/* get index of 1st node of parent level */
		parent = (child - 1) >> 2;

		/* set the value of the parent node as the maximum
		 * of the four nodes of the current level.
		 */
		for (i = 0, cp = tp + child, cp1 = tp + parent;
		     i < nparent; i++, cp += 4, cp1++)
			*cp1 = TREEMAX(cp);
	}

	return (*tp);
}
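
/*
 * For a dmap (256 leaves, budmin == BUDMIN == 5, each leaf covering one
 * 32-bit bitmap word, i.e. 32 blocks), the two passes above work out as
 * follows for a completely free dmap:
 *
 *	every leaf starts out as 5 (a fully free word);
 *	the first buddy pass (bsize 1) sets leaf 0 to 6 and leaf 1 to
 *	NOFREE, leaf 2 to 6 and leaf 3 to NOFREE, and so on;
 *	after the final pass (bsize 128) leaf 0 holds
 *	L2LPERDMAP + BUDMIN == 13, i.e. log2 of a fully free dmap;
 *	bubbling the maxima up the four tree levels then leaves 13 at
 *	the root (*tp), which is the value returned to the caller.
 */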

/*
 *	dbInitDmapCtl()
 *
 * function: initialize dmapctl page
 */
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
{				/* i = start leaf index not covered by range */
	s8 *cp;

	dcp->nleafs = cpu_to_le32(LPERCTL);
	dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
	dcp->leafidx = cpu_to_le32(CTLLEAFIND);
	dcp->height = cpu_to_le32(5);
	dcp->budmin = L2BPERDMAP + L2LPERCTL * level;

	/*
	 * initialize the leaves of current level that were not covered
	 * by the specified input block range (i.e. the leaves have no
	 * low level dmapctl or dmap).
	 */
	cp = &dcp->stree[CTLLEAFIND + i];
	for (; i < LPERCTL; i++)
		*cp++ = NOFREE;

	/* build the dmapctl's binary buddy summary tree */
	return (dbInitTree((struct dmaptree *) dcp));
}


/*
 * NAME:	dbGetL2AGSize()/ujfs_getagl2size()
 *
 * FUNCTION:	Determine log2(allocation group size) from aggregate size
 *
 * PARAMETERS:
 *	nblocks	- Number of blocks in aggregate
 *
 * RETURNS:	log2(allocation group size) in aggregate blocks
 */
static int dbGetL2AGSize(s64 nblocks)
{
	s64 sz;
	s64 m;
	int l2sz;

	if (nblocks < BPERDMAP * MAXAG)
		return (L2BPERDMAP);

	/* round up aggregate size to power of 2 */
	m = ((u64) 1 << (64 - 1));
	for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
		if (m & nblocks)
			break;
	}

	sz = (s64) 1 << l2sz;
	if (sz < nblocks)
		l2sz += 1;

	/* agsize = roundupSize/max_number_of_ag */
	return (l2sz - L2MAXAG);
}


/*
 * NAME:	dbMapFileSizeToMapSize()
 *
 * FUNCTION:	compute number of blocks the block allocation map file
 *		can cover from the map file size;
 *
 * PARAMETERS:
 *	ipbmap	- pointer to the block allocation map inode
 *
 * RETURNS:	Number of blocks which can be covered by this block map file;
 */

/*
 * maximum number of map pages at each level including control pages
 */
#define MAXL0PAGES	(1 + LPERCTL)
#define MAXL1PAGES	(1 + LPERCTL * MAXL0PAGES)

/*
 * convert number of map pages to the zero origin top dmapctl level
 */
#define BMAPPGTOLEV(npages)	\
	(((npages) <= 3 + MAXL0PAGES) ? 0 : \
	 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)

s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
{
	struct super_block *sb = ipbmap->i_sb;
	s64 nblocks;
	s64 npages, ndmaps;
	int level, i;
	int complete, factor;

	nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
	npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
	level = BMAPPGTOLEV(npages);

	/* At each level, accumulate the number of dmap pages covered by
	 * the number of full child levels below it;
	 * repeat for the last incomplete child level.
	 */
	ndmaps = 0;
	npages--;		/* skip the first global control page */
	/* skip higher level control pages above top level covered by map */
	npages -= (2 - level);
	npages--;		/* skip top level's control page */
	for (i = level; i >= 0; i--) {
		factor =
		    (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
		complete = (u32) npages / factor;
		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
				      ((i == 1) ? LPERCTL : 1));

		/* pages in last/incomplete child */
		npages = (u32) npages % factor;
		/* skip incomplete child's level control page */
		npages--;
	}

	/* convert the number of dmaps into the number of blocks
	 * which can be covered by the dmaps;
	 */
	nblocks = ndmaps << L2BPERDMAP;

	return (nblocks);
}
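
/*
 * For example, a block map file of 10 map pages describes a small
 * aggregate: BMAPPGTOLEV(10) == 0, and after skipping the global
 * control page, the two higher level (L2/L1) control pages and the
 * L0 dmapctl page, 6 pages remain; at level 0 the factor is 1, so
 * these are 6 dmap pages and the map covers
 * 6 << L2BPERDMAP == 49152 blocks (assuming the usual
 * L2BPERDMAP == 13).
 */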