1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) International Business Machines Corp., 2000-2004
4 * Portions Copyright (C) Tino Reichardt, 2012
5 */
6
7 #include <linux/fs.h>
8 #include <linux/slab.h>
9 #include "jfs_incore.h"
10 #include "jfs_superblock.h"
11 #include "jfs_dmap.h"
12 #include "jfs_imap.h"
13 #include "jfs_lock.h"
14 #include "jfs_metapage.h"
15 #include "jfs_debug.h"
16 #include "jfs_discard.h"
17
18 /*
19 * SERIALIZATION of the Block Allocation Map.
20 *
21 * the working state of the block allocation map is accessed in
22 * two directions:
23 *
24 * 1) allocation and free requests that start at the dmap
25 * level and move up through the dmap control pages (i.e.
26 * the vast majority of requests).
27 *
28 * 2) allocation requests that start at dmap control page
29 * level and work down towards the dmaps.
30 *
31 * the serialization scheme used here is as follows.
32 *
33 * requests which start at the bottom are serialized against each
34 * other through buffers and each request holds onto its buffers
35 * as it works its way up from a single dmap to the required level
36 * of dmap control page.
37 * requests that start at the top are serialized against each other
38 * and against requests that start from the bottom by the multiple
39 * read/single write inode lock of the bmap inode. requests starting
40 * at the top take this lock in write mode while requests starting
41 * at the bottom take the lock in read mode. a single top-down request
42 * may proceed exclusively while multiple bottom-up requests may proceed
43 * simultaneously (under the protection of busy buffers).
44 *
45 * in addition to information found in dmaps and dmap control pages,
46 * the working state of the block allocation map also includes read/
47 * write information maintained in the bmap descriptor (i.e. total
48 * free block count, allocation group level free block counts).
49 * a single exclusive lock (BMAP_LOCK) is used to guard this information
50 * in the face of multiple bottom-up requests.
51 * (lock ordering: IREAD_LOCK, BMAP_LOCK);
52 *
53 * accesses to the persistent state of the block allocation map (limited
54 * to the persistent bitmaps in dmaps) are guarded by (busy) buffers.
55 */
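
/*
 * illustrative sketch of the two patterns described above (not a
 * verbatim call sequence; see dbAlloc()/dbFree() and dbAllocAG() for
 * the real paths):
 *
 * bottom-up request (e.g. dbFree() of a few blocks):
 *	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 *	mp = read_metapage(...);	 dmap buffer held while working upward
 *	BMAP_LOCK(bmp);			 guards db_nfree/db_agfree updates
 *	BMAP_UNLOCK(bmp);
 *	write_metapage(mp);
 *	IREAD_UNLOCK(ipbmap);
 *
 * top-down request (e.g. dbAlloc() calling dbAllocAG()):
 *	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);	 excludes all bottom-up requests
 *	...walk dmap control pages down to a dmap and allocate...
 *	IWRITE_UNLOCK(ipbmap);
 */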
56
57 #define BMAP_LOCK_INIT(bmp) mutex_init(&bmp->db_bmaplock)
58 #define BMAP_LOCK(bmp) mutex_lock(&bmp->db_bmaplock)
59 #define BMAP_UNLOCK(bmp) mutex_unlock(&bmp->db_bmaplock)
60
61 /*
62 * forward references
63 */
64 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
65 int nblocks);
66 static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
67 static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
68 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
69 static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
70 static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
71 int level);
72 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
73 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
74 int nblocks);
75 static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
76 int nblocks,
77 int l2nb, s64 * results);
78 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
79 int nblocks);
80 static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
81 int l2nb,
82 s64 * results);
83 static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
84 s64 * results);
85 static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
86 s64 * results);
87 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
88 static int dbFindBits(u32 word, int l2nb);
89 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
90 static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
91 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
92 int nblocks);
93 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
94 int nblocks);
95 static int dbMaxBud(u8 * cp);
96 static int blkstol2(s64 nb);
97
98 static int cntlz(u32 value);
99 static int cnttz(u32 word);
100
101 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
102 int nblocks);
103 static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
104 static int dbInitDmapTree(struct dmap * dp);
105 static int dbInitTree(struct dmaptree * dtp);
106 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
107 static int dbGetL2AGSize(s64 nblocks);
108
109 /*
110 * buddy table
111 *
112 * table used for determining buddy sizes within characters of
113 * dmap bitmap words. the characters themselves serve as indexes
114 * into the table, with the table elements yielding the maximum
115 * binary buddy of free bits within the character.
116 */
117 static const s8 budtab[256] = {
118 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
119 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
121 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
122 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
123 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
124 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
125 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
126 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
127 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
128 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
129 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
130 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
131 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
132 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
133 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
134 };
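
/*
 * worked example of budtab indexing (a 0 bit marks a free block, as in
 * the wmap checks in dbAllocNext()): budtab[0x00] == 3 since all 8 bits
 * of the character are free (a binary buddy of 2^3 blocks);
 * budtab[0x0f] == 2 since the upper, buddy-aligned nibble is free
 * (2^2 blocks); budtab[0x55] == 0 since the largest aligned free run is
 * a single bit; budtab[0xff] == -1 (NOFREE) since no bits are free.
 */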
135
136 /*
137 * NAME: dbMount()
138 *
139 * FUNCTION: initialize the block allocation map.
140 *
141 * memory is allocated for the in-core bmap descriptor and
142 * the in-core descriptor is initialized from disk.
143 *
144 * PARAMETERS:
145 * ipbmap - pointer to in-core inode for the block map.
146 *
147 * RETURN VALUES:
148 * 0 - success
149 * -ENOMEM - insufficient memory
150 * -EIO - i/o error
151 * -EINVAL - wrong bmap data
152 */
153 int dbMount(struct inode *ipbmap)
154 {
155 struct bmap *bmp;
156 struct dbmap_disk *dbmp_le;
157 struct metapage *mp;
158 int i, err;
159
160 /*
161 * allocate/initialize the in-memory bmap descriptor
162 */
163 /* allocate memory for the in-memory bmap descriptor */
164 bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
165 if (bmp == NULL)
166 return -ENOMEM;
167
168 /* read the on-disk bmap descriptor. */
169 mp = read_metapage(ipbmap,
170 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
171 PSIZE, 0);
172 if (mp == NULL) {
173 err = -EIO;
174 goto err_kfree_bmp;
175 }
176
177 /* copy the on-disk bmap descriptor to its in-memory version. */
178 dbmp_le = (struct dbmap_disk *) mp->data;
179 bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
180 bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
181 bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
182 bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
183 bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
184 bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
185 bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
186 bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
187 bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
188 bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
189 bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
190 bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
191
192 if ((bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) ||
193 (bmp->db_l2nbperpage < 0) ||
194 !bmp->db_numag || (bmp->db_numag > MAXAG) ||
195 (bmp->db_maxag >= MAXAG) || (bmp->db_maxag < 0) ||
196 (bmp->db_agpref >= MAXAG) || (bmp->db_agpref < 0) ||
197 (bmp->db_agheight < 0) || (bmp->db_agheight > (L2LPERCTL >> 1)) ||
198 (bmp->db_agwidth < 1) || (bmp->db_agwidth > (LPERCTL / MAXAG)) ||
199 (bmp->db_agwidth > (1 << (L2LPERCTL - (bmp->db_agheight << 1)))) ||
200 (bmp->db_agstart < 0) ||
201 (bmp->db_agstart > (CTLTREESIZE - 1 - bmp->db_agwidth * (MAXAG - 1))) ||
202 (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) ||
203 (bmp->db_agl2size < 0) ||
204 ((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
205 err = -EINVAL;
206 goto err_release_metapage;
207 }
208
209 for (i = 0; i < MAXAG; i++)
210 bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
211 bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
212 bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
213
214 /* release the buffer. */
215 release_metapage(mp);
216
217 /* bind the bmap inode and the bmap descriptor to each other. */
218 bmp->db_ipbmap = ipbmap;
219 JFS_SBI(ipbmap->i_sb)->bmap = bmp;
220
221 memset(bmp->db_active, 0, sizeof(bmp->db_active));
222
223 /*
224 * allocate/initialize the bmap lock
225 */
226 BMAP_LOCK_INIT(bmp);
227
228 return (0);
229
230 err_release_metapage:
231 release_metapage(mp);
232 err_kfree_bmp:
233 kfree(bmp);
234 return err;
235 }
236
237
238 /*
239 * NAME: dbUnmount()
240 *
241 * FUNCTION: terminate the block allocation map in preparation for
242 * file system unmount.
243 *
244 * the in-core bmap descriptor is written to disk and
245 * the memory for this descriptor is freed.
246 *
247 * PARAMETERS:
248 * ipbmap - pointer to in-core inode for the block map.
249 *
250 * RETURN VALUES:
251 * 0 - success
252 * -EIO - i/o error
253 */
254 int dbUnmount(struct inode *ipbmap, int mounterror)
255 {
256 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
257
258 if (!(mounterror || isReadOnly(ipbmap)))
259 dbSync(ipbmap);
260
261 /*
262 * Invalidate the page cache buffers
263 */
264 truncate_inode_pages(ipbmap->i_mapping, 0);
265
266 /* free the memory for the in-memory bmap. */
267 kfree(bmp);
268 JFS_SBI(ipbmap->i_sb)->bmap = NULL;
269
270 return (0);
271 }
272
273 /*
274 * dbSync()
275 */
276 int dbSync(struct inode *ipbmap)
277 {
278 struct dbmap_disk *dbmp_le;
279 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
280 struct metapage *mp;
281 int i;
282
283 /*
284 * write bmap global control page
285 */
286 /* get the buffer for the on-disk bmap descriptor. */
287 mp = read_metapage(ipbmap,
288 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
289 PSIZE, 0);
290 if (mp == NULL) {
291 jfs_err("dbSync: read_metapage failed!");
292 return -EIO;
293 }
294 /* copy the in-memory version of the bmap to the on-disk version */
295 dbmp_le = (struct dbmap_disk *) mp->data;
296 dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
297 dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
298 dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
299 dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
300 dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
301 dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
302 dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
303 dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
304 dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
305 dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
306 dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
307 dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
308 for (i = 0; i < MAXAG; i++)
309 dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
310 dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
311 dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
312
313 /* write the buffer */
314 write_metapage(mp);
315
316 /*
317 * write out dirty pages of bmap
318 */
319 filemap_write_and_wait(ipbmap->i_mapping);
320
321 diWriteSpecial(ipbmap, 0);
322
323 return (0);
324 }
325
326 /*
327 * NAME: dbFree()
328 *
329 * FUNCTION: free the specified block range from the working block
330 * allocation map.
331 *
332 * the blocks will be free from the working map one dmap
333 * at a time.
334 *
335 * PARAMETERS:
336 * ip - pointer to in-core inode;
337 * blkno - starting block number to be freed.
338 * nblocks - number of blocks to be freed.
339 *
340 * RETURN VALUES:
341 * 0 - success
342 * -EIO - i/o error
343 */
344 int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
345 {
346 struct metapage *mp;
347 struct dmap *dp;
348 int nb, rc;
349 s64 lblkno, rem;
350 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
351 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
352 struct super_block *sb = ipbmap->i_sb;
353
354 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
355
356 /* block to be freed better be within the mapsize. */
357 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
358 IREAD_UNLOCK(ipbmap);
359 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
360 (unsigned long long) blkno,
361 (unsigned long long) nblocks);
362 jfs_error(ip->i_sb, "block to be freed is outside the map\n");
363 return -EIO;
364 }
365
366 /**
367 * TRIM the blocks, when mounted with discard option
368 */
369 if (JFS_SBI(sb)->flag & JFS_DISCARD)
370 if (JFS_SBI(sb)->minblks_trim <= nblocks)
371 jfs_issue_discard(ipbmap, blkno, nblocks);
372
373 /*
374 * free the blocks a dmap at a time.
375 */
376 mp = NULL;
377 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
378 /* release previous dmap if any */
379 if (mp) {
380 write_metapage(mp);
381 }
382
383 /* get the buffer for the current dmap. */
384 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
385 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
386 if (mp == NULL) {
387 IREAD_UNLOCK(ipbmap);
388 return -EIO;
389 }
390 dp = (struct dmap *) mp->data;
391
392 /* determine the number of blocks to be freed from
393 * this dmap.
394 */
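		/* e.g., assuming the usual BPERDMAP of 8192: freeing
		 * rem == 1000 blocks starting at blkno == 8000 gives
		 * nb == min(1000, 8192 - 8000) == 192 for this dmap; the
		 * remaining 808 blocks are freed from the next dmap on
		 * the following pass.
		 */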
395 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
396
397 /* free the blocks. */
398 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
399 jfs_error(ip->i_sb, "error in block map\n");
400 release_metapage(mp);
401 IREAD_UNLOCK(ipbmap);
402 return (rc);
403 }
404 }
405
406 /* write the last buffer. */
407 if (mp)
408 write_metapage(mp);
409
410 IREAD_UNLOCK(ipbmap);
411
412 return (0);
413 }
414
415
416 /*
417 * NAME: dbUpdatePMap()
418 *
419 * FUNCTION: update the allocation state (free or allocate) of the
420 * specified block range in the persistent block allocation map.
421 *
422 * the blocks will be updated in the persistent map one
423 * dmap at a time.
424 *
425 * PARAMETERS:
426 * ipbmap - pointer to in-core inode for the block map.
427 * free - 'true' if block range is to be freed from the persistent
428 * map; 'false' if it is to be allocated.
429 * blkno - starting block number of the range.
430 * nblocks - number of contiguous blocks in the range.
431 * tblk - transaction block;
432 *
433 * RETURN VALUES:
434 * 0 - success
435 * -EIO - i/o error
436 */
437 int
438 dbUpdatePMap(struct inode *ipbmap,
439 int free, s64 blkno, s64 nblocks, struct tblock * tblk)
440 {
441 int nblks, dbitno, wbitno, rbits;
442 int word, nbits, nwords;
443 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
444 s64 lblkno, rem, lastlblkno;
445 u32 mask;
446 struct dmap *dp;
447 struct metapage *mp;
448 struct jfs_log *log;
449 int lsn, difft, diffp;
450 unsigned long flags;
451
452 /* the blocks better be within the mapsize. */
453 if (blkno + nblocks > bmp->db_mapsize) {
454 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
455 (unsigned long long) blkno,
456 (unsigned long long) nblocks);
457 jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
458 return -EIO;
459 }
460
461 /* compute delta of transaction lsn from log syncpt */
462 lsn = tblk->lsn;
463 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
464 logdiff(difft, lsn, log);
465
466 /*
467 * update the block state a dmap at a time.
468 */
469 mp = NULL;
470 lastlblkno = 0;
471 for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
472 /* get the buffer for the current dmap. */
473 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
474 if (lblkno != lastlblkno) {
475 if (mp) {
476 write_metapage(mp);
477 }
478
479 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
480 0);
481 if (mp == NULL)
482 return -EIO;
483 metapage_wait_for_io(mp);
484 }
485 dp = (struct dmap *) mp->data;
486
487 /* determine the bit number and word within the dmap of
488 * the starting block. also determine how many blocks
489 * are to be updated within this dmap.
490 */
491 dbitno = blkno & (BPERDMAP - 1);
492 word = dbitno >> L2DBWORD;
493 nblks = min(rem, (s64)BPERDMAP - dbitno);
494
495 /* update the bits of the dmap words. the first and last
496 * words may only have a subset of their bits updated. if
497 * this is the case, we'll work against that word (i.e.
498 * partial first and/or last) only in a single pass. a
499 * single pass will also be used to update all words that
500 * are to have all their bits updated.
501 */
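		/* example of the partial-word mask computed below: for
		 * wbitno == 5 and nbits == 3,
		 * (ONES << (DBWORD - nbits) >> wbitno) leaves bits 5..7 of
		 * the 32-bit word set (counting from the most significant
		 * bit), i.e. 0x07000000.
		 */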
502 for (rbits = nblks; rbits > 0;
503 rbits -= nbits, dbitno += nbits) {
504 /* determine the bit number within the word and
505 * the number of bits within the word.
506 */
507 wbitno = dbitno & (DBWORD - 1);
508 nbits = min(rbits, DBWORD - wbitno);
509
510 /* check if only part of the word is to be updated. */
511 if (nbits < DBWORD) {
512 /* update (free or allocate) the bits
513 * in this word.
514 */
515 mask =
516 (ONES << (DBWORD - nbits) >> wbitno);
517 if (free)
518 dp->pmap[word] &=
519 cpu_to_le32(~mask);
520 else
521 dp->pmap[word] |=
522 cpu_to_le32(mask);
523
524 word += 1;
525 } else {
526 /* one or more words are to have all
527 * their bits updated. determine how
528 * many words and how many bits.
529 */
530 nwords = rbits >> L2DBWORD;
531 nbits = nwords << L2DBWORD;
532
533 /* update (free or allocate) the bits
534 * in these words.
535 */
536 if (free)
537 memset(&dp->pmap[word], 0,
538 nwords * 4);
539 else
540 memset(&dp->pmap[word], (int) ONES,
541 nwords * 4);
542
543 word += nwords;
544 }
545 }
546
547 /*
548 * update dmap lsn
549 */
550 if (lblkno == lastlblkno)
551 continue;
552
553 lastlblkno = lblkno;
554
555 LOGSYNC_LOCK(log, flags);
556 if (mp->lsn != 0) {
557 /* inherit older/smaller lsn */
558 logdiff(diffp, mp->lsn, log);
559 if (difft < diffp) {
560 mp->lsn = lsn;
561
562 /* move bp after tblock in logsync list */
563 list_move(&mp->synclist, &tblk->synclist);
564 }
565
566 /* inherit younger/larger clsn */
567 logdiff(difft, tblk->clsn, log);
568 logdiff(diffp, mp->clsn, log);
569 if (difft > diffp)
570 mp->clsn = tblk->clsn;
571 } else {
572 mp->log = log;
573 mp->lsn = lsn;
574
575 /* insert bp after tblock in logsync list */
576 log->count++;
577 list_add(&mp->synclist, &tblk->synclist);
578
579 mp->clsn = tblk->clsn;
580 }
581 LOGSYNC_UNLOCK(log, flags);
582 }
583
584 /* write the last buffer. */
585 if (mp) {
586 write_metapage(mp);
587 }
588
589 return (0);
590 }
591
592
593 /*
594 * NAME: dbNextAG()
595 *
596 * FUNCTION: find the preferred allocation group for new allocations.
597 *
598 * Within the allocation groups, we maintain a preferred
599 * allocation group which consists of a group with at least
600 * average free space. It is the preferred group that we target
601 * new inode allocation towards. The tie-in between inode
602 * allocation and block allocation occurs as we allocate the
603 * first (data) block of an inode and specify the inode (block)
604 * as the allocation hint for this block.
605 *
606 * We try to avoid having more than one open file growing in
607 * an allocation group, as this will lead to fragmentation.
608 * This differs from the old OS/2 method of trying to keep
609 * empty ags around for large allocations.
610 *
611 * PARAMETERS:
612 * ipbmap - pointer to in-core inode for the block map.
613 *
614 * RETURN VALUES:
615 * the preferred allocation group number.
616 */
617 int dbNextAG(struct inode *ipbmap)
618 {
619 s64 avgfree;
620 int agpref;
621 s64 hwm = 0;
622 int i;
623 int next_best = -1;
624 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
625
626 BMAP_LOCK(bmp);
627
628 /* determine the average number of free blocks within the ags. */
629 avgfree = (u32)bmp->db_nfree / bmp->db_numag;
630
631 /*
632 * if the current preferred ag does not have an active allocator
633 * and has at least average freespace, return it
634 */
635 agpref = bmp->db_agpref;
636 if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
637 (bmp->db_agfree[agpref] >= avgfree))
638 goto unlock;
639
640 /* From the last preferred ag, find the next one with at least
641 * average free space.
642 */
643 for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
644 if (agpref >= bmp->db_numag)
645 agpref = 0;
646
647 if (atomic_read(&bmp->db_active[agpref]))
648 /* open file is currently growing in this ag */
649 continue;
650 if (bmp->db_agfree[agpref] >= avgfree) {
651 /* Return this one */
652 bmp->db_agpref = agpref;
653 goto unlock;
654 } else if (bmp->db_agfree[agpref] > hwm) {
655 /* Less than avg. freespace, but best so far */
656 hwm = bmp->db_agfree[agpref];
657 next_best = agpref;
658 }
659 }
660
661 /*
662 * If no inactive ag was found with average freespace, use the
663 * next best
664 */
665 if (next_best != -1)
666 bmp->db_agpref = next_best;
667 /* else leave db_agpref unchanged */
668 unlock:
669 BMAP_UNLOCK(bmp);
670
671 /* return the preferred group.
672 */
673 return (bmp->db_agpref);
674 }
675
676 /*
677 * NAME: dbAlloc()
678 *
679 * FUNCTION: attempt to allocate a specified number of contiguous free
680 * blocks from the working allocation block map.
681 *
682 * the block allocation policy uses hints and a multi-step
683 * approach.
684 *
685 * for allocation requests smaller than the number of blocks
686 * per dmap, we first try to allocate the new blocks
687 * immediately following the hint. if these blocks are not
688 * available, we try to allocate blocks near the hint. if
689 * no blocks near the hint are available, we next try to
690 * allocate within the same dmap as contains the hint.
691 *
692 * if no blocks are available in the dmap or the allocation
693 * request is larger than the dmap size, we try to allocate
694 * within the same allocation group as contains the hint. if
695 * this does not succeed, we finally try to allocate anywhere
696 * within the aggregate.
697 *
698 * we also try to allocate anywhere within the aggregate
699 * for allocation requests larger than the allocation group
700 * size or requests that specify no hint value.
701 *
702 * PARAMETERS:
703 * ip - pointer to in-core inode;
704 * hint - allocation hint.
705 * nblocks - number of contiguous blocks in the range.
706 * results - on successful return, set to the starting block number
707 * of the newly allocated contiguous range.
708 *
709 * RETURN VALUES:
710 * 0 - success
711 * -ENOSPC - insufficient disk resources
712 * -EIO - i/o error
713 */
714 int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
715 {
716 int rc, agno;
717 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
718 struct bmap *bmp;
719 struct metapage *mp;
720 s64 lblkno, blkno;
721 struct dmap *dp;
722 int l2nb;
723 s64 mapSize;
724 int writers;
725
726 /* assert that nblocks is valid */
727 assert(nblocks > 0);
728
729 /* get the log2 number of blocks to be allocated.
730 * if the number of blocks is not a log2 multiple,
731 * it will be rounded up to the next log2 multiple.
732 */
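	/* e.g. nblocks == 5 rounds up to 8, so l2nb == 3; an exact power
	 * of two such as nblocks == 8 also yields l2nb == 3.
	 */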
733 l2nb = BLKSTOL2(nblocks);
734
735 bmp = JFS_SBI(ip->i_sb)->bmap;
736
737 mapSize = bmp->db_mapsize;
738
739 /* the hint should be within the map */
740 if (hint >= mapSize) {
741 jfs_error(ip->i_sb, "the hint is outside the map\n");
742 return -EIO;
743 }
744
745 /* if the number of blocks to be allocated is greater than the
746 * allocation group size, try to allocate anywhere.
747 */
748 if (l2nb > bmp->db_agl2size) {
749 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
750
751 rc = dbAllocAny(bmp, nblocks, l2nb, results);
752
753 goto write_unlock;
754 }
755
756 /*
757 * If no hint, let dbNextAG recommend an allocation group
758 */
759 if (hint == 0)
760 goto pref_ag;
761
762 /* we would like to allocate close to the hint. adjust the
763 * hint to the block following the hint since the allocators
764 * will start looking for free space starting at this point.
765 */
766 blkno = hint + 1;
767
768 if (blkno >= bmp->db_mapsize)
769 goto pref_ag;
770
771 agno = blkno >> bmp->db_agl2size;
772
773 /* check if blkno crosses over into a new allocation group.
774 * if so, check if we should allow allocations within this
775 * allocation group.
776 */
777 if ((blkno & (bmp->db_agsize - 1)) == 0)
778 /* check if the AG is currently being written to.
779 * if so, call dbNextAG() to find a non-busy
780 * AG with sufficient free space.
781 */
782 if (atomic_read(&bmp->db_active[agno]))
783 goto pref_ag;
784
785 /* check if the allocation request size can be satisfied from a
786 * single dmap. if so, try to allocate from the dmap containing
787 * the hint using a tiered strategy.
788 */
789 if (nblocks <= BPERDMAP) {
790 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
791
792 /* get the buffer for the dmap containing the hint.
793 */
794 rc = -EIO;
795 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
796 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
797 if (mp == NULL)
798 goto read_unlock;
799
800 dp = (struct dmap *) mp->data;
801
802 /* first, try to satisfy the allocation request with the
803 * blocks beginning at the hint.
804 */
805 if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
806 != -ENOSPC) {
807 if (rc == 0) {
808 *results = blkno;
809 mark_metapage_dirty(mp);
810 }
811
812 release_metapage(mp);
813 goto read_unlock;
814 }
815
816 writers = atomic_read(&bmp->db_active[agno]);
817 if ((writers > 1) ||
818 ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
819 /*
820 * Someone else is writing in this allocation
821 * group. To avoid fragmenting, try another ag
822 */
823 release_metapage(mp);
824 IREAD_UNLOCK(ipbmap);
825 goto pref_ag;
826 }
827
828 /* next, try to satisfy the allocation request with blocks
829 * near the hint.
830 */
831 if ((rc =
832 dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
833 != -ENOSPC) {
834 if (rc == 0)
835 mark_metapage_dirty(mp);
836
837 release_metapage(mp);
838 goto read_unlock;
839 }
840
841 /* try to satisfy the allocation request with blocks within
842 * the same dmap as the hint.
843 */
844 if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
845 != -ENOSPC) {
846 if (rc == 0)
847 mark_metapage_dirty(mp);
848
849 release_metapage(mp);
850 goto read_unlock;
851 }
852
853 release_metapage(mp);
854 IREAD_UNLOCK(ipbmap);
855 }
856
857 /* try to satisfy the allocation request with blocks within
858 * the same allocation group as the hint.
859 */
860 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
861 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
862 goto write_unlock;
863
864 IWRITE_UNLOCK(ipbmap);
865
866
867 pref_ag:
868 /*
869 * Let dbNextAG recommend a preferred allocation group
870 */
871 agno = dbNextAG(ipbmap);
872 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
873
874 /* Try to allocate within this allocation group. if that fails, try to
875 * allocate anywhere in the map.
876 */
877 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
878 rc = dbAllocAny(bmp, nblocks, l2nb, results);
879
880 write_unlock:
881 IWRITE_UNLOCK(ipbmap);
882
883 return (rc);
884
885 read_unlock:
886 IREAD_UNLOCK(ipbmap);
887
888 return (rc);
889 }
890
891 /*
892 * NAME: dbReAlloc()
893 *
894 * FUNCTION: attempt to extend a current allocation by a specified
895 * number of blocks.
896 *
897 * this routine attempts to satisfy the allocation request
898 * by first trying to extend the existing allocation in
899 * place by allocating the additional blocks as the blocks
900 * immediately following the current allocation. if these
901 * blocks are not available, this routine will attempt to
902 * allocate a new set of contiguous blocks large enough
903 * to cover the existing allocation plus the additional
904 * number of blocks required.
905 *
906 * PARAMETERS:
907 * ip - pointer to in-core inode requiring allocation.
908 * blkno - starting block of the current allocation.
909 * nblocks - number of contiguous blocks within the current
910 * allocation.
911 * addnblocks - number of blocks to add to the allocation.
912 * results - on successful return, set to the starting block number
913 * of the existing allocation if the existing allocation
914 * was extended in place or to a newly allocated contiguous
915 * range if the existing allocation could not be extended
916 * in place.
917 *
918 * RETURN VALUES:
919 * 0 - success
920 * -ENOSPC - insufficient disk resources
921 * -EIO - i/o error
922 */
923 int
924 dbReAlloc(struct inode *ip,
925 s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
926 {
927 int rc;
928
929 /* try to extend the allocation in place.
930 */
931 if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
932 *results = blkno;
933 return (0);
934 } else {
935 if (rc != -ENOSPC)
936 return (rc);
937 }
938
939 /* could not extend the allocation in place, so allocate a
940 * new set of blocks for the entire request (i.e. try to get
941 * a range of contiguous blocks large enough to cover the
942 * existing allocation plus the additional blocks.)
943 */
944 return (dbAlloc
945 (ip, blkno + nblocks - 1, addnblocks + nblocks, results));
946 }
947
948
949 /*
950 * NAME: dbExtend()
951 *
952 * FUNCTION: attempt to extend a current allocation by a specified
953 * number of blocks.
954 *
955 * this routine attempts to satisfy the allocation request
956 * by first trying to extend the existing allocation in
957 * place by allocating the additional blocks as the blocks
958 * immediately following the current allocation.
959 *
960 * PARAMETERS:
961 * ip - pointer to in-core inode requiring allocation.
962 * blkno - starting block of the current allocation.
963 * nblocks - number of contiguous blocks within the current
964 * allocation.
965 * addnblocks - number of blocks to add to the allocation.
966 *
967 * RETURN VALUES:
968 * 0 - success
969 * -ENOSPC - insufficient disk resources
970 * -EIO - i/o error
971 */
972 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
973 {
974 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
975 s64 lblkno, lastblkno, extblkno;
976 uint rel_block;
977 struct metapage *mp;
978 struct dmap *dp;
979 int rc;
980 struct inode *ipbmap = sbi->ipbmap;
981 struct bmap *bmp;
982
983 /*
984 * We don't want a non-aligned extent to cross a page boundary
985 */
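	/* e.g. with 4K pages and 1K blocks (sbi->nbperpage == 4), a
	 * one-block extent starting at rel_block == 2 may grow by at most
	 * one more block (2 + 1 + addnblocks <= 4); anything larger
	 * returns -ENOSPC here and dbReAlloc() falls back to a fresh
	 * allocation through dbAlloc().
	 */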
986 if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
987 (rel_block + nblocks + addnblocks > sbi->nbperpage))
988 return -ENOSPC;
989
990 /* get the last block of the current allocation */
991 lastblkno = blkno + nblocks - 1;
992
993 /* determine the block number of the block following
994 * the existing allocation.
995 */
996 extblkno = lastblkno + 1;
997
998 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
999
1000 /* better be within the file system */
1001 bmp = sbi->bmap;
1002 if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
1003 IREAD_UNLOCK(ipbmap);
1004 jfs_error(ip->i_sb, "the block is outside the filesystem\n");
1005 return -EIO;
1006 }
1007
1008 /* we'll attempt to extend the current allocation in place by
1009 * allocating the additional blocks as the blocks immediately
1010 * following the current allocation. we only try to extend the
1011 * current allocation in place if the number of additional blocks
1012 * can fit into a dmap, the last block of the current allocation
1013 * is not the last block of the file system, and the start of the
1014 * inplace extension is not on an allocation group boundary.
1015 */
1016 if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
1017 (extblkno & (bmp->db_agsize - 1)) == 0) {
1018 IREAD_UNLOCK(ipbmap);
1019 return -ENOSPC;
1020 }
1021
1022 /* get the buffer for the dmap containing the first block
1023 * of the extension.
1024 */
1025 lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
1026 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
1027 if (mp == NULL) {
1028 IREAD_UNLOCK(ipbmap);
1029 return -EIO;
1030 }
1031
1032 dp = (struct dmap *) mp->data;
1033
1034 /* try to allocate the blocks immediately following the
1035 * current allocation.
1036 */
1037 rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
1038
1039 IREAD_UNLOCK(ipbmap);
1040
1041 /* were we successful ? */
1042 if (rc == 0)
1043 write_metapage(mp);
1044 else
1045 /* we were not successful */
1046 release_metapage(mp);
1047
1048 return (rc);
1049 }
1050
1051
1052 /*
1053 * NAME: dbAllocNext()
1054 *
1055 * FUNCTION: attempt to allocate the blocks of the specified block
1056 * range within a dmap.
1057 *
1058 * PARAMETERS:
1059 * bmp - pointer to bmap descriptor
1060 * dp - pointer to dmap.
1061 * blkno - starting block number of the range.
1062 * nblocks - number of contiguous free blocks of the range.
1063 *
1064 * RETURN VALUES:
1065 * 0 - success
1066 * -ENOSPC - insufficient disk resources
1067 * -EIO - i/o error
1068 *
1069 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1070 */
1071 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
1072 int nblocks)
1073 {
1074 int dbitno, word, rembits, nb, nwords, wbitno, nw;
1075 int l2size;
1076 s8 *leaf;
1077 u32 mask;
1078
1079 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1080 jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
1081 return -EIO;
1082 }
1083
1084 /* pick up a pointer to the leaves of the dmap tree.
1085 */
1086 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1087
1088 /* determine the bit number and word within the dmap of the
1089 * starting block.
1090 */
1091 dbitno = blkno & (BPERDMAP - 1);
1092 word = dbitno >> L2DBWORD;
1093
1094 /* check if the specified block range is contained within
1095 * this dmap.
1096 */
1097 if (dbitno + nblocks > BPERDMAP)
1098 return -ENOSPC;
1099
1100 /* check if the starting leaf indicates that anything
1101 * is free.
1102 */
1103 if (leaf[word] == NOFREE)
1104 return -ENOSPC;
1105
1106 /* check the dmaps words corresponding to block range to see
1107 * if the block range is free. not all bits of the first and
1108 * last words may be contained within the block range. if this
1109 * is the case, we'll work against those words (i.e. partial first
1110 * and/or last) on an individual basis (a single pass) and examine
1111 * the actual bits to determine if they are free. a single pass
1112 * will be used for all dmap words fully contained within the
1113 * specified range. within this pass, the leaves of the dmap
1114 * tree will be examined to determine if the blocks are free. a
1115 * single leaf may describe the free space of multiple dmap
1116 * words, so we may visit only a subset of the actual leaves
1117 * corresponding to the dmap words of the block range.
1118 */
1119 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
1120 /* determine the bit number within the word and
1121 * the number of bits within the word.
1122 */
1123 wbitno = dbitno & (DBWORD - 1);
1124 nb = min(rembits, DBWORD - wbitno);
1125
1126 /* check if only part of the word is to be examined.
1127 */
1128 if (nb < DBWORD) {
1129 /* check if the bits are free.
1130 */
1131 mask = (ONES << (DBWORD - nb) >> wbitno);
1132 if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
1133 return -ENOSPC;
1134
1135 word += 1;
1136 } else {
1137 /* one or more dmap words are fully contained
1138 * within the block range. determine how many
1139 * words and how many bits.
1140 */
1141 nwords = rembits >> L2DBWORD;
1142 nb = nwords << L2DBWORD;
1143
1144 /* now examine the appropriate leaves to determine
1145 * if the blocks are free.
1146 */
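			/* worked example, assuming BUDMIN == L2DBWORD (5):
			 * a leaf value of 7 describes a free buddy of 2^7
			 * blocks, i.e. BUDSIZE(7, BUDMIN) == 4 dmap words,
			 * so the loop below can skip ahead four words at
			 * once.
			 */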
1147 while (nwords > 0) {
1148 /* does the leaf describe any free space ?
1149 */
1150 if (leaf[word] < BUDMIN)
1151 return -ENOSPC;
1152
1153 /* determine the l2 number of bits provided
1154 * by this leaf.
1155 */
1156 l2size =
1157 min_t(int, leaf[word], NLSTOL2BSZ(nwords));
1158
1159 /* determine how many words were handled.
1160 */
1161 nw = BUDSIZE(l2size, BUDMIN);
1162
1163 nwords -= nw;
1164 word += nw;
1165 }
1166 }
1167 }
1168
1169 /* allocate the blocks.
1170 */
1171 return (dbAllocDmap(bmp, dp, blkno, nblocks));
1172 }
1173
1174
1175 /*
1176 * NAME: dbAllocNear()
1177 *
1178 * FUNCTION: attempt to allocate a number of contiguous free blocks near
1179 * a specified block (hint) within a dmap.
1180 *
1181 * starting with the dmap leaf that covers the hint, we'll
1182 * check the next four contiguous leaves for sufficient free
1183 * space. if sufficient free space is found, we'll allocate
1184 * the desired free space.
1185 *
1186 * PARAMETERS:
1187 * bmp - pointer to bmap descriptor
1188 * dp - pointer to dmap.
1189 * blkno - block number to allocate near.
1190 * nblocks - actual number of contiguous free blocks desired.
1191 * l2nb - log2 number of contiguous free blocks desired.
1192 * results - on successful return, set to the starting block number
1193 * of the newly allocated range.
1194 *
1195 * RETURN VALUES:
1196 * 0 - success
1197 * -ENOSPC - insufficient disk resources
1198 * -EIO - i/o error
1199 *
1200 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1201 */
1202 static int
1203 dbAllocNear(struct bmap * bmp,
1204 struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
1205 {
1206 int word, lword, rc;
1207 s8 *leaf;
1208
1209 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1210 jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmap page\n");
1211 return -EIO;
1212 }
1213
1214 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1215
1216 /* determine the word within the dmap that holds the hint
1217 * (i.e. blkno). also, determine the last word in the dmap
1218 * that we'll include in our examination.
1219 */
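	/* e.g. a hint at offset 100 within the dmap falls in word 3
	 * (100 >> L2DBWORD), so words 3..6 (blocks 96..223 of the dmap)
	 * are examined below.
	 */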
1220 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
1221 lword = min(word + 4, LPERDMAP);
1222
1223 /* examine the leaves for sufficient free space.
1224 */
1225 for (; word < lword; word++) {
1226 /* does the leaf describe sufficient free space ?
1227 */
1228 if (leaf[word] < l2nb)
1229 continue;
1230
1231 /* determine the block number within the file system
1232 * of the first block described by this dmap word.
1233 */
1234 blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
1235
1236 /* if not all bits of the dmap word are free, get the
1237 * starting bit number within the dmap word of the required
1238 * string of free bits and adjust the block number with the
1239 * value.
1240 */
1241 if (leaf[word] < BUDMIN)
1242 blkno +=
1243 dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
1244
1245 /* allocate the blocks.
1246 */
1247 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1248 *results = blkno;
1249
1250 return (rc);
1251 }
1252
1253 return -ENOSPC;
1254 }
1255
1256
1257 /*
1258 * NAME: dbAllocAG()
1259 *
1260 * FUNCTION: attempt to allocate the specified number of contiguous
1261 * free blocks within the specified allocation group.
1262 *
1263 * unless the allocation group size is equal to the number
1264 * of blocks per dmap, the dmap control pages will be used to
1265 * find the required free space, if available. we start the
1266 * search at the highest dmap control page level which
1267 * distinctly describes the allocation group's free space
1268 * (i.e. the highest level at which the allocation group's
1269 * free space is not mixed in with that of any other group).
1270 * in addition, we start the search within this level at a
1271 * height of the dmapctl dmtree at which the nodes distinctly
1272 * describe the allocation group's free space. at this height,
1273 * the allocation group's free space may be represented by one
1274 * or two subtrees, depending on the allocation group size.
1275 * we search the top nodes of these subtrees left to right for
1276 * sufficient free space. if sufficient free space is found,
1277 * the subtree is searched to find the leftmost leaf that
1278 * has free space. once we have made it to the leaf, we
1279 * move the search to the next lower level dmap control page
1280 * corresponding to this leaf. we continue down the dmap control
1281 * pages until we find the dmap that contains or starts the
1282 * sufficient free space and we allocate at this dmap.
1283 *
1284 * if the allocation group size is equal to the dmap size,
1285 * we'll start at the dmap corresponding to the allocation
1286 * group and attempt the allocation at this level.
1287 *
1288 * the dmap control page search is also not performed if the
1289 * allocation group is completely free and we go to the first
1290 * dmap of the allocation group to do the allocation. this is
1291 * done because the allocation group may be part (not the first
1292 * part) of a larger binary buddy system, causing the dmap
1293 * control pages to indicate no free space (NOFREE) within
1294 * the allocation group.
1295 *
1296 * PARAMETERS:
1297 * bmp - pointer to bmap descriptor
1298 * agno - allocation group number.
1299 * nblocks - actual number of contiguous free blocks desired.
1300 * l2nb - log2 number of contiguous free blocks desired.
1301 * results - on successful return, set to the starting block number
1302 * of the newly allocated range.
1303 *
1304 * RETURN VALUES:
1305 * 0 - success
1306 * -ENOSPC - insufficient disk resources
1307 * -EIO - i/o error
1308 *
1309 * note: IWRITE_LOCK(ipbmap) held on entry/exit;
1310 */
1311 static int
1312 dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
1313 {
1314 struct metapage *mp;
1315 struct dmapctl *dcp;
1316 int rc, ti, i, k, m, n, agperlev;
1317 s64 blkno, lblkno;
1318 int budmin;
1319
1320 /* allocation request should not be for more than the
1321 * allocation group size.
1322 */
1323 if (l2nb > bmp->db_agl2size) {
1324 jfs_error(bmp->db_ipbmap->i_sb,
1325 "allocation request is larger than the allocation group size\n");
1326 return -EIO;
1327 }
1328
1329 /* determine the starting block number of the allocation
1330 * group.
1331 */
1332 blkno = (s64) agno << bmp->db_agl2size;
1333
1334 /* check if the allocation group size is the minimum allocation
1335 * group size or if the allocation group is completely free. if
1336 * the allocation group size is the minimum size of BPERDMAP (i.e.
1337 * 1 dmap), there is no need to search the dmap control page (below)
1338 * that fully describes the allocation group since the allocation
1339 * group is already fully described by a dmap. in this case, we
1340 * just call dbAllocCtl() to search the dmap tree and allocate the
1341 * required space if available.
1342 *
1343 * if the allocation group is completely free, dbAllocCtl() is
1344 * also called to allocate the required space. this is done for
1345 * two reasons. first, it makes no sense searching the dmap control
1346 * pages for free space when we know that free space exists. second,
1347 * the dmap control pages may indicate that the allocation group
1348 * has no free space if the allocation group is part (not the first
1349 * part) of a larger binary buddy system.
1350 */
1351 if (bmp->db_agsize == BPERDMAP
1352 || bmp->db_agfree[agno] == bmp->db_agsize) {
1353 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1354 if ((rc == -ENOSPC) &&
1355 (bmp->db_agfree[agno] == bmp->db_agsize)) {
1356 printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
1357 (unsigned long long) blkno,
1358 (unsigned long long) nblocks);
1359 jfs_error(bmp->db_ipbmap->i_sb,
1360 "dbAllocCtl failed in free AG\n");
1361 }
1362 return (rc);
1363 }
1364
1365 /* the buffer for the dmap control page that fully describes the
1366 * allocation group.
1367 */
1368 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
1369 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1370 if (mp == NULL)
1371 return -EIO;
1372 dcp = (struct dmapctl *) mp->data;
1373 budmin = dcp->budmin;
1374
1375 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1376 jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
1377 release_metapage(mp);
1378 return -EIO;
1379 }
1380
1381 /* search the subtree(s) of the dmap control page that describes
1382 * the allocation group, looking for sufficient free space. to begin,
1383 * determine how many allocation groups are represented in a dmap
1384 * control page at the control page level (i.e. L0, L1, L2) that
1385 * fully describes an allocation group. next, determine the starting
1386 * tree index of this allocation group within the control page.
1387 */
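	/* hypothetical example, assuming L2LPERCTL == 10: with
	 * db_agheight == 1 and db_agwidth == 1, agperlev is
	 * (1 << (10 - 2)) / 1 == 256, and ag 5 is described by the single
	 * subtree rooted at index db_agstart + 5 of this control page.
	 */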
1388 agperlev =
1389 (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
1390 ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
1391
1392 /* dmap control page trees fan-out by 4 and a single allocation
1393 * group may be described by 1 or 2 subtrees within the ag level
1394 * dmap control page, depending upon the ag size. examine the ag's
1395 * subtrees for sufficient free space, starting with the leftmost
1396 * subtree.
1397 */
1398 for (i = 0; i < bmp->db_agwidth; i++, ti++) {
1399 /* is there sufficient free space ?
1400 */
1401 if (l2nb > dcp->stree[ti])
1402 continue;
1403
1404 /* sufficient free space found in a subtree. now search down
1405 * the subtree to find the leftmost leaf that describes this
1406 * free space.
1407 */
1408 for (k = bmp->db_agheight; k > 0; k--) {
1409 for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
1410 if (l2nb <= dcp->stree[m + n]) {
1411 ti = m + n;
1412 break;
1413 }
1414 }
1415 if (n == 4) {
1416 jfs_error(bmp->db_ipbmap->i_sb,
1417 "failed descending stree\n");
1418 release_metapage(mp);
1419 return -EIO;
1420 }
1421 }
1422
1423 /* determine the block number within the file system
1424 * that corresponds to this leaf.
1425 */
1426 if (bmp->db_aglevel == 2)
1427 blkno = 0;
1428 else if (bmp->db_aglevel == 1)
1429 blkno &= ~(MAXL1SIZE - 1);
1430 else /* bmp->db_aglevel == 0 */
1431 blkno &= ~(MAXL0SIZE - 1);
1432
1433 blkno +=
1434 ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
1435
1436 /* release the buffer in preparation for going down
1437 * the next level of dmap control pages.
1438 */
1439 release_metapage(mp);
1440
1441 /* check if we need to continue to search down the lower
1442 * level dmap control pages. we need to if the number of
1443 * blocks required is less than maximum number of blocks
1444 * described at the next lower level.
1445 */
1446 if (l2nb < budmin) {
1447
1448 /* search the lower level dmap control pages to get
1449 * the starting block number of the dmap that
1450 * contains or starts off the free space.
1451 */
1452 if ((rc =
1453 dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
1454 &blkno))) {
1455 if (rc == -ENOSPC) {
1456 jfs_error(bmp->db_ipbmap->i_sb,
1457 "control page inconsistent\n");
1458 return -EIO;
1459 }
1460 return (rc);
1461 }
1462 }
1463
1464 /* allocate the blocks.
1465 */
1466 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1467 if (rc == -ENOSPC) {
1468 jfs_error(bmp->db_ipbmap->i_sb,
1469 "unable to allocate blocks\n");
1470 rc = -EIO;
1471 }
1472 return (rc);
1473 }
1474
1475 /* no space in the allocation group. release the buffer and
1476 * return -ENOSPC.
1477 */
1478 release_metapage(mp);
1479
1480 return -ENOSPC;
1481 }
1482
1483
1484 /*
1485 * NAME: dbAllocAny()
1486 *
1487 * FUNCTION: attempt to allocate the specified number of contiguous
1488 * free blocks anywhere in the file system.
1489 *
1490 * dbAllocAny() attempts to find the sufficient free space by
1491 * searching down the dmap control pages, starting with the
1492 * highest level (i.e. L0, L1, L2) control page. if free space
1493 * large enough to satisfy the desired free space is found, the
1494 * desired free space is allocated.
1495 *
1496 * PARAMETERS:
1497 * bmp - pointer to bmap descriptor
1498 * nblocks - actual number of contiguous free blocks desired.
1499 * l2nb - log2 number of contiguous free blocks desired.
1500 * results - on successful return, set to the starting block number
1501 * of the newly allocated range.
1502 *
1503 * RETURN VALUES:
1504 * 0 - success
1505 * -ENOSPC - insufficient disk resources
1506 * -EIO - i/o error
1507 *
1508 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1509 */
1510 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
1511 {
1512 int rc;
1513 s64 blkno = 0;
1514
1515 /* starting with the top level dmap control page, search
1516 * down the dmap control levels for sufficient free space.
1517 * if free space is found, dbFindCtl() returns the starting
1518 * block number of the dmap that contains or starts off the
1519 * range of free space.
1520 */
1521 if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
1522 return (rc);
1523
1524 /* allocate the blocks.
1525 */
1526 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1527 if (rc == -ENOSPC) {
1528 jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");
1529 return -EIO;
1530 }
1531 return (rc);
1532 }
1533
1534
1535 /*
1536 * NAME: dbDiscardAG()
1537 *
1538 * FUNCTION: attempt to discard (TRIM) all free blocks of specific AG
1539 *
1540 * algorithm:
1541 * 1) allocate blocks, as large as possible and save them
1542 * while holding IWRITE_LOCK on ipbmap
1543 * 2) trim all these saved block/length values
1544 * 3) mark the blocks free again
1545 *
1546 * benefit:
1547 * - we work on only one ag at a time, minimizing how long we
1548 * need to lock ipbmap
1549 * - reading / writing the fs is possible most of the time, even
1550 * while trimming
1551 *
1552 * downside:
1553 * - we write two times to the dmapctl and dmap pages
1554 * - but for me, this seems the best way, better ideas?
1555 * /TR 2012
1556 *
1557 * PARAMETERS:
1558 * ip - pointer to in-core inode
1559 * agno - ag to trim
1560 * minlen - minimum value of contiguous blocks
1561 *
1562 * RETURN VALUES:
1563 * s64 - actual number of blocks trimmed
1564 */
1565 s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
1566 {
1567 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
1568 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
1569 s64 nblocks, blkno;
1570 u64 trimmed = 0;
1571 int rc, l2nb;
1572 struct super_block *sb = ipbmap->i_sb;
1573
1574 struct range2trim {
1575 u64 blkno;
1576 u64 nblocks;
1577 } *totrim, *tt;
1578
1579 /* max blkno / nblocks pairs to trim */
1580 int count = 0, range_cnt;
1581 u64 max_ranges;
1582
1583 /* prevent others from writing new stuff here, while trimming */
1584 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
1585
1586 nblocks = bmp->db_agfree[agno];
1587 max_ranges = nblocks;
1588 do_div(max_ranges, minlen);
1589 range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
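	/* e.g. an ag with 1,000,000 free blocks and minlen == 64 yields
	 * max_ranges == 15625, so range_cnt == 15626 entries are reserved
	 * (capped at 32K); the extra slot leaves room for the terminating
	 * entry written after the loop below.
	 */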
1590 totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
1591 if (totrim == NULL) {
1592 jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
1593 IWRITE_UNLOCK(ipbmap);
1594 return 0;
1595 }
1596
1597 tt = totrim;
1598 while (nblocks >= minlen) {
1599 l2nb = BLKSTOL2(nblocks);
1600
1601 /* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
1602 rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
1603 if (rc == 0) {
1604 tt->blkno = blkno;
1605 tt->nblocks = nblocks;
1606 tt++; count++;
1607
1608 /* the whole ag is free, trim now */
1609 if (bmp->db_agfree[agno] == 0)
1610 break;
1611
1612 /* give a hint for the next while */
1613 nblocks = bmp->db_agfree[agno];
1614 continue;
1615 } else if (rc == -ENOSPC) {
1616 /* search for next smaller log2 block */
1617 l2nb = BLKSTOL2(nblocks) - 1;
1618 if (unlikely(l2nb < 0))
1619 break;
1620 nblocks = 1LL << l2nb;
1621 } else {
1622 /* Trim any already allocated blocks */
1623 jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
1624 break;
1625 }
1626
1627 /* check, if our trim array is full */
1628 if (unlikely(count >= range_cnt - 1))
1629 break;
1630 }
1631 IWRITE_UNLOCK(ipbmap);
1632
1633 tt->nblocks = 0; /* mark the current end */
1634 for (tt = totrim; tt->nblocks != 0; tt++) {
1635 /* when mounted with online discard, dbFree() will
1636 * call jfs_issue_discard() itself */
1637 if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
1638 jfs_issue_discard(ip, tt->blkno, tt->nblocks);
1639 dbFree(ip, tt->blkno, tt->nblocks);
1640 trimmed += tt->nblocks;
1641 }
1642 kfree(totrim);
1643
1644 return trimmed;
1645 }
1646
1647 /*
1648 * NAME: dbFindCtl()
1649 *
1650 * FUNCTION: starting at a specified dmap control page level and block
1651 * number, search down the dmap control levels for a range of
1652 * contiguous free blocks large enough to satisfy an allocation
1653 * request for the specified number of free blocks.
1654 *
1655 * if sufficient contiguous free blocks are found, this routine
1656 * returns the starting block number within a dmap page that
1657 * contains or starts a range of contiguous free blocks that
1658 * is sufficient in size.
1659 *
1660 * PARAMETERS:
1661 * bmp - pointer to bmap descriptor
1662 * level - starting dmap control page level.
1663 * l2nb - log2 number of contiguous free blocks desired.
1664 * *blkno - on entry, starting block number for conducting the search.
1665 * on successful return, the first block within a dmap page
1666 * that contains or starts a range of contiguous free blocks.
1667 *
1668 * RETURN VALUES:
1669 * 0 - success
1670 * -ENOSPC - insufficient disk resources
1671 * -EIO - i/o error
1672 *
1673 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1674 */
1675 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
1676 {
1677 int rc, leafidx, lev;
1678 s64 b, lblkno;
1679 struct dmapctl *dcp;
1680 int budmin;
1681 struct metapage *mp;
1682
1683 /* starting at the specified dmap control page level and block
1684 * number, search down the dmap control levels for the starting
1685 * block number of a dmap page that contains or starts off
1686 * sufficient free blocks.
1687 */
1688 for (lev = level, b = *blkno; lev >= 0; lev--) {
1689 /* get the buffer of the dmap control page for the block
1690 * number and level (i.e. L0, L1, L2).
1691 */
1692 lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
1693 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1694 if (mp == NULL)
1695 return -EIO;
1696 dcp = (struct dmapctl *) mp->data;
1697 budmin = dcp->budmin;
1698
1699 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1700 jfs_error(bmp->db_ipbmap->i_sb,
1701 "Corrupt dmapctl page\n");
1702 release_metapage(mp);
1703 return -EIO;
1704 }
1705
1706 /* search the tree within the dmap control page for
1707 * sufficient free space. if sufficient free space is found,
1708 * dbFindLeaf() returns the index of the leaf at which
1709 * free space was found.
1710 */
1711 rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
1712
1713 /* release the buffer.
1714 */
1715 release_metapage(mp);
1716
1717 /* space found ?
1718 */
1719 if (rc) {
1720 if (lev != level) {
1721 jfs_error(bmp->db_ipbmap->i_sb,
1722 "dmap inconsistent\n");
1723 return -EIO;
1724 }
1725 return -ENOSPC;
1726 }
1727
1728 /* adjust the block number to reflect the location within
1729 * the dmap control page (i.e. the leaf) at which free
1730 * space was found.
1731 */
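		/* e.g. on an L0 control page budmin is L2BPERDMAP, so a
		 * leaf index of 3 advances b by three dmaps' worth of
		 * blocks (3 << 13, assuming the usual BPERDMAP of 8192).
		 */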
1732 b += (((s64) leafidx) << budmin);
1733
1734 /* we stop the search at this dmap control page level if
1735 * the number of blocks required is greater than or equal
1736 * to the maximum number of blocks described at the next
1737 * (lower) level.
1738 */
1739 if (l2nb >= budmin)
1740 break;
1741 }
1742
1743 *blkno = b;
1744 return (0);
1745 }
1746
1747
1748 /*
1749 * NAME: dbAllocCtl()
1750 *
1751 * FUNCTION: attempt to allocate a specified number of contiguous
1752 * blocks starting within a specific dmap.
1753 *
1754 * this routine is called by higher level routines that search
1755 * the dmap control pages above the actual dmaps for contiguous
1756 * free space. the result of successful searches by these
1757 * routines are the starting block numbers within dmaps, with
1758 * the dmaps themselves containing the desired contiguous free
1759 * space or starting a contiguous free space of desired size
1760 * that is made up of the blocks of one or more dmaps. these
1761 * calls should not fail due to insufficient resources.
1762 *
1763 * this routine is called in some cases where it is not known
1764 * whether it will fail due to insufficient resources. more
1765 * specifically, this occurs when allocating from an allocation
1766 * group whose size is equal to the number of blocks per dmap.
1767 * in this case, the dmap control pages are not examined prior
1768 * to calling this routine (to save pathlength) and the call
1769 * might fail.
1770 *
1771 * for a request size that fits within a dmap, this routine relies
1772 * upon the dmap's dmtree to find the requested contiguous free
1773 * space. for request sizes that are larger than a dmap, the
1774 * requested free space will start at the first block of the
1775 * first dmap (i.e. blkno).
1776 *
1777 * PARAMETERS:
1778 * bmp - pointer to bmap descriptor
1779 * nblocks - actual number of contiguous free blocks to allocate.
1780 * l2nb - log2 number of contiguous free blocks to allocate.
1781 * blkno - starting block number of the dmap to start the allocation
1782 * from.
1783 * results - on successful return, set to the starting block number
1784 * of the newly allocated range.
1785 *
1786 * RETURN VALUES:
1787 * 0 - success
1788 * -ENOSPC - insufficient disk resources
1789 * -EIO - i/o error
1790 *
1791 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1792 */
1793 static int
1794 dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
1795 {
1796 int rc, nb;
1797 s64 b, lblkno, n;
1798 struct metapage *mp;
1799 struct dmap *dp;
1800
1801 /* check if the allocation request is confined to a single dmap.
1802 */
1803 if (l2nb <= L2BPERDMAP) {
1804 /* get the buffer for the dmap.
1805 */
1806 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
1807 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1808 if (mp == NULL)
1809 return -EIO;
1810 dp = (struct dmap *) mp->data;
1811
1812 if (dp->tree.budmin < 0) {
1813 release_metapage(mp);
1814 return -EIO;
1814 }
1815 /* try to allocate the blocks.
1816 */
1817 rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
1818 if (rc == 0)
1819 mark_metapage_dirty(mp);
1820
1821 release_metapage(mp);
1822
1823 return (rc);
1824 }
1825
1826 /* allocation request involving multiple dmaps. it must start on
1827 * a dmap boundary.
1828 */
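/* for example, a request for 2^15 blocks (l2nb = 15) spans four
 * dmaps: blkno is dmap-aligned (asserted below) and the loop that
 * follows allocates BPERDMAP blocks from each of the four fully
 * free dmaps in turn.
 */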
1829 assert((blkno & (BPERDMAP - 1)) == 0);
1830
1831 /* allocate the blocks dmap by dmap.
1832 */
1833 for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
1834 /* get the buffer for the dmap.
1835 */
1836 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1837 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1838 if (mp == NULL) {
1839 rc = -EIO;
1840 goto backout;
1841 }
1842 dp = (struct dmap *) mp->data;
1843
1844 /* the dmap better be all free.
1845 */
1846 if (dp->tree.stree[ROOT] != L2BPERDMAP) {
1847 release_metapage(mp);
1848 jfs_error(bmp->db_ipbmap->i_sb,
1849 "the dmap is not all free\n");
1850 rc = -EIO;
1851 goto backout;
1852 }
1853
1854 /* determine how many blocks to allocate from this dmap.
1855 */
1856 nb = min_t(s64, n, BPERDMAP);
1857
1858 /* allocate the blocks from the dmap.
1859 */
1860 if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
1861 release_metapage(mp);
1862 goto backout;
1863 }
1864
1865 /* write the buffer.
1866 */
1867 write_metapage(mp);
1868 }
1869
1870 /* set the results (starting block number) and return.
1871 */
1872 *results = blkno;
1873 return (0);
1874
1875 /* something failed in handling an allocation request involving
1876 * multiple dmaps. we'll try to clean up by backing out any
1877 * allocation that has already happened for this request. if
1878 * we fail in backing out the allocation, we'll mark the file
1879 * system to indicate that blocks have been leaked.
1880 */
1881 backout:
1882
1883 /* try to backout the allocations dmap by dmap.
1884 */
1885 for (n = nblocks - n, b = blkno; n > 0;
1886 n -= BPERDMAP, b += BPERDMAP) {
1887 /* get the buffer for this dmap.
1888 */
1889 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1890 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1891 if (mp == NULL) {
1892 /* could not back out. mark the file system
1893 * to indicate that we have leaked blocks.
1894 */
1895 jfs_error(bmp->db_ipbmap->i_sb,
1896 "I/O Error: Block Leakage\n");
1897 continue;
1898 }
1899 dp = (struct dmap *) mp->data;
1900
1901 /* free the blocks in this dmap.
1902 */
1903 if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
1904 /* could not back out. mark the file system
1905 * to indicate that we have leaked blocks.
1906 */
1907 release_metapage(mp);
1908 jfs_error(bmp->db_ipbmap->i_sb, "Block Leakage\n");
1909 continue;
1910 }
1911
1912 /* write the buffer.
1913 */
1914 write_metapage(mp);
1915 }
1916
1917 return (rc);
1918 }
1919
1920
1921 /*
1922 * NAME: dbAllocDmapLev()
1923 *
1924 * FUNCTION: attempt to allocate a specified number of contiguous blocks
1925 * from a specified dmap.
1926 *
1927 * this routine checks if the contiguous blocks are available.
1928 * if so, nblocks blocks are allocated; otherwise, -ENOSPC is
1929 * returned.
1930 *
1931 * PARAMETERS:
1932 * bmp - pointer to bmap descriptor
1933 * dp - pointer to dmap to attempt to allocate blocks from.
1934 * l2nb - log2 number of contiguous block desired.
1935 * nblocks - actual number of contiguous block desired.
1936 * results - on successful return, set to the starting block number
1937 * of the newly allocated range.
1938 *
1939 * RETURN VALUES:
1940 * 0 - success
1941 * -ENOSPC - insufficient disk resources
1942 * -EIO - i/o error
1943 *
1944 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
1945 * IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
1946 */
1947 static int
1948 dbAllocDmapLev(struct bmap * bmp,
1949 struct dmap * dp, int nblocks, int l2nb, s64 * results)
1950 {
1951 s64 blkno;
1952 int leafidx, rc;
1953
1954 /* can't be more than a dmap's worth of blocks */
1955 assert(l2nb <= L2BPERDMAP);
1956
1957 /* search the tree within the dmap page for sufficient
1958 * free space. if sufficient free space is found, dbFindLeaf()
1959 * returns the index of the leaf at which free space was found.
1960 */
1961 if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
1962 return -ENOSPC;
1963
1964 if (leafidx < 0)
1965 return -EIO;
1966
1967 /* determine the block number within the file system corresponding
1968 * to the leaf at which free space was found.
1969 */
1970 blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
1971
1972 /* if not all bits of the dmap word are free, get the starting
1973 * bit number within the dmap word of the required string of free
1974 * bits and adjust the block number with this value.
1975 */
1976 if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
1977 blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
1978
1979 /* allocate the blocks */
1980 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1981 *results = blkno;
1982
1983 return (rc);
1984 }
1985
1986
1987 /*
1988 * NAME: dbAllocDmap()
1989 *
1990 * FUNCTION: adjust the disk allocation map to reflect the allocation
1991 * of a specified block range within a dmap.
1992 *
1993 * this routine allocates the specified blocks from the dmap
1994 * through a call to dbAllocBits(). if the allocation of the
1995 * block range causes the maximum string of free blocks within
1996 * the dmap to change (i.e. the value of the root of the dmap's
1997 * dmtree), this routine will cause this change to be reflected
1998 * up through the appropriate levels of the dmap control pages
1999 * by a call to dbAdjCtl() for the L0 dmap control page that
2000 * covers this dmap.
2001 *
2002 * PARAMETERS:
2003 * bmp - pointer to bmap descriptor
2004 * dp - pointer to dmap to allocate the block range from.
2005 * blkno - starting block number of the block to be allocated.
2006 * nblocks - number of blocks to be allocated.
2007 *
2008 * RETURN VALUES:
2009 * 0 - success
2010 * -EIO - i/o error
2011 *
2012 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2013 */
2014 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2015 int nblocks)
2016 {
2017 s8 oldroot;
2018 int rc;
2019
2020 /* save the current value of the root (i.e. maximum free string)
2021 * of the dmap tree.
2022 */
2023 oldroot = dp->tree.stree[ROOT];
2024
2025 /* allocate the specified (blocks) bits */
2026 dbAllocBits(bmp, dp, blkno, nblocks);
2027
2028 /* if the root has not changed, done. */
2029 if (dp->tree.stree[ROOT] == oldroot)
2030 return (0);
2031
2032 /* root changed. bubble the change up to the dmap control pages.
2033 * if the adjustment of the upper level control pages fails,
2034 * backout the bit allocation (thus making everything consistent).
2035 */
2036 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
2037 dbFreeBits(bmp, dp, blkno, nblocks);
2038
2039 return (rc);
2040 }
2041
2042
2043 /*
2044 * NAME: dbFreeDmap()
2045 *
2046 * FUNCTION: adjust the disk allocation map to reflect the freeing
2047 * of a specified block range within a dmap.
2048 *
2049 * this routine frees the specified blocks from the dmap through
2050 * a call to dbFreeBits(). if the deallocation of the block range
2051 * causes the maximum string of free blocks within the dmap to
2052 * change (i.e. the value of the root of the dmap's dmtree), this
2053 * routine will cause this change to be reflected up through the
2054 * appropriate levels of the dmap control pages by a call to
2055 * dbAdjCtl() for the L0 dmap control page that covers this dmap.
2056 *
2057 * PARAMETERS:
2058 * bmp - pointer to bmap descriptor
2059 * dp - pointer to dmap to free the block range from.
2060 * blkno - starting block number of the block to be freed.
2061 * nblocks - number of blocks to be freed.
2062 *
2063 * RETURN VALUES:
2064 * 0 - success
2065 * -EIO - i/o error
2066 *
2067 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2068 */
2069 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2070 int nblocks)
2071 {
2072 s8 oldroot;
2073 int rc = 0, word;
2074
2075 /* save the current value of the root (i.e. maximum free string)
2076 * of the dmap tree.
2077 */
2078 oldroot = dp->tree.stree[ROOT];
2079
2080 /* free the specified (blocks) bits */
2081 rc = dbFreeBits(bmp, dp, blkno, nblocks);
2082
2083 /* if error or the root has not changed, done. */
2084 if (rc || (dp->tree.stree[ROOT] == oldroot))
2085 return (rc);
2086
2087 /* root changed. bubble the change up to the dmap control pages.
2088 * if the adjustment of the upper level control pages fails,
2089 * backout the deallocation.
2090 */
2091 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
2092 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
2093
2094 /* as part of backing out the deallocation, we will have
2095 * to back split the dmap tree if the deallocation caused
2096 * the freed blocks to become part of a larger binary buddy
2097 * system.
2098 */
2099 if (dp->tree.stree[word] == NOFREE)
2100 dbBackSplit((dmtree_t *)&dp->tree, word, false);
2101
2102 dbAllocBits(bmp, dp, blkno, nblocks);
2103 }
2104
2105 return (rc);
2106 }
2107
2108
2109 /*
2110 * NAME: dbAllocBits()
2111 *
2112 * FUNCTION: allocate a specified block range from a dmap.
2113 *
2114 * this routine updates the dmap to reflect the working
2115 * state allocation of the specified block range. it directly
2116 * updates the bits of the working map and causes the adjustment
2117 * of the binary buddy system described by the dmap's dmtree
2118 * leaves to reflect the bits allocated. it also causes the
2119 * dmap's dmtree, as a whole, to reflect the allocated range.
2120 *
2121 * PARAMETERS:
2122 * bmp - pointer to bmap descriptor
2123 * dp - pointer to dmap to allocate bits from.
2124 * blkno - starting block number of the bits to be allocated.
2125 * nblocks - number of bits to be allocated.
2126 *
2127 * RETURN VALUES: none
2128 *
2129 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2130 */
2131 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2132 int nblocks)
2133 {
2134 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2135 dmtree_t *tp = (dmtree_t *) & dp->tree;
2136 int size;
2137 s8 *leaf;
2138
2139 /* pick up a pointer to the leaves of the dmap tree */
2140 leaf = dp->tree.stree + LEAFIND;
2141
2142 /* determine the bit number and word within the dmap of the
2143 * starting block.
2144 */
2145 dbitno = blkno & (BPERDMAP - 1);
2146 word = dbitno >> L2DBWORD;
2147
2148 /* block range better be within the dmap */
2149 assert(dbitno + nblocks <= BPERDMAP);
2150
2151 /* allocate the bits of the dmap's words corresponding to the block
2152 * range. not all bits of the first and last words may be contained
2153 * within the block range. if this is the case, we'll work against
2154 * those words (i.e. partial first and/or last) on an individual basis
2155 * (a single pass), allocating the bits of interest by hand and
2156 * updating the leaf corresponding to the dmap word. a single pass
2157 * will be used for all dmap words fully contained within the
2158 * specified range. within this pass, the bits of all fully contained
2159 * dmap words will be marked as allocated in a single shot and the leaves
2160 * will be updated. a single leaf may describe the free space of
2161 * multiple dmap words, so we may update only a subset of the actual
2162 * leaves corresponding to the dmap words of the block range.
2163 */
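/* for example, allocating 40 blocks starting at dmap bit 28 is
 * handled as a partial first word (bits 28-31, nb = 4), one fully
 * contained word (bits 32-63) and a partial last word (bits 64-67).
 */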
2164 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2165 /* determine the bit number within the word and
2166 * the number of bits within the word.
2167 */
2168 wbitno = dbitno & (DBWORD - 1);
2169 nb = min(rembits, DBWORD - wbitno);
2170
2171 /* check if only part of a word is to be allocated.
2172 */
2173 if (nb < DBWORD) {
2174 /* allocate (set to 1) the appropriate bits within
2175 * this dmap word.
2176 */
2177 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
2178 >> wbitno);
2179
2180 /* update the leaf for this dmap word. in addition
2181 * to setting the leaf value to the binary buddy max
2182 * of the updated dmap word, dbSplit() will split
2183 * the binary system of the leaves if need be.
2184 */
2185 dbSplit(tp, word, BUDMIN,
2186 dbMaxBud((u8 *)&dp->wmap[word]), false);
2187
2188 word += 1;
2189 } else {
2190 /* one or more dmap words are fully contained
2191 * within the block range. determine how many
2192 * words and allocate (set to 1) the bits of these
2193 * words.
2194 */
2195 nwords = rembits >> L2DBWORD;
2196 memset(&dp->wmap[word], (int) ONES, nwords * 4);
2197
2198 /* determine how many bits.
2199 */
2200 nb = nwords << L2DBWORD;
2201
2202 /* now update the appropriate leaves to reflect
2203 * the allocated words.
2204 */
2205 for (; nwords > 0; nwords -= nw) {
2206 if (leaf[word] < BUDMIN) {
2207 jfs_error(bmp->db_ipbmap->i_sb,
2208 "leaf page corrupt\n");
2209 break;
2210 }
2211
2212 /* determine what the leaf value should be
2213 * updated to as the minimum of the l2 number
2214 * of bits being allocated and the l2 number
2215 * of bits currently described by this leaf.
2216 */
2217 size = min_t(int, leaf[word],
2218 NLSTOL2BSZ(nwords));
2219
2220 /* update the leaf to reflect the allocation.
2221 * in addition to setting the leaf value to
2222 * NOFREE, dbSplit() will split the binary
2223 * system of the leaves to reflect the current
2224 * allocation (size).
2225 */
2226 dbSplit(tp, word, size, NOFREE, false);
2227
2228 /* get the number of dmap words handled */
2229 nw = BUDSIZE(size, BUDMIN);
2230 word += nw;
2231 }
2232 }
2233 }
2234
2235 /* update the free count for this dmap */
2236 le32_add_cpu(&dp->nfree, -nblocks);
2237
2238 BMAP_LOCK(bmp);
2239
2240 /* update the maximum allocation group number if blocks are now
2241 * allocated in a group to the right of the current maximum
2242 * (rightmost) active allocation group.
2243 */
2244 agno = blkno >> bmp->db_agl2size;
2245 if (agno > bmp->db_maxag)
2246 bmp->db_maxag = agno;
2247
2248 /* update the free count for the allocation group and map */
2249 bmp->db_agfree[agno] -= nblocks;
2250 bmp->db_nfree -= nblocks;
2251
2252 BMAP_UNLOCK(bmp);
2253 }
2254
2255
2256 /*
2257 * NAME: dbFreeBits()
2258 *
2259 * FUNCTION: free a specified block range from a dmap.
2260 *
2261 * this routine updates the dmap to reflect the working
2262 * state deallocation of the specified block range. it directly
2263 * updates the bits of the working map and causes the adjustment
2264 * of the binary buddy system described by the dmap's dmtree
2265 * leaves to reflect the bits freed. it also causes the dmap's
2266 * dmtree, as a whole, to reflect the deallocated range.
2267 *
2268 * PARAMETERS:
2269 * bmp - pointer to bmap descriptor
2270 * dp - pointer to dmap to free bits from.
2271 * blkno - starting block number of the bits to be freed.
2272 * nblocks - number of bits to be freed.
2273 *
2274 * RETURN VALUES: 0 - success; -EIO - i/o error
2275 *
2276 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2277 */
2278 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2279 int nblocks)
2280 {
2281 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2282 dmtree_t *tp = (dmtree_t *) & dp->tree;
2283 int rc = 0;
2284 int size;
2285
2286 /* determine the bit number and word within the dmap of the
2287 * starting block.
2288 */
2289 dbitno = blkno & (BPERDMAP - 1);
2290 word = dbitno >> L2DBWORD;
2291
2292 /* block range better be within the dmap.
2293 */
2294 assert(dbitno + nblocks <= BPERDMAP);
2295
2296 /* free the bits of the dmaps words corresponding to the block range.
2297 * not all bits of the first and last words may be contained within
2298 * the block range. if this is the case, we'll work against those
2299 * words (i.e. partial first and/or last) on an individual basis
2300 * (a single pass), freeing the bits of interest by hand and updating
2301 * the leaf corresponding to the dmap word. a single pass will be used
2302 * for all dmap words fully contained within the specified range.
2303 * within this pass, the bits of all fully contained dmap words will
2304 * be marked as free in a single shot and the leaves will be updated. a
2305 * single leaf may describe the free space of multiple dmap words,
2306 * so we may update only a subset of the actual leaves corresponding
2307 * to the dmap words of the block range.
2308 *
2309 * dbJoin() is used to update leaf values and will join the binary
2310 * buddy system of the leaves if the new leaf values indicate this
2311 * should be done.
2312 */
2313 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2314 /* determine the bit number within the word and
2315 * the number of bits within the word.
2316 */
2317 wbitno = dbitno & (DBWORD - 1);
2318 nb = min(rembits, DBWORD - wbitno);
2319
2320 /* check if only part of a word is to be freed.
2321 */
2322 if (nb < DBWORD) {
2323 /* free (zero) the appropriate bits within this
2324 * dmap word.
2325 */
2326 dp->wmap[word] &=
2327 cpu_to_le32(~(ONES << (DBWORD - nb)
2328 >> wbitno));
2329
2330 /* update the leaf for this dmap word.
2331 */
2332 rc = dbJoin(tp, word,
2333 dbMaxBud((u8 *)&dp->wmap[word]), false);
2334 if (rc)
2335 return rc;
2336
2337 word += 1;
2338 } else {
2339 /* one or more dmap words are fully contained
2340 * within the block range. determine how many
2341 * words and free (zero) the bits of these words.
2342 */
2343 nwords = rembits >> L2DBWORD;
2344 memset(&dp->wmap[word], 0, nwords * 4);
2345
2346 /* determine how many bits.
2347 */
2348 nb = nwords << L2DBWORD;
2349
2350 /* now update the appropriate leaves to reflect
2351 * the freed words.
2352 */
2353 for (; nwords > 0; nwords -= nw) {
2354 /* determine what the leaf value should be
2355 * updated to as the minimum of the l2 number
2356 * of bits being freed and the l2 (max) number
2357 * of bits that can be described by this leaf.
2358 */
2359 size =
2360 min(LITOL2BSZ
2361 (word, L2LPERDMAP, BUDMIN),
2362 NLSTOL2BSZ(nwords));
2363
2364 /* update the leaf.
2365 */
2366 rc = dbJoin(tp, word, size, false);
2367 if (rc)
2368 return rc;
2369
2370 /* get the number of dmap words handled.
2371 */
2372 nw = BUDSIZE(size, BUDMIN);
2373 word += nw;
2374 }
2375 }
2376 }
2377
2378 /* update the free count for this dmap.
2379 */
2380 le32_add_cpu(&dp->nfree, nblocks);
2381
2382 BMAP_LOCK(bmp);
2383
2384 /* update the free count for the allocation group and
2385 * map.
2386 */
2387 agno = blkno >> bmp->db_agl2size;
2388 bmp->db_nfree += nblocks;
2389 bmp->db_agfree[agno] += nblocks;
2390
2391 /* check if this allocation group is now completely free and
2392 * if it is currently the maximum (rightmost) allocation group.
2393 * if so, establish the new maximum allocation group number by
2394 * searching left for the first allocation group with blocks still allocated.
2395 */
2396 if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
2397 (agno == bmp->db_numag - 1 &&
2398 bmp->db_agfree[agno] == (bmp-> db_mapsize & (BPERDMAP - 1)))) {
2399 while (bmp->db_maxag > 0) {
2400 bmp->db_maxag -= 1;
2401 if (bmp->db_agfree[bmp->db_maxag] !=
2402 bmp->db_agsize)
2403 break;
2404 }
2405
2406 /* re-establish the allocation group preference if the
2407 * current preference is right of the maximum allocation
2408 * group.
2409 */
2410 if (bmp->db_agpref > bmp->db_maxag)
2411 bmp->db_agpref = bmp->db_maxag;
2412 }
2413
2414 BMAP_UNLOCK(bmp);
2415
2416 return 0;
2417 }
2418
2419
2420 /*
2421 * NAME: dbAdjCtl()
2422 *
2423 * FUNCTION: adjust a dmap control page at a specified level to reflect
2424 * the change in a lower level dmap or dmap control page's
2425 * maximum string of free blocks (i.e. a change in the root
2426 * of the lower level object's dmtree) due to the allocation
2427 * or deallocation of a range of blocks within a single dmap.
2428 *
2429 * on entry, this routine is provided with the new value of
2430 * the lower level dmap or dmap control page root and the
2431 * starting block number of the block range whose allocation
2432 * or deallocation resulted in the root change. this range
2433 * is represented by a single leaf of the current dmapctl
2434 * and the leaf will be updated with this value, possibly
2435 * causing a binary buddy system within the leaves to be
2436 * split or joined. the update may also cause the dmapctl's
2437 * dmtree to be updated.
2438 *
2439 * if the adjustment of the dmap control page, itself, causes its
2440 * root to change, this change will be bubbled up to the next dmap
2441 * control level by a recursive call to this routine, specifying
2442 * the new root value and the next dmap control page level to
2443 * be adjusted.
2444 * PARAMETERS:
2445 * bmp - pointer to bmap descriptor
2446 * blkno - the first block of a block range within a dmap. it is
2447 * the allocation or deallocation of this block range that
2448 * requires the dmap control page to be adjusted.
2449 * newval - the new value of the lower level dmap or dmap control
2450 * page root.
2451 * alloc - 'true' if adjustment is due to an allocation.
2452 * level - current level of dmap control page (i.e. L0, L1, L2) to
2453 * be adjusted.
2454 *
2455 * RETURN VALUES:
2456 * 0 - success
2457 * -EIO - i/o error
2458 *
2459 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2460 */
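/* for example, when blocks are allocated from a previously free dmap,
 * the dmap's root drops and dbAdjCtl() is first called with level 0:
 * the L0 leaf covering the dmap is back split if it currently sits
 * inside a larger joined buddy system and is then set to the new
 * value with dbSplit(); if this changes the L0 root, the same
 * adjustment is repeated (recursively) at L1 and, if needed, at L2.
 */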
2461 static int
2462 dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
2463 {
2464 struct metapage *mp;
2465 s8 oldroot;
2466 int oldval;
2467 s64 lblkno;
2468 struct dmapctl *dcp;
2469 int rc, leafno, ti;
2470
2471 /* get the buffer for the dmap control page for the specified
2472 * block number and control page level.
2473 */
2474 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
2475 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
2476 if (mp == NULL)
2477 return -EIO;
2478 dcp = (struct dmapctl *) mp->data;
2479
2480 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
2481 jfs_error(bmp->db_ipbmap->i_sb, "Corrupt dmapctl page\n");
2482 release_metapage(mp);
2483 return -EIO;
2484 }
2485
2486 /* determine the leaf number corresponding to the block and
2487 * the index within the dmap control tree.
2488 */
2489 leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
2490 ti = leafno + le32_to_cpu(dcp->leafidx);
2491
2492 /* save the current leaf value and the current root level (i.e.
2493 * maximum l2 free string described by this dmapctl).
2494 */
2495 oldval = dcp->stree[ti];
2496 oldroot = dcp->stree[ROOT];
2497
2498 /* check if this is a control page update for an allocation.
2499 * if so, update the leaf to reflect the new leaf value using
2500 * dbSplit(); otherwise (deallocation), use dbJoin() to update
2501 * the leaf with the new value. in addition to updating the
2502 * leaf, dbSplit() will also split the binary buddy system of
2503 * the leaves, if required, and bubble new values within the
2504 * dmapctl tree, if required. similarly, dbJoin() will join
2505 * the binary buddy system of leaves and bubble new values up
2506 * the dmapctl tree as required by the new leaf value.
2507 */
2508 if (alloc) {
2509 /* check if we are in the middle of a binary buddy
2510 * system. this happens when we are performing the
2511 * first allocation out of an allocation group that
2512 * is part (not the first part) of a larger binary
2513 * buddy system. if we are in the middle, back split
2514 * the system prior to calling dbSplit() which assumes
2515 * that it is at the front of a binary buddy system.
2516 */
2517 if (oldval == NOFREE) {
2518 rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
2519 if (rc) {
2520 release_metapage(mp);
2521 return rc;
2522 }
2523 oldval = dcp->stree[ti];
2524 }
2525 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
2526 } else {
2527 rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
2528 if (rc) {
2529 release_metapage(mp);
2530 return rc;
2531 }
2532 }
2533
2534 /* check if the root of the current dmap control page changed due
2535 * to the update and if the current dmap control page is not at
2536 * the current top level (i.e. L0, L1, L2) of the map. if so (i.e.
2537 * root changed and this is not the top level), call this routine
2538 * again (recursion) for the next higher level of the mapping to
2539 * reflect the change in root for the current dmap control page.
2540 */
2541 if (dcp->stree[ROOT] != oldroot) {
2542 /* are we below the top level of the map. if so,
2543 * bubble the root up to the next higher level.
2544 */
2545 if (level < bmp->db_maxlevel) {
2546 /* bubble up the new root of this dmap control page to
2547 * the next level.
2548 */
2549 if ((rc =
2550 dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
2551 level + 1))) {
2552 /* something went wrong in bubbling up the new
2553 * root value, so backout the changes to the
2554 * current dmap control page.
2555 */
2556 if (alloc) {
2557 dbJoin((dmtree_t *) dcp, leafno,
2558 oldval, true);
2559 } else {
2560 /* the dbJoin() above might have
2561 * caused a larger binary buddy system
2562 * to form and we may now be in the
2563 * middle of it. if this is the case,
2564 * back split the buddies.
2565 */
2566 if (dcp->stree[ti] == NOFREE)
2567 dbBackSplit((dmtree_t *)
2568 dcp, leafno, true);
2569 dbSplit((dmtree_t *) dcp, leafno,
2570 dcp->budmin, oldval, true);
2571 }
2572
2573 /* release the buffer and return the error.
2574 */
2575 release_metapage(mp);
2576 return (rc);
2577 }
2578 } else {
2579 /* we're at the top level of the map. update
2580 * the bmap control page to reflect the size
2581 * of the maximum free buddy system.
2582 */
2583 assert(level == bmp->db_maxlevel);
2584 if (bmp->db_maxfreebud != oldroot) {
2585 jfs_error(bmp->db_ipbmap->i_sb,
2586 "the maximum free buddy is not the old root\n");
2587 }
2588 bmp->db_maxfreebud = dcp->stree[ROOT];
2589 }
2590 }
2591
2592 /* write the buffer.
2593 */
2594 write_metapage(mp);
2595
2596 return (0);
2597 }
2598
2599
2600 /*
2601 * NAME: dbSplit()
2602 *
2603 * FUNCTION: update the leaf of a dmtree with a new value, splitting
2604 * the leaf from the binary buddy system of the dmtree's
2605 * leaves, as required.
2606 *
2607 * PARAMETERS:
2608 * tp - pointer to the tree containing the leaf.
2609 * leafno - the number of the leaf to be updated.
2610 * splitsz - the size the binary buddy system starting at the leaf
2611 * must be split to, specified as the log2 number of blocks.
2612 * newval - the new value for the leaf.
2613 *
2614 * RETURN VALUES: none
2615 *
2616 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2617 */
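/* for example, if leaf 0 of a dmap tree heads a free buddy system of
 * 2^13 blocks (leaf value 13, budmin 5) and splitsz is 5, the buddy
 * leaves at offsets 128, 64, 32, 16, 8, 4, 2 and 1 receive the values
 * 12, 11, 10, 9, 8, 7, 6 and 5, after which leaf 0 itself is set to
 * newval by the final dbAdjTree() call.
 */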
2618 static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
2619 {
2620 int budsz;
2621 int cursz;
2622 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2623
2624 /* check if the leaf needs to be split.
2625 */
2626 if (leaf[leafno] > tp->dmt_budmin) {
2627 /* the split occurs by cutting the buddy system in half
2628 * at the specified leaf until we reach the specified
2629 * size. pick up the starting split size (current size
2630 * - 1 in l2) and the corresponding buddy size.
2631 */
2632 cursz = leaf[leafno] - 1;
2633 budsz = BUDSIZE(cursz, tp->dmt_budmin);
2634
2635 /* split until we reach the specified size.
2636 */
2637 while (cursz >= splitsz) {
2638 /* update the buddy's leaf with its new value.
2639 */
2640 dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
2641
2642 /* on to the next size and buddy.
2643 */
2644 cursz -= 1;
2645 budsz >>= 1;
2646 }
2647 }
2648
2649 /* adjust the dmap tree to reflect the specified leaf's new
2650 * value.
2651 */
2652 dbAdjTree(tp, leafno, newval, is_ctl);
2653 }
2654
2655
2656 /*
2657 * NAME: dbBackSplit()
2658 *
2659 * FUNCTION: back split the binary buddy system of dmtree leaves
2660 * that hold a specified leaf until the specified leaf
2661 * starts its own binary buddy system.
2662 *
2663 * the allocators typically perform allocations at the start
2664 * of binary buddy systems and dbSplit() is used to accomplish
2665 * any required splits. in some cases, however, allocation
2666 * may occur in the middle of a binary system and requires a
2667 * back split, with the split proceeding out from the middle of
2668 * the system (less efficient) rather than the start of the
2669 * system (more efficient). the cases in which a back split
2670 * is required are rare and are limited to the first allocation
2671 * within an allocation group which is a part (not first part)
2672 * of a larger binary buddy system and a few exception cases
2673 * in which a previous join operation must be backed out.
2674 *
2675 * PARAMETERS:
2676 * tp - pointer to the tree containing the leaf.
2677 * leafno - the number of the leaf to be updated.
2678 *
2679 * RETURN VALUES: 0 - success; -EIO - inconsistent block map
2680 *
2681 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2682 */
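/* for example, if leaf 3 lies inside a fully free four-leaf buddy
 * system headed by leaf 0 (value 7, budmin 5), the back split first
 * halves that system (leaves 0 and 2 become 6) and then halves the
 * half containing leaf 3 (leaves 2 and 3 become 5), at which point
 * leaf 3 heads its own single-leaf buddy system.
 */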
2683 static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
2684 {
2685 int budsz, bud, w, bsz, size;
2686 int cursz;
2687 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2688
2689 /* leaf should be part (not first part) of a binary
2690 * buddy system.
2691 */
2692 assert(leaf[leafno] == NOFREE);
2693
2694 /* the back split is accomplished by iteratively finding the leaf
2695 * that starts the buddy system that contains the specified leaf and
2696 * splitting that system in two. this iteration continues until
2697 * the specified leaf becomes the start of a buddy system.
2698 *
2699 * determine maximum possible l2 size for the specified leaf.
2700 */
2701 size =
2702 LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
2703 tp->dmt_budmin);
2704
2705 /* determine the number of leaves covered by this size. this
2706 * is the buddy size that we will start with as we search for
2707 * the buddy system that contains the specified leaf.
2708 */
2709 budsz = BUDSIZE(size, tp->dmt_budmin);
2710
2711 /* back split.
2712 */
2713 while (leaf[leafno] == NOFREE) {
2714 /* find the leftmost buddy leaf.
2715 */
2716 for (w = leafno, bsz = budsz;; bsz <<= 1,
2717 w = (w < bud) ? w : bud) {
2718 if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
2719 jfs_err("JFS: block map error in dbBackSplit");
2720 return -EIO;
2721 }
2722
2723 /* determine the buddy.
2724 */
2725 bud = w ^ bsz;
2726
2727 /* check if this buddy is the start of the system.
2728 */
2729 if (leaf[bud] != NOFREE) {
2730 /* split the leaf at the start of the
2731 * system in two.
2732 */
2733 cursz = leaf[bud] - 1;
2734 dbSplit(tp, bud, cursz, cursz, is_ctl);
2735 break;
2736 }
2737 }
2738 }
2739
2740 if (leaf[leafno] != size) {
2741 jfs_err("JFS: wrong leaf value in dbBackSplit");
2742 return -EIO;
2743 }
2744 return 0;
2745 }
2746
2747
2748 /*
2749 * NAME: dbJoin()
2750 *
2751 * FUNCTION: update the leaf of a dmtree with a new value, joining
2752 * the leaf with other leaves of the dmtree into a multi-leaf
2753 * binary buddy system, as required.
2754 *
2755 * PARAMETERS:
2756 * tp - pointer to the tree containing the leaf.
2757 * leafno - the number of the leaf to be updated.
2758 * newval - the new value for the leaf.
2759 *
2760 * RETURN VALUES: 0 - success; -EIO - inconsistent block map
2761 */
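/* for example, if freeing blocks raises leaf 1 of a dmap tree to 5 and
 * its buddy leaf 0 already holds 5, the two are joined: leaf 0 becomes
 * 6 and leaf 1 becomes NOFREE; if leaf 2 also holds 6 the join repeats,
 * leaving leaf 0 with 7 and leaf 2 with NOFREE, and so on while each
 * successive buddy holds the matching value.
 */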
2762 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
2763 {
2764 int budsz, buddy;
2765 s8 *leaf;
2766
2767 /* can the new leaf value require a join with other leaves ?
2768 */
2769 if (newval >= tp->dmt_budmin) {
2770 /* pickup a pointer to the leaves of the tree.
2771 */
2772 leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2773
2774 /* try to join the specified leaf into a large binary
2775 * buddy system. the join proceeds by attempting to join
2776 * the specified leafno with its buddy (leaf) at new value.
2777 * if the join occurs, we attempt to join the left leaf
2778 * of the joined buddies with its buddy at new value + 1.
2779 * we continue to join until we find a buddy that cannot be
2780 * joined (does not have a value equal to the size of the
2781 * last join) or until all leaves have been joined into a
2782 * single system.
2783 *
2784 * get the buddy size (number of words covered) of
2785 * the new value.
2786 */
2787 budsz = BUDSIZE(newval, tp->dmt_budmin);
2788
2789 /* try to join.
2790 */
2791 while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
2792 /* get the buddy leaf.
2793 */
2794 buddy = leafno ^ budsz;
2795
2796 /* if the leaf's new value is greater than its
2797 * buddy's value, we join no more.
2798 */
2799 if (newval > leaf[buddy])
2800 break;
2801
2802 /* It shouldn't be less */
2803 if (newval < leaf[buddy])
2804 return -EIO;
2805
2806 /* check which (leafno or buddy) is the left buddy.
2807 * the left buddy gets to claim the blocks resulting
2808 * from the join while the right gets to claim none.
2809 * the left buddy is also eligible to participate in
2810 * a join at the next higher level while the right
2811 * is not.
2812 *
2813 */
2814 if (leafno < buddy) {
2815 /* leafno is the left buddy.
2816 */
2817 dbAdjTree(tp, buddy, NOFREE, is_ctl);
2818 } else {
2819 /* buddy is the left buddy and becomes
2820 * leafno.
2821 */
2822 dbAdjTree(tp, leafno, NOFREE, is_ctl);
2823 leafno = buddy;
2824 }
2825
2826 /* on to try the next join.
2827 */
2828 newval += 1;
2829 budsz <<= 1;
2830 }
2831 }
2832
2833 /* update the leaf value.
2834 */
2835 dbAdjTree(tp, leafno, newval, is_ctl);
2836
2837 return 0;
2838 }
2839
2840
2841 /*
2842 * NAME: dbAdjTree()
2843 *
2844 * FUNCTION: update a leaf of a dmtree with a new value, adjusting
2845 * the dmtree, as required, to reflect the new leaf value.
2846 * the combination of any buddies must already be done before
2847 * this is called.
2848 *
2849 * PARAMETERS:
2850 * tp - pointer to the tree to be adjusted.
2851 * leafno - the number of the leaf to be updated.
2852 * newval - the new value for the leaf.
2853 *
2854 * RETURN VALUES: none
2855 */
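/* for example, in a dmap tree (height 4) a change to the first leaf,
 * at stree index 85, is propagated through the parent nodes at indices
 * 21, 5 and 1 and finally to the root at index 0, stopping early if a
 * parent's value (the max of its 4-node group) is already up to date.
 */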
2856 static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
2857 {
2858 int lp, pp, k;
2859 int max, size;
2860
2861 size = is_ctl ? CTLTREESIZE : TREESIZE;
2862
2863 /* pick up the index of the leaf for this leafno.
2864 */
2865 lp = leafno + le32_to_cpu(tp->dmt_leafidx);
2866
2867 if (WARN_ON_ONCE(lp >= size || lp < 0))
2868 return;
2869
2870 /* is the current value the same as the old value ? if so,
2871 * there is nothing to do.
2872 */
2873 if (tp->dmt_stree[lp] == newval)
2874 return;
2875
2876 /* set the new value.
2877 */
2878 tp->dmt_stree[lp] = newval;
2879
2880 /* bubble the new value up the tree as required.
2881 */
2882 for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
2883 if (lp == 0)
2884 break;
2885
2886 /* get the index of the first leaf of the 4 leaf
2887 * group containing the specified leaf (leafno).
2888 */
2889 lp = ((lp - 1) & ~0x03) + 1;
2890
2891 /* get the index of the parent of this 4 leaf group.
2892 */
2893 pp = (lp - 1) >> 2;
2894
2895 /* determine the maximum of the 4 leaves.
2896 */
2897 max = TREEMAX(&tp->dmt_stree[lp]);
2898
2899 /* if the maximum of the 4 is the same as the
2900 * parent's value, we're done.
2901 */
2902 if (tp->dmt_stree[pp] == max)
2903 break;
2904
2905 /* parent gets new value.
2906 */
2907 tp->dmt_stree[pp] = max;
2908
2909 /* parent becomes leaf for next go-round.
2910 */
2911 lp = pp;
2912 }
2913 }
2914
2915
2916 /*
2917 * NAME: dbFindLeaf()
2918 *
2919 * FUNCTION: search a dmtree_t for sufficient free blocks, returning
2920 * the index of a leaf describing the free blocks if
2921 * sufficient free blocks are found.
2922 *
2923 * the search starts at the top of the dmtree_t tree and
2924 * proceeds down the tree to the leftmost leaf with sufficient
2925 * free space.
2926 *
2927 * PARAMETERS:
2928 * tp - pointer to the tree to be searched.
2929 * l2nb - log2 number of free blocks to search for.
2930 * leafidx - return pointer to be set to the index of the leaf
2931 * describing at least l2nb free blocks if sufficient
2932 * free blocks are found.
2933 * is_ctl - determines if the tree is of type ctl
2934 *
2935 * RETURN VALUES:
2936 * 0 - success
2937 * -ENOSPC - insufficient free blocks.
2938 */
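/* for example, starting at ti = 1 (the root's four children), if the
 * node at stree index 2 is the leftmost with a value >= l2nb (n = 1),
 * the next level is searched at ti = ((1 + 1) << 2) + 1 = 9, i.e. that
 * node's four children at indices 9 through 12, and so on down to the
 * leaf level.
 */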
2939 static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
2940 {
2941 int ti, n = 0, k, x = 0;
2942 int max_size, max_idx;
2943
2944 max_size = is_ctl ? CTLTREESIZE : TREESIZE;
2945 max_idx = is_ctl ? LPERCTL : LPERDMAP;
2946
2947 /* first check the root of the tree to see if there is
2948 * sufficient free space.
2949 */
2950 if (l2nb > tp->dmt_stree[ROOT])
2951 return -ENOSPC;
2952
2953 /* sufficient free space available. now search down the tree
2954 * starting at the next level for the leftmost leaf that
2955 * describes sufficient free space.
2956 */
2957 for (k = le32_to_cpu(tp->dmt_height), ti = 1;
2958 k > 0; k--, ti = ((ti + n) << 2) + 1) {
2959 /* search the four nodes at this level, starting from
2960 * the left.
2961 */
2962 for (x = ti, n = 0; n < 4; n++) {
2963 /* sufficient free space found. move to the next
2964 * level (or quit if this is the last level).
2965 */
2966 if (x + n > max_size)
2967 return -ENOSPC;
2968 if (l2nb <= tp->dmt_stree[x + n])
2969 break;
2970 }
2971
2972 /* better have found something since the higher
2973 * levels of the tree said it was here.
2974 */
2975 assert(n < 4);
2976 }
2977 if (le32_to_cpu(tp->dmt_leafidx) >= max_idx)
2978 return -ENOSPC;
2979
2980 /* set the return to the leftmost leaf describing sufficient
2981 * free space.
2982 */
2983 *leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
2984
2985 return (0);
2986 }
2987
2988
2989 /*
2990 * NAME: dbFindBits()
2991 *
2992 * FUNCTION: find a specified number of binary buddy free bits within a
2993 * dmap bitmap word value.
2994 *
2995 * this routine searches the bitmap value for (1 << l2nb) free
2996 * bits at (1 << l2nb) alignments within the value.
2997 *
2998 * PARAMETERS:
2999 * word - dmap bitmap word value.
3000 * l2nb - number of free bits specified as a log2 number.
3001 *
3002 * RETURN VALUES:
3003 * starting bit number of free bits.
3004 */
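/* for example, dbFindBits(0xff0fffff, 2) looks for 4 free bits:
 * the complemented word is 0x00f00000 and the mask is tried at
 * bit numbers 0, 4 and 8 (0xf0000000, 0x0f000000, 0x00f00000),
 * matching at bit number 8, which is returned.
 */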
3005 static int dbFindBits(u32 word, int l2nb)
3006 {
3007 int bitno, nb;
3008 u32 mask;
3009
3010 /* get the number of bits.
3011 */
3012 nb = 1 << l2nb;
3013 assert(nb <= DBWORD);
3014
3015 /* complement the word so we can use a mask (i.e. 0s represent
3016 * free bits) and compute the mask.
3017 */
3018 word = ~word;
3019 mask = ONES << (DBWORD - nb);
3020
3021 /* scan the word for nb free bits at nb alignments.
3022 */
3023 for (bitno = 0; mask != 0; bitno += nb, mask = (mask >> nb)) {
3024 if ((mask & word) == mask)
3025 break;
3026 }
3027
3028 ASSERT(bitno < 32);
3029
3030 /* return the bit number.
3031 */
3032 return (bitno);
3033 }
3034
3035
3036 /*
3037 * NAME: dbMaxBud(u8 *cp)
3038 *
3039 * FUNCTION: determine the largest binary buddy string of free
3040 * bits within 32-bits of the map.
3041 *
3042 * PARAMETERS:
3043 * cp - pointer to the 32-bit value.
3044 *
3045 * RETURN VALUES:
3046 * largest binary buddy of free bits within a dmap word.
3047 */
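/* for example, an all-zero word yields BUDMIN (2^5 = 32 free blocks)
 * and a word with one zero halfword yields BUDMIN - 1; a word with a
 * single zero byte (and no zero halfword) yields budtab[0] = 3, i.e.
 * an aligned run of eight free bits.
 */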
3048 static int dbMaxBud(u8 * cp)
3049 {
3050 signed char tmp1, tmp2;
3051
3052 /* check if the wmap word is all free. if so, the
3053 * free buddy size is BUDMIN.
3054 */
3055 if (*((uint *) cp) == 0)
3056 return (BUDMIN);
3057
3058 /* check if the wmap word is half free. if so, the
3059 * free buddy size is BUDMIN-1.
3060 */
3061 if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
3062 return (BUDMIN - 1);
3063
3064 /* not all free or half free. determine the free buddy
3065 * size thru table lookup using quarters of the wmap word.
3066 */
3067 tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
3068 tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
3069 return (max(tmp1, tmp2));
3070 }
3071
3072
3073 /*
3074 * NAME: cnttz(uint word)
3075 *
3076 * FUNCTION: determine the number of trailing zeros within a 32-bit
3077 * value.
3078 *
3079 * PARAMETERS:
3080 * word - 32-bit value to be examined.
3081 *
3082 * RETURN VALUES:
3083 * count of trailing zeros
3084 */
3085 static int cnttz(u32 word)
3086 {
3087 int n;
3088
3089 for (n = 0; n < 32; n++, word >>= 1) {
3090 if (word & 0x01)
3091 break;
3092 }
3093
3094 return (n);
3095 }
3096
3097
3098 /*
3099 * NAME: cntlz(u32 value)
3100 *
3101 * FUNCTION: determine the number of leading zeros within a 32-bit
3102 * value.
3103 *
3104 * PARAMETERS:
3105 * value - 32-bit value to be examined.
3106 *
3107 * RETURN VALUES:
3108 * count of leading zeros
3109 */
3110 static int cntlz(u32 value)
3111 {
3112 int n;
3113
3114 for (n = 0; n < 32; n++, value <<= 1) {
3115 if (value & HIGHORDER)
3116 break;
3117 }
3118 return (n);
3119 }
3120
3121
3122 /*
3123 * NAME: blkstol2(s64 nb)
3124 *
3125 * FUNCTION: convert a block count to its log2 value. if the block
3126 * count is not a l2 multiple, it is rounded up to the next
3127 * larger l2 multiple.
3128 *
3129 * PARAMETERS:
3130 * nb - number of blocks
3131 *
3132 * RETURN VALUES:
3133 * log2 number of blocks
3134 */
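/* for example, blkstol2(64) returns 6, while blkstol2(100) returns 7
 * because 100 is not a power of two and is rounded up to 128 blocks.
 */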
3135 static int blkstol2(s64 nb)
3136 {
3137 int l2nb;
3138 s64 mask; /* meant to be signed */
3139
3140 mask = (s64) 1 << (64 - 1);
3141
3142 /* count the leading bits.
3143 */
3144 for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
3145 /* leading bit found.
3146 */
3147 if (nb & mask) {
3148 /* determine the l2 value.
3149 */
3150 l2nb = (64 - 1) - l2nb;
3151
3152 /* check if we need to round up.
3153 */
3154 if (~mask & nb)
3155 l2nb++;
3156
3157 return (l2nb);
3158 }
3159 }
3160 assert(0);
3161 return 0; /* fix compiler warning */
3162 }
3163
3164
3165 /*
3166 * NAME: dbAllocBottomUp()
3167 *
3168 * FUNCTION: allocate the specified block range from the working block
3169 * allocation map.
3170 *
3171 * the blocks will be allocated from the working map one dmap
3172 * at a time.
3173 *
3174 * PARAMETERS:
3175 * ip - pointer to in-core inode;
3176 * blkno - starting block number of the blocks to be allocated.
3177 * nblocks - number of blocks to be allocated.
3178 *
3179 * RETURN VALUES:
3180 * 0 - success
3181 * -EIO - i/o error
3182 */
3183 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3184 {
3185 struct metapage *mp;
3186 struct dmap *dp;
3187 int nb, rc;
3188 s64 lblkno, rem;
3189 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
3190 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
3191
3192 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
3193
3194 /* block to be allocated better be within the mapsize. */
3195 ASSERT(nblocks <= bmp->db_mapsize - blkno);
3196
3197 /*
3198 * allocate the blocks a dmap at a time.
3199 */
3200 mp = NULL;
3201 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
3202 /* release previous dmap if any */
3203 if (mp) {
3204 write_metapage(mp);
3205 }
3206
3207 /* get the buffer for the current dmap. */
3208 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
3209 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
3210 if (mp == NULL) {
3211 IREAD_UNLOCK(ipbmap);
3212 return -EIO;
3213 }
3214 dp = (struct dmap *) mp->data;
3215
3216 /* determine the number of blocks to be allocated from
3217 * this dmap.
3218 */
3219 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
3220
3221 /* allocate the blocks. */
3222 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
3223 release_metapage(mp);
3224 IREAD_UNLOCK(ipbmap);
3225 return (rc);
3226 }
3227 }
3228
3229 /* write the last buffer. */
3230 write_metapage(mp);
3231
3232 IREAD_UNLOCK(ipbmap);
3233
3234 return (0);
3235 }
3236
3237
3238 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
3239 int nblocks)
3240 {
3241 int rc;
3242 int dbitno, word, rembits, nb, nwords, wbitno, agno;
3243 s8 oldroot;
3244 struct dmaptree *tp = (struct dmaptree *) & dp->tree;
3245
3246 /* save the current value of the root (i.e. maximum free string)
3247 * of the dmap tree.
3248 */
3249 oldroot = tp->stree[ROOT];
3250
3251 /* determine the bit number and word within the dmap of the
3252 * starting block.
3253 */
3254 dbitno = blkno & (BPERDMAP - 1);
3255 word = dbitno >> L2DBWORD;
3256
3257 /* block range better be within the dmap */
3258 assert(dbitno + nblocks <= BPERDMAP);
3259
3260 /* allocate the bits of the dmap's words corresponding to the block
3261 * range. not all bits of the first and last words may be contained
3262 * within the block range. if this is the case, we'll work against
3263 * those words (i.e. partial first and/or last) on an individual basis
3264 * (a single pass), allocating the bits of interest by hand and
3265 * updating the leaf corresponding to the dmap word. a single pass
3266 * will be used for all dmap words fully contained within the
3267 * specified range. within this pass, the bits of all fully contained
3268 * dmap words will be marked as allocated in a single shot and the leaves
3269 * will be updated. a single leaf may describe the free space of
3270 * multiple dmap words, so we may update only a subset of the actual
3271 * leaves corresponding to the dmap words of the block range.
3272 */
3273 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
3274 /* determine the bit number within the word and
3275 * the number of bits within the word.
3276 */
3277 wbitno = dbitno & (DBWORD - 1);
3278 nb = min(rembits, DBWORD - wbitno);
3279
3280 /* check if only part of a word is to be allocated.
3281 */
3282 if (nb < DBWORD) {
3283 /* allocate (set to 1) the appropriate bits within
3284 * this dmap word.
3285 */
3286 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
3287 >> wbitno);
3288
3289 word++;
3290 } else {
3291 /* one or more dmap words are fully contained
3292 * within the block range. determine how many
3293 * words and allocate (set to 1) the bits of these
3294 * words.
3295 */
3296 nwords = rembits >> L2DBWORD;
3297 memset(&dp->wmap[word], (int) ONES, nwords * 4);
3298
3299 /* determine how many bits */
3300 nb = nwords << L2DBWORD;
3301 word += nwords;
3302 }
3303 }
3304
3305 /* update the free count for this dmap */
3306 le32_add_cpu(&dp->nfree, -nblocks);
3307
3308 /* reconstruct summary tree */
3309 dbInitDmapTree(dp);
3310
3311 BMAP_LOCK(bmp);
3312
3313 /* update the highest active allocation group number if blocks
3314 * are now allocated in a group to the right of the current
3315 * maximum (rightmost) active allocation group.
3316 */
3317 agno = blkno >> bmp->db_agl2size;
3318 if (agno > bmp->db_maxag)
3319 bmp->db_maxag = agno;
3320
3321 /* update the free count for the allocation group and map */
3322 bmp->db_agfree[agno] -= nblocks;
3323 bmp->db_nfree -= nblocks;
3324
3325 BMAP_UNLOCK(bmp);
3326
3327 /* if the root has not changed, done. */
3328 if (tp->stree[ROOT] == oldroot)
3329 return (0);
3330
3331 /* root changed. bubble the change up to the dmap control pages.
3332 * if the adjustment of the upper level control pages fails,
3333 * backout the bit allocation (thus making everything consistent).
3334 */
3335 if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
3336 dbFreeBits(bmp, dp, blkno, nblocks);
3337
3338 return (rc);
3339 }
3340
3341
3342 /*
3343 * NAME: dbExtendFS()
3344 *
3345 * FUNCTION: extend bmap from blkno for nblocks;
3346 * dbExtendFS() updates bmap ready for dbAllocBottomUp();
3347 *
3348 * L2
3349 * |
3350 * L1---------------------------------L1
3351 * | |
3352 * L0---------L0---------L0 L0---------L0---------L0
3353 * | | | | | |
3354 * d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
3355 * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
3356 *
3357 * <---old---><----------------------------extend----------------------->
3358 */
3359 int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
3360 {
3361 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
3362 int nbperpage = sbi->nbperpage;
3363 int i, i0 = true, j, j0 = true, k, n;
3364 s64 newsize;
3365 s64 p;
3366 struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
3367 struct dmapctl *l2dcp, *l1dcp, *l0dcp;
3368 struct dmap *dp;
3369 s8 *l0leaf, *l1leaf, *l2leaf;
3370 struct bmap *bmp = sbi->bmap;
3371 int agno, l2agsize, oldl2agsize;
3372 s64 ag_rem;
3373
3374 newsize = blkno + nblocks;
3375
3376 jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
3377 (long long) blkno, (long long) nblocks, (long long) newsize);
3378
3379 /*
3380 * initialize bmap control page.
3381 *
3382 * all the data in bmap control page should exclude
3383 * the mkfs hidden dmap page.
3384 */
3385
3386 /* update mapsize */
3387 bmp->db_mapsize = newsize;
3388 bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
3389
3390 /* compute new AG size */
3391 l2agsize = dbGetL2AGSize(newsize);
3392 oldl2agsize = bmp->db_agl2size;
3393
3394 bmp->db_agl2size = l2agsize;
3395 bmp->db_agsize = (s64)1 << l2agsize;
3396
3397 /* compute new number of AG */
3398 agno = bmp->db_numag;
3399 bmp->db_numag = newsize >> l2agsize;
3400 bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
3401
3402 /*
3403 * reconfigure db_agfree[]
3404 * from old AG configuration to new AG configuration;
3405 *
3406 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
3407 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
3408 * note: new AG size = old AG size * (2**x).
3409 */
3410 if (l2agsize == oldl2agsize)
3411 goto extend;
3412 k = 1 << (l2agsize - oldl2agsize);
3413 ag_rem = bmp->db_agfree[0]; /* save agfree[0] */
3414 for (i = 0, n = 0; i < agno; n++) {
3415 bmp->db_agfree[n] = 0; /* init collection point */
3416
3417 /* coalesce contiguous k AGs; */
3418 for (j = 0; j < k && i < agno; j++, i++) {
3419 /* merge AGi to AGn */
3420 bmp->db_agfree[n] += bmp->db_agfree[i];
3421 }
3422 }
3423 bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */
3424
3425 for (; n < MAXAG; n++)
3426 bmp->db_agfree[n] = 0;
3427
3428 /*
3429 * update highest active ag number
3430 */
3431
3432 bmp->db_maxag = bmp->db_maxag / k;
3433
3434 /*
3435 * extend bmap
3436 *
3437 * update bit maps and corresponding level control pages;
3438 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
3439 */
3440 extend:
3441 /* get L2 page */
3442 p = BMAPBLKNO + nbperpage; /* L2 page */
3443 l2mp = read_metapage(ipbmap, p, PSIZE, 0);
3444 if (!l2mp) {
3445 jfs_error(ipbmap->i_sb, "L2 page could not be read\n");
3446 return -EIO;
3447 }
3448 l2dcp = (struct dmapctl *) l2mp->data;
3449
3450 /* compute start L1 */
3451 k = blkno >> L2MAXL1SIZE;
3452 l2leaf = l2dcp->stree + CTLLEAFIND + k;
3453 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */
3454
3455 /*
3456 * extend each L1 in L2
3457 */
3458 for (; k < LPERCTL; k++, p += nbperpage) {
3459 /* get L1 page */
3460 if (j0) {
3461 /* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
3462 l1mp = read_metapage(ipbmap, p, PSIZE, 0);
3463 if (l1mp == NULL)
3464 goto errout;
3465 l1dcp = (struct dmapctl *) l1mp->data;
3466
3467 /* compute start L0 */
3468 j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
3469 l1leaf = l1dcp->stree + CTLLEAFIND + j;
3470 p = BLKTOL0(blkno, sbi->l2nbperpage);
3471 j0 = false;
3472 } else {
3473 /* assign/init L1 page */
3474 l1mp = get_metapage(ipbmap, p, PSIZE, 0);
3475 if (l1mp == NULL)
3476 goto errout;
3477
3478 l1dcp = (struct dmapctl *) l1mp->data;
3479
3480 /* compute start L0 */
3481 j = 0;
3482 l1leaf = l1dcp->stree + CTLLEAFIND;
3483 p += nbperpage; /* 1st L0 of L1.k */
3484 }
3485
3486 /*
3487 * extend each L0 in L1
3488 */
3489 for (; j < LPERCTL; j++) {
3490 /* get L0 page */
3491 if (i0) {
3492 /* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
3493
3494 l0mp = read_metapage(ipbmap, p, PSIZE, 0);
3495 if (l0mp == NULL)
3496 goto errout;
3497 l0dcp = (struct dmapctl *) l0mp->data;
3498
3499 /* compute start dmap */
3500 i = (blkno & (MAXL0SIZE - 1)) >>
3501 L2BPERDMAP;
3502 l0leaf = l0dcp->stree + CTLLEAFIND + i;
3503 p = BLKTODMAP(blkno,
3504 sbi->l2nbperpage);
3505 i0 = false;
3506 } else {
3507 /* assign/init L0 page */
3508 l0mp = get_metapage(ipbmap, p, PSIZE, 0);
3509 if (l0mp == NULL)
3510 goto errout;
3511
3512 l0dcp = (struct dmapctl *) l0mp->data;
3513
3514 /* compute start dmap */
3515 i = 0;
3516 l0leaf = l0dcp->stree + CTLLEAFIND;
3517 p += nbperpage; /* 1st dmap of L0.j */
3518 }
3519
3520 /*
3521 * extend each dmap in L0
3522 */
3523 for (; i < LPERCTL; i++) {
3524 /*
3525 * reconstruct the dmap page, and
3526 * initialize corresponding parent L0 leaf
3527 */
3528 if ((n = blkno & (BPERDMAP - 1))) {
3529 /* read in dmap page: */
3530 mp = read_metapage(ipbmap, p,
3531 PSIZE, 0);
3532 if (mp == NULL)
3533 goto errout;
3534 n = min(nblocks, (s64)BPERDMAP - n);
3535 } else {
3536 /* assign/init dmap page */
3537 mp = read_metapage(ipbmap, p,
3538 PSIZE, 0);
3539 if (mp == NULL)
3540 goto errout;
3541
3542 n = min_t(s64, nblocks, BPERDMAP);
3543 }
3544
3545 dp = (struct dmap *) mp->data;
3546 *l0leaf = dbInitDmap(dp, blkno, n);
3547
3548 bmp->db_nfree += n;
3549 agno = le64_to_cpu(dp->start) >> l2agsize;
3550 bmp->db_agfree[agno] += n;
3551
3552 write_metapage(mp);
3553
3554 l0leaf++;
3555 p += nbperpage;
3556
3557 blkno += n;
3558 nblocks -= n;
3559 if (nblocks == 0)
3560 break;
3561 } /* for each dmap in a L0 */
3562
3563 /*
3564 * build current L0 page from its leaves, and
3565 * initialize corresponding parent L1 leaf
3566 */
3567 *l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
3568 write_metapage(l0mp);
3569 l0mp = NULL;
3570
3571 if (nblocks)
3572 l1leaf++; /* continue for next L0 */
3573 else {
3574 /* more than 1 L0 ? */
3575 if (j > 0)
3576 break; /* build L1 page */
3577 else {
3578 /* summarize in global bmap page */
3579 bmp->db_maxfreebud = *l1leaf;
3580 release_metapage(l1mp);
3581 release_metapage(l2mp);
3582 goto finalize;
3583 }
3584 }
3585 } /* for each L0 in a L1 */
3586
3587 /*
3588 * build current L1 page from its leaves, and
3589 * initialize corresponding parent L2 leaf
3590 */
3591 *l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
3592 write_metapage(l1mp);
3593 l1mp = NULL;
3594
3595 if (nblocks)
3596 l2leaf++; /* continue for next L1 */
3597 else {
3598 /* more than 1 L1 ? */
3599 if (k > 0)
3600 break; /* build L2 page */
3601 else {
3602 /* summarize in global bmap page */
3603 bmp->db_maxfreebud = *l2leaf;
3604 release_metapage(l2mp);
3605 goto finalize;
3606 }
3607 }
3608 } /* for each L1 in a L2 */
3609
3610 jfs_error(ipbmap->i_sb, "function has not returned as expected\n");
3611 errout:
3612 if (l0mp)
3613 release_metapage(l0mp);
3614 if (l1mp)
3615 release_metapage(l1mp);
3616 release_metapage(l2mp);
3617 return -EIO;
3618
3619 /*
3620 * finalize bmap control page
3621 */
3622 finalize:
3623
3624 return 0;
3625 }
3626
3627
3628 /*
3629 * dbFinalizeBmap()
3630 */
3631 void dbFinalizeBmap(struct inode *ipbmap)
3632 {
3633 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
3634 int actags, inactags, l2nl;
3635 s64 ag_rem, actfree, inactfree, avgfree;
3636 int i, n;
3637
3638 /*
3639 * finalize bmap control page
3640 */
3641 //finalize:
3642 /*
3643 * compute db_agpref: preferred ag to allocate from
3644 * (the leftmost ag with average free space in it);
3645 */
3646 //agpref:
3647 /* get the number of active ags and inactive ags */
3648 actags = bmp->db_maxag + 1;
3649 inactags = bmp->db_numag - actags;
3650 ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* blocks in the partial (rightmost) ag, if any */
3651
3652 /* determine how many blocks are in the inactive allocation
3653 * groups. in doing this, we must account for the fact that
3654 * the rightmost group might be a partial group (i.e. file
3655 * system size is not a multiple of the group size).
3656 */
3657 inactfree = (inactags && ag_rem) ?
3658 (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem
3659 : ((s64)inactags << bmp->db_agl2size);
3660
3661 /* determine how many free blocks are in the active
3662 * allocation groups and, from that, the average number of
3663 * free blocks per active ag.
3664 */
3665 actfree = bmp->db_nfree - inactfree;
3666 avgfree = (u32) actfree / (u32) actags;
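/*
 * Illustrative example of the two computations above (hypothetical
 * numbers, not taken from any particular aggregate): with
 * db_agl2size = 15 (ag size 32768 blocks), inactags = 2 inactive ags
 * and a partial rightmost group of ag_rem = 1000 blocks,
 *
 *	inactfree = ((2 - 1) << 15) + 1000 = 33768 blocks
 *
 * and with db_nfree = 100000 total free blocks and actags = 3,
 *
 *	actfree = 100000 - 33768 = 66232
 *	avgfree = 66232 / 3      = 22077 free blocks per active ag
 */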
3667
3668 /* if the preferred allocation group does not have at least
3669 * average free space, re-establish the preferred group as the
3670 * leftmost group with average free space.
3671 */
3672 if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
3673 for (bmp->db_agpref = 0; bmp->db_agpref < actags;
3674 bmp->db_agpref++) {
3675 if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
3676 break;
3677 }
3678 if (bmp->db_agpref >= bmp->db_numag) {
3679 jfs_error(ipbmap->i_sb,
3680 "cannot find ag with average freespace\n");
3681 }
3682 }
3683
3684 /*
3685 * compute db_aglevel, db_agheight, db_agwidth, db_agstart:
3686 * an ag is covered by the dmapctl summary tree at level
3687 * aglevel, by agwidth nodes at height agheight (counted from
3688 * the leaves), starting at index agstart of the summary tree
3689 * node array;
3690 */
3691 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
3692 l2nl =
3693 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
3694 bmp->db_agheight = l2nl >> 1;
3695 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
3696 for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
3697 i--) {
3698 bmp->db_agstart += n;
3699 n <<= 2;
3700 }
3701
3702 }
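/*
 * Illustrative example of the geometry computed above (assuming the usual
 * JFS constants L2BPERDMAP = 13 and L2LPERCTL = 10; the ag size is
 * hypothetical): for db_agl2size = 15 (32768 blocks, i.e. 4 dmaps per ag)
 * an ag fits inside a single L0 dmapctl, so
 *
 *	db_aglevel  = BMAPSZTOLEV(32768) = 0
 *	l2nl        = 15 - (13 + 0 * 10) = 2
 *	db_agheight = 2 >> 1             = 1
 *	db_agwidth  = 1 << (2 - 2)       = 1
 *	db_agstart  = 1 + 4 + 16 + 64    = 85
 *
 * i.e. each ag is summarized by one node at height 1 of the L0 summary
 * tree, the first such node sitting at index 85 of the stree array.
 */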
3703
3704
3705 /*
3706 * NAME: dbInitDmap()/ujfs_idmap_page()
3707 *
3708 * FUNCTION: initialize working/persistent bitmap of the dmap page
3709 * for the specified number of blocks:
3710 *
3711 * at entry, the bitmaps have been initialized as free (ZEROS);
3712 * the block count only accounts for blocks that actually
3713 * exist; blocks which don't exist in the aggregate will be
3714 * marked as allocated (ONES);
3715 *
3716 * PARAMETERS:
3717 * dp - pointer to page of map
3718 * Blkno - starting block number within the aggregate
3719 * nblocks - number of blocks in this page
3720 * RETURNS: root of the dmap's summary tree (maximum free string)
3721 */
3722 static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
3723 {
3724 int blkno, w, b, r, nw, nb, i;
3725
3726 /* starting block number within the dmap */
3727 blkno = Blkno & (BPERDMAP - 1);
3728
3729 if (blkno == 0) {
3730 dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
3731 dp->start = cpu_to_le64(Blkno);
3732
3733 if (nblocks == BPERDMAP) {
3734 memset(&dp->wmap[0], 0, LPERDMAP * 4);
3735 memset(&dp->pmap[0], 0, LPERDMAP * 4);
3736 goto initTree;
3737 }
3738 } else {
3739 le32_add_cpu(&dp->nblocks, nblocks);
3740 le32_add_cpu(&dp->nfree, nblocks);
3741 }
3742
3743 /* word number containing start block number */
3744 w = blkno >> L2DBWORD;
3745
3746 /*
3747 * free the bits corresponding to the block range (ZEROS):
3748 * note: not all bits of the first and last words may be contained
3749 * within the block range.
3750 */
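/*
 * For example (illustrative only): freeing nb = 3 bits starting at
 * bit offset b = 5 of a word (bits are numbered from the MSB):
 *
 *	ONES << (DBWORD - nb)        == 0xe0000000
 *	(ONES << (DBWORD - nb)) >> b == 0x07000000
 *
 * so ANDing with the complement clears exactly bits 5..7 of the word
 * while leaving the remaining bits untouched.
 */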
3751 for (r = nblocks; r > 0; r -= nb, blkno += nb) {
3752 /* number of bits preceding range to be freed in the word */
3753 b = blkno & (DBWORD - 1);
3754 /* number of bits to free in the word */
3755 nb = min(r, DBWORD - b);
3756
3757 /* is partial word to be freed ? */
3758 if (nb < DBWORD) {
3759 /* free (set to 0) from the bitmap word */
3760 dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3761 >> b));
3762 dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3763 >> b));
3764
3765 /* skip the word freed */
3766 w++;
3767 } else {
3768 /* free (set to 0) contiguous bitmap words */
3769 nw = r >> L2DBWORD;
3770 memset(&dp->wmap[w], 0, nw * 4);
3771 memset(&dp->pmap[w], 0, nw * 4);
3772
3773 /* skip the words freed */
3774 nb = nw << L2DBWORD;
3775 w += nw;
3776 }
3777 }
3778
3779 /*
3780 * mark bits following the range to be freed (non-existing
3781 * blocks) as allocated (ONES)
3782 */
3783
3784 if (blkno == BPERDMAP)
3785 goto initTree;
3786
3787 /* the first word beyond the end of existing blocks */
3788 w = blkno >> L2DBWORD;
3789
3790 /* does nblocks fall on a 32-bit boundary ? */
3791 b = blkno & (DBWORD - 1);
3792 if (b) {
3793 /* mark a partial word allocated */
3794 dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
3795 w++;
3796 }
3797
3798 /* set the rest of the words in the page to allocated (ONES) */
3799 for (i = w; i < LPERDMAP; i++)
3800 dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
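/*
 * For example (illustrative only): if this dmap covers only 40 existing
 * blocks, the free loop above leaves blkno = 40, so w = 1 and b = 8;
 * word 1 is set to ONES >> 8 = 0x00ffffff (bits 8..31 allocated,
 * counting from the MSB) and words 2 .. LPERDMAP-1 are filled with
 * ONES, marking all non-existing blocks as permanently in use.
 */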
3801
3802 /*
3803 * init tree
3804 */
3805 initTree:
3806 return (dbInitDmapTree(dp));
3807 }
3808
3809
3810 /*
3811 * NAME: dbInitDmapTree()/ujfs_complete_dmap()
3812 *
3813 * FUNCTION: initialize summary tree of the specified dmap:
3814 *
3815 * at entry, bitmap of the dmap has been initialized;
3816 *
3817 * PARAMETERS:
3818 * dp - dmap whose summary tree is to be built (its bitmap,
3819 * starting block number and free counts have already been
3820 * set up by dbInitDmap())
3821 *
3822 * RETURNS: max free string at the root of the tree
3823 */
3824 static int dbInitDmapTree(struct dmap * dp)
3825 {
3826 struct dmaptree *tp;
3827 s8 *cp;
3828 int i;
3829
3830 /* init fixed info of tree */
3831 tp = &dp->tree;
3832 tp->nleafs = cpu_to_le32(LPERDMAP);
3833 tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
3834 tp->leafidx = cpu_to_le32(LEAFIND);
3835 tp->height = cpu_to_le32(4);
3836 tp->budmin = BUDMIN;
3837
3838 /* init each leaf from corresponding wmap word:
3839 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
3840 * bitmap word are allocated.
3841 */
3842 cp = tp->stree + le32_to_cpu(tp->leafidx);
3843 for (i = 0; i < LPERDMAP; i++)
3844 *cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
3845
3846 /* build the dmap's binary buddy summary tree */
3847 return (dbInitTree(tp));
3848 }
3849
3850
3851 /*
3852 * NAME: dbInitTree()/ujfs_adjtree()
3853 *
3854 * FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl.
3855 *
3856 * at entry, the leaves of the tree have been initialized
3857 * from the corresponding bitmap words or from the roots of the
3858 * summary trees of the child control pages;
3859 * configure the binary buddy system at the leaf level, then
3860 * bubble the values of the leaf nodes up the tree.
3861 *
3862 * PARAMETERS:
3863 * dtp - pointer to the dmap or dmapctl summary tree; the
3864 * number of leaves and the log2 number of blocks
3865 * covered by a leaf (budmin) are taken from the
3866 * tree header itself
3867 *
3868 * RETURNS: max free string at the root of the tree
3869 */
3870 static int dbInitTree(struct dmaptree * dtp)
3871 {
3872 int l2max, l2free, bsize, nextb, i;
3873 int child, parent, nparent;
3874 s8 *tp, *cp, *cp1;
3875
3876 tp = dtp->stree;
3877
3878 /* Determine the maximum free string possible for the leaves */
3879 l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
3880
3881 /*
3882 * configure the leaf level into binary buddy system
3883 *
3884 * Try to combine buddies starting with a buddy size of 1
3885 * (i.e. two leaves). At a buddy size of 1 two buddy leaves
3886 * can be combined if both buddies have a maximum free of l2min;
3887 * the combination will result in the left-most buddy leaf having
3888 * a maximum free of l2min+1.
3889 * After processing all buddies for a given size, process buddies
3890 * at the next higher buddy size (i.e. current size * 2) and
3891 * the next maximum free (current free + 1).
3892 * This continues until the maximum possible buddy combination
3893 * yields maximum free.
3894 */
3895 for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
3896 l2free++, bsize = nextb) {
3897 /* get next buddy size == current buddy pair size */
3898 nextb = bsize << 1;
3899
3900 /* scan each adjacent buddy pair at current buddy size */
3901 for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
3902 i < le32_to_cpu(dtp->nleafs);
3903 i += nextb, cp += nextb) {
3904 /* coalesce if both adjacent buddies are max free */
3905 if (*cp == l2free && *(cp + bsize) == l2free) {
3906 *cp = l2free + 1; /* left take right */
3907 *(cp + bsize) = -1; /* right give left */
3908 }
3909 }
3910 }
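/*
 * Illustrative example of the coalescing above (hypothetical leaf
 * values, assuming BUDMIN = L2DBWORD = 5 for a dmap tree): if the
 * first four leaves are all fully free words, { 5, 5, 5, 5 }, the
 * bsize = 1 pass yields { 6, -1, 6, -1 } and the bsize = 2 pass
 * yields { 7, -1, -1, -1 }: the leftmost leaf now describes a free
 * binary-buddy extent of 1 << 7 = 128 contiguous blocks.
 */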
3911
3912 /*
3913 * bubble summary information of leaves up the tree.
3914 *
3915 * Starting at the leaf node level, the four nodes described by
3916 * the higher level parent node are compared for a maximum free and
3917 * this maximum becomes the value of the parent node.
3918 * when all lower level nodes are processed in this fashion then
3919 * move up to the next level (parent becomes a lower level node) and
3920 * continue the process for that level.
3921 */
3922 for (child = le32_to_cpu(dtp->leafidx),
3923 nparent = le32_to_cpu(dtp->nleafs) >> 2;
3924 nparent > 0; nparent >>= 2, child = parent) {
3925 /* get index of 1st node of parent level */
3926 parent = (child - 1) >> 2;
3927
3928 /* set the value of the parent node as the maximum
3929 * of the four nodes of the current level.
3930 */
3931 for (i = 0, cp = tp + child, cp1 = tp + parent;
3932 i < nparent; i++, cp += 4, cp1++)
3933 *cp1 = TREEMAX(cp);
3934 }
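/*
 * Continuing the illustrative example above: once the leaves have been
 * coalesced, each parent takes the maximum of its four children, so a
 * parent over the leaves { 7, -1, -1, -1 } gets the value 7, and that
 * value keeps propagating upward until the root (*tp) holds the largest
 * free binary-buddy extent in the whole dmap (here 1 << 7 = 128 blocks).
 */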
3935
3936 return (*tp);
3937 }
3938
3939
3940 /*
3941 * dbInitDmapCtl()
3942 *
3943 * function: initialize dmapctl page
3944 */
3945 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
3946 { /* i = start leaf index not covered by the block range */
3947 s8 *cp;
3948
3949 dcp->nleafs = cpu_to_le32(LPERCTL);
3950 dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
3951 dcp->leafidx = cpu_to_le32(CTLLEAFIND);
3952 dcp->height = cpu_to_le32(5);
3953 dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
3954
3955 /*
3956 * initialize the leaves of current level that were not covered
3957 * by the specified input block range (i.e. the leaves have no
3958 * low level dmapctl or dmap).
3959 */
3960 cp = &dcp->stree[CTLLEAFIND + i];
3961 for (; i < LPERCTL; i++)
3962 *cp++ = NOFREE;
3963
3964 /* build the dmap's binary buddy summary tree */
3965 return (dbInitTree((struct dmaptree *) dcp));
3966 }
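/*
 * Illustrative values for the budmin assigned above (assuming the usual
 * JFS constants L2BPERDMAP = 13 and L2LPERCTL = 10): an L0 dmapctl leaf
 * (level 0) summarizes one dmap, so budmin = 13 + 10 * 0 = 13 (8192
 * blocks); an L1 leaf summarizes one full L0, so budmin = 13 + 10 * 1 =
 * 23 (8M blocks); an L2 leaf gets budmin = 33.
 */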
3967
3968
3969 /*
3970 * NAME: dbGetL2AGSize()/ujfs_getagl2size()
3971 *
3972 * FUNCTION: Determine log2(allocation group size) from aggregate size
3973 *
3974 * PARAMETERS:
3975 * nblocks - Number of blocks in aggregate
3976 *
3977 * RETURNS: log2(allocation group size) in aggregate blocks
3978 */
3979 static int dbGetL2AGSize(s64 nblocks)
3980 {
3981 s64 sz;
3982 s64 m;
3983 int l2sz;
3984
3985 if (nblocks < BPERDMAP * MAXAG)
3986 return (L2BPERDMAP);
3987
3988 /* round up aggregate size to power of 2 */
3989 m = ((u64) 1 << (64 - 1));
3990 for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
3991 if (m & nblocks)
3992 break;
3993 }
3994
3995 sz = (s64) 1 << l2sz;
3996 if (sz < nblocks)
3997 l2sz += 1;
3998
3999 /* agsize = roundupSize/max_number_of_ag */
4000 return (l2sz - L2MAXAG);
4001 }
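/*
 * Illustrative trace of dbGetL2AGSize() (hypothetical size, assuming
 * BPERDMAP = 8192, MAXAG = 128 and L2MAXAG = 7): for an aggregate of
 * nblocks = 3,000,000 blocks (>= BPERDMAP * MAXAG = 1,048,576), the
 * highest set bit is bit 21, so the scan stops with l2sz = 22; the
 * rounded-up size 1 << 22 = 4,194,304 is not smaller than nblocks, so
 *
 *	dbGetL2AGSize(3000000) == 22 - 7 == 15
 *
 * i.e. an allocation group size of 1 << 15 = 32768 blocks.
 */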
4002
4003
4004 /*
4005 * NAME: dbMapFileSizeToMapSize()
4006 *
4007 * FUNCTION: compute number of blocks the block allocation map file
4008 * can cover from the map file size;
4009 *
4010 * RETURNS: Number of blocks which can be covered by this block map file;
4011 */
4012
4013 /*
4014 * maximum number of map pages at each level including control pages
4015 */
4016 #define MAXL0PAGES (1 + LPERCTL)
4017 #define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES)
4018
4019 /*
4020 * convert number of map pages to the zero origin top dmapctl level
4021 */
4022 #define BMAPPGTOLEV(npages) \
4023 (((npages) <= 3 + MAXL0PAGES) ? 0 : \
4024 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
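/*
 * For instance (assuming LPERCTL = 1024): a map of up to 3 + MAXL0PAGES =
 * 1028 pages (global control page, L2, L1 and L0 control pages plus up to
 * 1024 dmaps) is fully described at level 0, so BMAPPGTOLEV(1028) = 0
 * while BMAPPGTOLEV(1029) = 1.
 */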
4025
4026 s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
4027 {
4028 struct super_block *sb = ipbmap->i_sb;
4029 s64 nblocks;
4030 s64 npages, ndmaps;
4031 int level, i;
4032 int complete, factor;
4033
4034 nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
4035 npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
4036 level = BMAPPGTOLEV(npages);
4037
4038 /* At each level, accumulate the number of dmap pages covered by
4039 * the number of full child levels below it;
4040 * repeat for the last incomplete child level.
4041 */
4042 ndmaps = 0;
4043 npages--; /* skip the first global control page */
4044 /* skip higher level control pages above top level covered by map */
4045 npages -= (2 - level);
4046 npages--; /* skip top level's control page */
4047 for (i = level; i >= 0; i--) {
4048 factor =
4049 (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
4050 complete = (u32) npages / factor;
4051 ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
4052 ((i == 1) ? LPERCTL : 1));
4053
4054 /* pages in last/incomplete child */
4055 npages = (u32) npages % factor;
4056 /* skip incomplete child's level control page */
4057 npages--;
4058 }
4059
4060 /* convert the number of dmaps into the number of blocks
4061 * which can be covered by the dmaps;
4062 */
4063 nblocks = ndmaps << L2BPERDMAP;
4064
4065 return (nblocks);
4066 }
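/*
 * Illustrative trace of dbMapFileSizeToMapSize() (hypothetical map file
 * size, assuming L2BPERDMAP = 13): for a map file of npages = 7
 * metapages, BMAPPGTOLEV(7) = 0, so the map is covered at level 0; one
 * page is skipped for the global control page, two for the L2 and L1
 * control pages above the covered level, and one for the L0 control
 * page, leaving 3 dmap pages, so the function reports
 *
 *	3 << 13 == 24576 blocks covered.
 */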
4067