1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * balloc.c
4 *
5 * PURPOSE
6 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
7 *
8 * COPYRIGHT
9 * (C) 1999-2001 Ben Fennema
10 * (C) 1999 Stelias Computing Inc
11 *
12 * HISTORY
13 *
14 * 02/24/99 blf Created.
15 *
16 */
17
18 #include "udfdecl.h"
19
20 #include <linux/bitops.h>
21 #include <linux/overflow.h>
22
23 #include "udf_i.h"
24 #include "udf_sb.h"
25
26 #define udf_clear_bit __test_and_clear_bit_le
27 #define udf_set_bit __test_and_set_bit_le
28 #define udf_test_bit test_bit_le
29 #define udf_find_next_one_bit find_next_bit_le
30
/*
 * Read one block of the partition's Space Bitmap into the buffer cache and
 * verify it.
 *
 * @sb:		superblock of the mounted filesystem
 * @bitmap:	in-core space bitmap descriptor
 * @block:	block offset within the bitmap extent to read
 * @bitmap_nr:	slot in bitmap->s_block_bitmap[] to cache the buffer in
 *
 * Returns 0 on success, -EIO if the block cannot be read, or -EFSCORRUPTED
 * if bits that must describe in-use (reserved) blocks are set, i.e. marked
 * free. On corruption the slot is poisoned with ERR_PTR(-EFSCORRUPTED) so
 * later loads fail fast without re-reading the block.
 */
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int i;
	int max_bits, off, count;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	/* Cache the buffer (even NULL) before checking for read failure. */
	bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	if (!bh)
		return -EIO;

	/*
	 * Check consistency of Space Bitmap buffer: the bit range that should
	 * cover blocks reserved for the bitmap itself must not be marked free.
	 * (NOTE(review): this presumes the bitmap occupies the first blocks
	 * of the partition — matches the bit numbering used by the free/alloc
	 * paths below, which offset bit positions by the descriptor header.)
	 */
	max_bits = sb->s_blocksize * 8;
	if (!bitmap_nr) {
		/* First bitmap block: skip the spaceBitmapDesc header bits. */
		off = sizeof(struct spaceBitmapDesc) << 3;
		count = min(max_bits - off, bitmap->s_nr_groups);
	} else {
		/*
		 * Rough check if bitmap number is too big to have any bitmap
		 * blocks reserved.
		 */
		if (bitmap_nr >
		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
			return 0;
		off = 0;
		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
			(sizeof(struct spaceBitmapDesc) << 3);
		count = min(count, max_bits);
	}

	/* A set bit here would mean a reserved block is marked free. */
	for (i = 0; i < count; i++)
		if (udf_test_bit(i + off, bh->b_data)) {
			/* Poison the slot so we never retry this bitmap. */
			bitmap->s_block_bitmap[bitmap_nr] =
				ERR_PTR(-EFSCORRUPTED);
			brelse(bh);
			return -EFSCORRUPTED;
		}
	return 0;
}
76
__load_block_bitmap(struct super_block * sb,struct udf_bitmap * bitmap,unsigned int block_group)77 static int __load_block_bitmap(struct super_block *sb,
78 struct udf_bitmap *bitmap,
79 unsigned int block_group)
80 {
81 int retval = 0;
82 int nr_groups = bitmap->s_nr_groups;
83
84 if (block_group >= nr_groups) {
85 udf_debug("block_group (%u) > nr_groups (%d)\n",
86 block_group, nr_groups);
87 }
88
89 if (bitmap->s_block_bitmap[block_group]) {
90 /*
91 * The bitmap failed verification in the past. No point in
92 * trying again.
93 */
94 if (IS_ERR(bitmap->s_block_bitmap[block_group]))
95 return PTR_ERR(bitmap->s_block_bitmap[block_group]);
96 return block_group;
97 }
98
99 retval = read_block_bitmap(sb, bitmap, block_group, block_group);
100 if (retval < 0)
101 return retval;
102
103 return block_group;
104 }
105
load_block_bitmap(struct super_block * sb,struct udf_bitmap * bitmap,unsigned int block_group)106 static inline int load_block_bitmap(struct super_block *sb,
107 struct udf_bitmap *bitmap,
108 unsigned int block_group)
109 {
110 int slot;
111
112 slot = __load_block_bitmap(sb, bitmap, block_group);
113
114 if (slot < 0)
115 return slot;
116
117 if (!bitmap->s_block_bitmap[slot])
118 return -EIO;
119
120 return slot;
121 }
122
/*
 * Adjust the free-space counter for @partition in the cached Logical Volume
 * Integrity Descriptor by @cnt and mark the LVID updated. Callers pass a
 * negated count (relying on unsigned wrap-around) to subtract space.
 */
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct buffer_head *lvid_bh = UDF_SB(sb)->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;

	/* No LVID loaded: nothing to account against. */
	if (!lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
135
/*
 * Mark a contiguous run of blocks free in a bitmap-managed partition.
 * In this bitmap a set bit means "free": freeing sets bits, allocation
 * (see udf_bitmap_prealloc_blocks/udf_bitmap_new_block) clears them.
 *
 * @sb:		superblock
 * @bitmap:	unallocated-space bitmap of the partition
 * @bloc:	base location of the run to free
 * @offset:	offset from @bloc of the first block to free
 * @count:	number of blocks to free
 */
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	/* We make sure this cannot overflow when mounting the filesystem */
	/* Bit numbering is offset by the spaceBitmapDesc header bits. */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);
	do {
		overflow = 0;
		/* One "group" is one bitmap block = s_blocksize * 8 bits. */
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 * If so, free only up to the boundary now and loop for the rest.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			/* A bit already set means a double free (or a
			 * corrupted bitmap) — log and carry on. */
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			/* Continue with the remainder in the next group. */
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
190
/*
 * Try to allocate up to @block_count blocks starting exactly at @first_block
 * from a bitmap-managed partition. Allocation is best-effort and stops at
 * the first block that is already in use.
 *
 * Returns the number of blocks actually allocated (0 if @first_block lies
 * outside the partition or is already allocated).
 *
 * NOTE(review): @first_block is never advanced between iterations of the
 * outer loop, so when the run crosses a bitmap-block boundary the recomputed
 * bit lands on an already-cleared bit and the loop exits early via "out".
 * Preallocation simply returns fewer blocks then — confirm this boundary
 * behavior is intended.
 */
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	/* Clamp the request to the end of the partition. */
	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		/* Bit numbering is offset by the spaceBitmapDesc header. */
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		/* Claim consecutive free bits; clearing a clear bit returns
		 * 0, meaning the block is already in use — stop there. */
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	/* Negated count wraps to subtract from the LVID free-space total. */
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
238
/*
 * Allocate a single block from a bitmap-managed partition, as close to
 * @goal as possible.
 *
 * Search order:
 *   1. the goal bit itself;
 *   2. the 64-bit-aligned window following the goal bit;
 *   3. the rest of the goal's bitmap block (byte scan, then bit scan);
 *   4. all other groups, two passes (first byte-wise for a fully-free
 *      byte, then bit-wise for any free bit).
 * A hit found by byte scan is walked back over up to 7 preceding free bits
 * (presumably to return the start of the free run within that byte —
 * confirm intent).
 *
 * Returns the allocated block number with *err = 0, or 0 with *err set to
 * -ENOSPC (no free block) or -EIO (bitmap load failure / corruption).
 */
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				       struct udf_bitmap *bitmap, uint16_t partition,
				       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	/* Out-of-range goals fall back to the start of the partition. */
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	/* Bit numbering is offset by the spaceBitmapDesc header bits. */
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	/* Group 0 starts after the on-disk descriptor header. */
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	/* Skip over fully-allocated (0xFF-free? no: 0xFF = all-free) bytes:
	 * memscan finds the first byte that is entirely free. */
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		/* Exact goal hit? (set bit == free block) */
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		/* Search the 64-bit window the goal falls into. */
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		/* Byte-wise scan from the goal's byte to the block end. */
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		/* Bit-wise scan of the whole group as a last resort. */
		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/* Goal group exhausted: scan the remaining groups, two passes. */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			/* First pass: only accept a fully-free byte. */
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			/* Second pass: accept any free bit. */
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		/* Nothing free anywhere: *err stays -ENOSPC. */
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Walk back over up to 7 preceding free bits within the byte. */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	/* Translate (group, bit) back to a partition block number. */
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
		/*
		 * Ran off the end of the bitmap, and bits following are
		 * non-compliant (not all zero)
		 */
		udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
			" as free, partition length is %u)\n", partition,
			newblock, sbi->s_partmaps[partition].s_partition_len);
		goto error_return;
	}

	/* Claim the block; if someone/something already cleared the bit,
	 * restart the whole search. */
	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
376
/*
 * Free a run of blocks into a table-managed (Unallocated Space Entry)
 * partition: merge the freed run into an adjacent extent of the free-space
 * table when possible, otherwise append it as a new extent.
 *
 * @sb:		superblock
 * @table:	inode holding the unallocated space table
 * @bloc:	base location of the run to free
 * @offset:	offset from @bloc of the first freed block
 * @count:	number of blocks to free
 *
 * Extent lengths are encoded as (etype << 30) | byte_length, so one extent
 * can describe at most 0x3FFFFFFF bytes.
 */
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	/* First and last block numbers of the run being freed. */
	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	/*
	 * Walk the table looking for an extent adjacent to the freed run;
	 * oepos trails epos by one extent so we can rewrite the extent just
	 * read via udf_write_aext(&oepos).
	 */
	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			/* Run starts right after this extent: grow it forward,
			 * splitting off anything beyond the max extent size. */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* Run ends right before this extent: grow it backward,
			 * with the same max-extent-size handling. */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		/* Keep oepos one extent behind epos. */
		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			/* Unknown allocation descriptor type: give up. */
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being free'd */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
506
/*
 * Allocate up to @block_count blocks starting exactly at @first_block from
 * a table-managed partition. Succeeds only if some free extent starts
 * precisely at @first_block; blocks are taken from the head of that extent
 * and the extent is shrunk (or deleted when fully consumed).
 *
 * Returns the number of blocks allocated, possibly 0.
 */
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		/* Unknown allocation descriptor type. */
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	/* Scan the free-space table for an extent starting at first_block. */
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		/* Step back to the descriptor we just read so we can
		 * rewrite (or delete) it in place. */
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			/* Take only what was asked for from the head. */
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			/* Extent fully consumed: remove it. */
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
566
/*
 * Allocate one block from a table-managed partition, taken from the head
 * of the free extent closest to @goal.
 *
 * Returns the allocated block with *err = 0, or 0 with *err = -ENOSPC if
 * the table has no free extents (or the alloc-descriptor type is unknown).
 */
static udf_pblk_t udf_table_new_block(struct super_block *sb,
				      struct inode *table, uint16_t partition,
				      uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		/* Unknown allocation descriptor type. */
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	/* Out-of-range goals fall back to the start of the partition. */
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   a exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	/* spread == 0 means an exact hit, terminating the scan early. */
	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		/* Distance from goal to this extent (0 if goal inside it). */
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			/* New closest extent: remember its position
			 * (epos.offset already advanced past it, hence
			 * the -adsize correction). */
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	/* spread untouched means the table held no extents at all. */
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
659
/*
 * udf_free_blocks - return a run of blocks to the partition's free space
 * @sb:		superblock
 * @inode:	inode owning the blocks (may be NULL); its byte count is
 *		reduced by the freed size
 * @bloc:	base location of the run
 * @offset:	offset from @bloc of the first block
 * @count:	number of blocks to free
 *
 * Validates the request against integer overflow and the partition length,
 * then dispatches to the bitmap- or table-based free routine.
 */
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	uint32_t blk;

	/*
	 * NOTE(review): the overflow of logicalBlockNum + offset + count is
	 * checked, but the length check below uses logicalBlockNum + count
	 * without @offset — confirm the omission of offset is intentional.
	 */
	if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
	    check_add_overflow(blk, count, &blk) ||
	    bloc->logicalBlockNum + count > map->s_partition_len) {
		udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
			  "len %u, partition len %u\n",
			  partition, bloc->logicalBlockNum, offset, count,
			  map->s_partition_len);
		return;
	}

	/* Dispatch on how this partition tracks unallocated space. */
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	}

	if (inode) {
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
	}
}
691
/*
 * Preallocate up to @block_count blocks starting at @first_block in
 * @partition, dispatching to the bitmap- or table-based allocator.
 * Returns the number of blocks actually allocated (0 if the partition has
 * neither free-space structure); successful allocations are charged to
 * @inode's byte count when @inode is non-NULL.
 */
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	/* Pick the allocator matching this partition's free-space tracking. */
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	} else {
		return 0;
	}

	/* Account the preallocated bytes against the owning inode. */
	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);

	return allocated;
}
717
udf_new_block(struct super_block * sb,struct inode * inode,uint16_t partition,uint32_t goal,int * err)718 inline udf_pblk_t udf_new_block(struct super_block *sb,
719 struct inode *inode,
720 uint16_t partition, uint32_t goal, int *err)
721 {
722 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
723 udf_pblk_t block;
724
725 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
726 block = udf_bitmap_new_block(sb,
727 map->s_uspace.s_bitmap,
728 partition, goal, err);
729 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
730 block = udf_table_new_block(sb,
731 map->s_uspace.s_table,
732 partition, goal, err);
733 else {
734 *err = -EIO;
735 return 0;
736 }
737 if (inode && block)
738 inode_add_bytes(inode, sb->s_blocksize);
739 return block;
740 }
741