1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * balloc.c
4 *
5 * PURPOSE
6 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
7 *
8 * COPYRIGHT
9 * (C) 1999-2001 Ben Fennema
10 * (C) 1999 Stelias Computing Inc
11 *
12 * HISTORY
13 *
14 * 02/24/99 blf Created.
15 *
16 */
17
18 #include "udfdecl.h"
19
20 #include <linux/bitops.h>
21 #include <linux/overflow.h>
22
23 #include "udf_i.h"
24 #include "udf_sb.h"
25
26 #define udf_clear_bit __test_and_clear_bit_le
27 #define udf_set_bit __test_and_set_bit_le
28 #define udf_test_bit test_bit_le
29 #define udf_find_next_one_bit find_next_bit_le
30
/*
 * Read bitmap block @block of the space bitmap into cache slot @bitmap_nr
 * and verify its consistency: the bits describing the space bitmap's own
 * on-disk blocks must be marked allocated (zero).
 *
 * The buffer (or an ERR_PTR on corruption) is stored in
 * bitmap->s_block_bitmap[bitmap_nr] so later lookups do not re-verify.
 * Returns 0 on success, -EIO on read failure, -EFSCORRUPTED if the check
 * fails.
 */
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int i;
	int max_bits, off, count;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	if (!bh)
		return -EIO;

	/* Check consistency of Space Bitmap buffer. */
	max_bits = sb->s_blocksize * 8;
	if (!bitmap_nr) {
		/* First bitmap block starts with the spaceBitmapDesc header */
		off = sizeof(struct spaceBitmapDesc) << 3;
		count = min(max_bits - off, bitmap->s_nr_groups);
	} else {
		/*
		 * Rough check if bitmap number is too big to have any bitmap
		 * blocks reserved.
		 */
		if (bitmap_nr >
		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
			return 0;
		off = 0;
		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
				(sizeof(struct spaceBitmapDesc) << 3);
		count = min(count, max_bits);
	}

	/*
	 * A set bit here would mean a block occupied by the bitmap itself is
	 * marked free - the bitmap is corrupted.  Cache the error so we do
	 * not retry verification on every access.
	 */
	for (i = 0; i < count; i++)
		if (udf_test_bit(i + off, bh->b_data)) {
			bitmap->s_block_bitmap[bitmap_nr] =
							ERR_PTR(-EFSCORRUPTED);
			brelse(bh);
			return -EFSCORRUPTED;
		}
	return 0;
}
76
/*
 * Return the cache slot holding the verified bitmap block that covers
 * @block_group, reading and verifying it from disk on first use.  A bitmap
 * block that previously failed verification is cached as an ERR_PTR and
 * its error is returned immediately.
 *
 * Returns the slot number (== block_group) or a negative errno.
 */
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		/*
		 * NOTE(review): this only warns and still indexes
		 * s_block_bitmap[block_group] below - presumably callers are
		 * guaranteed in-range by mount-time checks; confirm.
		 */
		udf_debug("block_group (%u) > nr_groups (%d)\n",
			  block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		/*
		 * The bitmap failed verification in the past. No point in
		 * trying again.
		 */
		if (IS_ERR(bitmap->s_block_bitmap[block_group]))
			return PTR_ERR(bitmap->s_block_bitmap[block_group]);
		return block_group;
	}

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;

	return block_group;
}
105
load_block_bitmap(struct super_block * sb,struct udf_bitmap * bitmap,unsigned int block_group)106 static inline int load_block_bitmap(struct super_block *sb,
107 struct udf_bitmap *bitmap,
108 unsigned int block_group)
109 {
110 int slot;
111
112 slot = __load_block_bitmap(sb, bitmap, block_group);
113
114 if (slot < 0)
115 return slot;
116
117 if (!bitmap->s_block_bitmap[slot])
118 return -EIO;
119
120 return slot;
121 }
122
/*
 * Adjust the free-space count for @partition in the Logical Volume
 * Integrity Descriptor by @cnt (callers pass a negative value, cast to
 * u32, to account allocations) and mark the LVID for writeout.  Without a
 * cached LVID buffer there is nothing to update.
 */
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct buffer_head *lvid_bh = UDF_SB(sb)->s_lvid_bh;
	struct logicalVolIntegrityDesc *desc;

	if (!lvid_bh)
		return;

	desc = (struct logicalVolIntegrityDesc *)lvid_bh->b_data;
	le32_add_cpu(&desc->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
135
/*
 * Mark @count blocks starting at @bloc + @offset as free in the space
 * bitmap.  A run that crosses a bitmap-block (group) boundary is processed
 * group by group via the @overflow carry.  Failure to load a bitmap block
 * aborts silently - there is no way to report the error to callers.
 */
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	/* We make sure this cannot overflow when mounting the filesystem */
	/* Bit index is offset by the spaceBitmapDesc header size */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);
	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			/* Defer the part past the boundary to the next pass */
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			/* Set bit == free; a bit already set means double free */
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
190
/*
 * Try to allocate up to @block_count contiguous blocks starting at
 * @first_block from the space bitmap.  Clearing stops at the first block
 * that is already allocated, so the result may be shorter than requested.
 *
 * NOTE(review): first_block is never advanced, so when a request crosses a
 * bitmap-block boundary the next do-loop pass recomputes the same starting
 * bit; re-clearing an already-cleared bit then terminates the loop at the
 * boundary.  Presumably acceptable for preallocation - confirm.
 *
 * Returns the number of blocks actually preallocated (0 on failure).
 */
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	/* Clamp the request to the end of the partition */
	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		/* Bit index is offset by the spaceBitmapDesc header size */
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		/* Clear (= allocate) free bits until a used block stops us */
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	/* Negative cnt: reduce the recorded free space by alloc_count */
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
238
/*
 * Allocate a single block from the space bitmap, preferring @goal (or a
 * free block near it).  The search first probes the goal's own bitmap
 * block (exact bit, then the next 64-bit word, then byte-wise memscan and
 * a bit-wise scan).  Failing that, it cycles through all groups twice: a
 * fast memscan pass, then an exhaustive bit scan.  Before committing, the
 * code walks back up to 7 bits to the start of a free run so consecutive
 * allocations pack together.
 *
 * Returns the allocated partition-relative block number, or 0 with *err
 * set (-ENOSPC when the bitmap is full, -EIO on load failure).
 */
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				       struct udf_bitmap *bitmap, uint16_t partition,
				       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	/* Bit index is offset by the spaceBitmapDesc header size */
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	/* Group 0 starts after the on-disk descriptor header */
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	/* Byte-wise scan for any byte with a free (set) bit */
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		/* Is the goal block itself free? */
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		/* Look for a free bit within the goal's 64-bit word */
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/* Goal group is full: cycle through all groups, two passes */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			/* First pass: cheap byte-granular scan */
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			/* Second pass: exhaustive bit-granular scan */
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		/* No free bit anywhere: out of space (newblock still 0) */
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Back up to the start of the free run (at most 7 bits) */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	/* Translate (group, bit) back to a partition-relative block */
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
		/*
		 * Ran off the end of the bitmap, and bits following are
		 * non-compliant (not all zero)
		 */
		udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
			" as free, partition length is %u)\n", partition,
			newblock, sbi->s_partmaps[partition].s_partition_len);
		goto error_return;
	}

	if (!udf_clear_bit(bit, bh->b_data)) {
		/* Bit was grabbed in the meantime - restart the search */
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
376
/*
 * Free @count blocks starting at @bloc + @offset into the unallocated
 * space table @table (an inode whose extents list free block runs).
 *
 * The extent list is scanned for an extent adjacent to the freed range:
 * one ending right at @start is extended forward, one beginning right
 * after @end is extended backward.  Extents are capped at 0x3FFFFFFF
 * bytes (the extent-length field limit); the top two bits of elen carry
 * the extent type (etype << 30).  If no neighbour absorbs the range, a
 * brand-new extent is appended, stealing one block from the range itself
 * for an indirect extent block if the current one is full.
 */
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	/* oepos trails epos by one extent so we can rewrite the previous one */
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;
	int ret = 0;

	mutex_lock(&sbi->s_alloc_mutex);
	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	/* First and last block of the range being freed */
	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count) {
		ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
		if (ret < 0)
			goto error_return;
		if (ret == 0)
			break;
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			/* Extent ends right before our range: extend it forward */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				/* Would exceed max extent length: fill it up */
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
							sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* Extent starts right after our range: extend it back */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		/* Advance the trailing position to the one just consumed */
		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else
			goto error_return;

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being free'd */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

error_return:
	brelse(epos.bh);
	brelse(oepos.bh);

	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
508
/*
 * Preallocate up to @block_count blocks starting exactly at @first_block
 * from the unallocated space table.  The extent list is scanned for an
 * extent beginning at @first_block; on a hit, blocks are carved off its
 * front - shrinking the extent, or deleting it when fully consumed.
 *
 * Returns the number of blocks allocated, or 0 if @first_block is out of
 * range, the allocation type is unsupported, or no matching extent exists.
 */
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;
	int ret = 0;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	/* Sentinel: no extent can start at 0xFFFFFFFF */
	eloc.logicalBlockNum = 0xFFFFFFFF;

	/* Scan extents until one starts exactly at first_block */
	while (first_block != eloc.logicalBlockNum) {
		ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
		if (ret < 0)
			goto err_out;
		if (ret == 0)
			break;
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
	}

	if (first_block == eloc.logicalBlockNum) {
		/* Step back to the matching extent descriptor */
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			/* Take block_count blocks off the extent's front */
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			/* Extent fully consumed: remove it */
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

err_out:
	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
573
/*
 * Allocate a single block from the unallocated space table, choosing the
 * free extent whose start is closest to @goal.  The chosen extent's first
 * block is taken, the extent is shrunk by one block (or deleted if it
 * becomes empty).
 *
 * Returns the allocated block number, or 0 with *err set (-ENOSPC when no
 * free extent exists, or the error from extent iteration).
 */
static udf_pblk_t udf_table_new_block(struct super_block *sb,
				      struct inode *table, uint16_t partition,
				      uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	/* spread = distance of best candidate from goal so far */
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);
	int ret = 0;

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   a exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	/* spread == 0 means goal lies inside an extent: exact hit, stop */
	while (spread) {
		ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
		if (ret <= 0)
			break;
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			/* New best candidate: remember where its descriptor is */
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	/* spread still 0xFFFFFFFF means no free extent was seen at all */
	if (ret < 0 || spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		if (ret < 0)
			*err = ret;
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
671
/*
 * Free @count blocks at @bloc + @offset, dispatching to the bitmap- or
 * table-based implementation according to the partition's flags.  The
 * request is rejected (with only a debug message) when the arithmetic
 * overflows or the range exceeds the partition length.  When @inode is
 * given, its byte accounting is reduced accordingly.
 */
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	uint32_t blk;
	bool invalid;

	/* Reject ranges whose arithmetic wraps or that run past the partition */
	invalid = check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
		  check_add_overflow(blk, count, &blk) ||
		  bloc->logicalBlockNum + count > map->s_partition_len;
	if (invalid) {
		udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
			  "len %u, partition len %u\n",
			  partition, bloc->logicalBlockNum, offset, count,
			  map->s_partition_len);
		return;
	}

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);

	if (inode)
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
}
703
/*
 * Preallocate up to @block_count blocks starting at @first_block on
 * @partition, using whichever free-space mechanism (bitmap or table) the
 * partition provides.  When @inode is given, its byte accounting grows by
 * the space actually obtained.
 *
 * Returns the number of blocks preallocated, or 0 when the partition has
 * no supported free-space structure.
 */
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	} else {
		return 0;
	}

	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);

	return allocated;
}
729
udf_new_block(struct super_block * sb,struct inode * inode,uint16_t partition,uint32_t goal,int * err)730 inline udf_pblk_t udf_new_block(struct super_block *sb,
731 struct inode *inode,
732 uint16_t partition, uint32_t goal, int *err)
733 {
734 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
735 udf_pblk_t block;
736
737 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
738 block = udf_bitmap_new_block(sb,
739 map->s_uspace.s_bitmap,
740 partition, goal, err);
741 else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
742 block = udf_table_new_block(sb,
743 map->s_uspace.s_table,
744 partition, goal, err);
745 else {
746 *err = -EIO;
747 return 0;
748 }
749 if (inode && block)
750 inode_add_bytes(inode, sb->s_blocksize);
751 return block;
752 }
753