xref: /openbmc/linux/fs/udf/balloc.c (revision 6b6c2ebd83f2bf97e8f221479372aaca97a4a9b2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * balloc.c
4  *
5  * PURPOSE
6  *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
7  *
8  * COPYRIGHT
9  *  (C) 1999-2001 Ben Fennema
10  *  (C) 1999 Stelias Computing Inc
11  *
12  * HISTORY
13  *
14  *  02/24/99 blf  Created.
15  *
16  */
17 
18 #include "udfdecl.h"
19 
20 #include <linux/bitops.h>
21 
22 #include "udf_i.h"
23 #include "udf_sb.h"
24 
/*
 * Little-endian bitop wrappers used on the on-disk Space Bitmap data.
 * The __test_and_* forms are the non-atomic variants; every caller in
 * this file modifies bitmap buffers only while holding
 * sbi->s_alloc_mutex.
 */
#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le
29 
/*
 * read_block_bitmap - read and sanity-check one block of the space bitmap
 * @sb:        superblock
 * @bitmap:    in-core bitmap descriptor for the partition
 * @block:     block offset within the bitmap extent to read
 * @bitmap_nr: slot in bitmap->s_block_bitmap[] that caches the buffer
 *
 * Returns 0 on success, -EIO if the block cannot be read, or
 * -EFSCORRUPTED if the bitmap marks blocks of the bitmap itself as free
 * (a set bit means "free" in this bitmap).
 */
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int i;
	int max_bits, off, count;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	/* Cache the result (even NULL on read failure) in the slot. */
	bh = sb_bread(sb, udf_get_lb_pblock(sb, &loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	if (!bh)
		return -EIO;

	/* Check consistency of Space Bitmap buffer. */
	max_bits = sb->s_blocksize * 8;
	if (!bitmap_nr) {
		/*
		 * The first bitmap block also carries the on-disk
		 * spaceBitmapDesc header; skip its bits.
		 */
		off = sizeof(struct spaceBitmapDesc) << 3;
		count = min(max_bits - off, bitmap->s_nr_groups);
	} else {
		/*
		 * Rough check if bitmap number is too big to have any bitmap
		 * blocks reserved.
		 */
		if (bitmap_nr >
		    (bitmap->s_nr_groups >> (sb->s_blocksize_bits + 3)) + 2)
			return 0;
		off = 0;
		count = bitmap->s_nr_groups - bitmap_nr * max_bits +
				(sizeof(struct spaceBitmapDesc) << 3);
		count = min(count, max_bits);
	}

	/* Bits covering the bitmap's own blocks must be 0 (allocated). */
	for (i = 0; i < count; i++)
		if (udf_test_bit(i + off, bh->b_data)) {
			/* Remember the corruption so we never retry. */
			bitmap->s_block_bitmap[bitmap_nr] =
							ERR_PTR(-EFSCORRUPTED);
			brelse(bh);
			return -EFSCORRUPTED;
		}
	return 0;
}
75 
76 static int __load_block_bitmap(struct super_block *sb,
77 			       struct udf_bitmap *bitmap,
78 			       unsigned int block_group)
79 {
80 	int retval = 0;
81 	int nr_groups = bitmap->s_nr_groups;
82 
83 	if (block_group >= nr_groups) {
84 		udf_debug("block_group (%u) > nr_groups (%d)\n",
85 			  block_group, nr_groups);
86 	}
87 
88 	if (bitmap->s_block_bitmap[block_group]) {
89 		/*
90 		 * The bitmap failed verification in the past. No point in
91 		 * trying again.
92 		 */
93 		if (IS_ERR(bitmap->s_block_bitmap[block_group]))
94 			return PTR_ERR(bitmap->s_block_bitmap[block_group]);
95 		return block_group;
96 	}
97 
98 	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
99 	if (retval < 0)
100 		return retval;
101 
102 	return block_group;
103 }
104 
105 static inline int load_block_bitmap(struct super_block *sb,
106 				    struct udf_bitmap *bitmap,
107 				    unsigned int block_group)
108 {
109 	int slot;
110 
111 	slot = __load_block_bitmap(sb, bitmap, block_group);
112 
113 	if (slot < 0)
114 		return slot;
115 
116 	if (!bitmap->s_block_bitmap[slot])
117 		return -EIO;
118 
119 	return slot;
120 }
121 
122 static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
123 {
124 	struct udf_sb_info *sbi = UDF_SB(sb);
125 	struct logicalVolIntegrityDesc *lvid;
126 
127 	if (!sbi->s_lvid_bh)
128 		return;
129 
130 	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
131 	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
132 	udf_updated_lvid(sb);
133 }
134 
/*
 * udf_bitmap_free_blocks - return a run of blocks to a bitmap partition
 * @sb:     superblock
 * @bitmap: unallocated-space bitmap of the partition
 * @bloc:   logical block + partition of the start of the range
 * @offset: offset from @bloc->logicalBlockNum of the first block to free
 * @count:  number of consecutive blocks to free
 *
 * Sets the corresponding bits (set == free), continuing into following
 * bitmap blocks when the run crosses a group boundary, and credits the
 * freed space to the LVID free-space counter.  Errors are logged and
 * otherwise ignored (the function returns void).
 */
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	/* Reject wrap-around and ranges past the end of the partition. */
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	/*
	 * Bit indices are shifted by the size (in bits) of the
	 * spaceBitmapDesc header stored at the front of the bitmap.
	 */
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary; if so, free only up to the boundary now and
		 * handle the remainder on the next pass.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			/* A bit that was already set points at a double free. */
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %lu already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((__u8 *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		/*
		 * NOTE(review): this credits sbi->s_partition rather than
		 * bloc->partitionReferenceNum - confirm that is intended.
		 */
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
200 
/*
 * udf_bitmap_prealloc_blocks - claim blocks at a fixed position from a
 * bitmap partition
 *
 * Clears up to @block_count consecutive free bits (set == free)
 * beginning exactly at @first_block; the run ends early at the first
 * already-allocated block.  Returns the number of blocks claimed.
 *
 * NOTE(review): the outer loop recomputes `block` from `first_block`,
 * which is never advanced, so after the inner loop stops at a
 * bitmap-block boundary the retried bits were just cleared by us and
 * udf_clear_bit() fails, ending the run at the boundary - confirm this
 * early stop is intended.
 */
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group;
	int bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	/* Clamp the request to the end of the partition. */
	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		/* Bit index is offset by the spaceBitmapDesc header size. */
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		/* Claim free bits until the run, request or group ends. */
		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	/* Negative count debits the LVID free-space counter. */
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
248 
/*
 * udf_bitmap_new_block - allocate a single block from a bitmap partition
 * @sb:        superblock
 * @bitmap:    unallocated-space bitmap of the partition
 * @partition: partition number (for bounds checking and accounting)
 * @goal:      preferred block number to allocate near
 * @err:       output: 0 on success, -ENOSPC or -EIO on failure
 *
 * Returns the allocated partition-relative block number, or 0 on
 * failure with *err set.  The search tries the goal bit itself, then
 * free bits near the goal inside the same bitmap block, then scans all
 * other groups (a byte-granular pass followed by a bit-granular pass).
 * Before claiming, the chosen bit is walked backwards over up to 7
 * preceding free bits so allocations tend to start at the beginning of
 * a free run.
 */
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0;
	udf_pblk_t block;
	int block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	udf_pblk_t newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	/* Bit index of the goal, offset by the spaceBitmapDesc header. */
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	/* Group 0 begins after the on-disk descriptor header. */
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	/* Look for a fully-free byte (0xFF: all 8 bits set == free). */
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		/* Try the goal bit itself first. */
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		/* Then any free bit up to the next 64-bit boundary. */
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		/* Then the first fully-free byte after the goal. */
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		/* Finally a bit-by-bit scan of the rest of this block. */
		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/*
	 * Scan the remaining groups: i < nr_groups is a byte-granular
	 * pass, the second nr_groups iterations are bit-granular.
	 */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		/* Nothing free anywhere: return 0 with *err == -ENOSPC. */
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Back up over up to 7 preceding free bits. */
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	/* Convert (group, bit) back to a partition-relative block number. */
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
		/*
		 * Ran off the end of the bitmap, and bits following are
		 * non-compliant (not all zero)
		 */
		udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
			" as free, partition length is %u)\n", partition,
			newblock, sbi->s_partmaps[partition].s_partition_len);
		goto error_return;
	}

	/* Claim the block; if the bit was already clear, retry the search. */
	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
386 
/*
 * udf_table_free_blocks - return a run of blocks to a table partition
 * @sb:     superblock
 * @table:  inode of the unallocated-space table
 * @bloc:   logical block + partition of the start of the range
 * @offset: offset from @bloc->logicalBlockNum of the first block to free
 * @count:  number of consecutive blocks to free
 *
 * Tries to merge the freed range [start, end] into an existing extent
 * of the table (extending an extent that ends at @start forward, or one
 * that begins at @end + 1 backward).  Whatever cannot be merged is
 * appended as a new extent; when the current entry block has no room,
 * a block is stolen from the range being freed to extend the table.
 */
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	/* Reject wrap-around and ranges past the end of the partition. */
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%u < %d || %u + %u > %u\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	/* Begin scanning right after the unallocSpaceEntry header. */
	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			/*
			 * Extent ends exactly where the freed range starts:
			 * grow it forward.  The extent length field is only
			 * 30 bits, so clamp at 0x3FFFFFFF and carry the rest.
			 */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
							sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				/* etype lives in the top 2 bits of elen. */
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/*
			 * Extent begins right after the freed range: grow it
			 * backward, with the same 30-bit length clamp.
			 */
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		/* Keep oepos trailing one extent behind epos. */
		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			/* Unknown allocation-descriptor type: give up. */
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being free'd */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
527 
/*
 * udf_table_prealloc_blocks - claim blocks at a fixed position from a
 * table partition
 *
 * Scans the unallocated-space table for an extent that starts exactly
 * at @first_block and carves up to @block_count blocks off its front,
 * deleting the extent if it is consumed entirely.  Returns the number
 * of blocks claimed; 0 if no extent starts at @first_block.
 */
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	/* Sentinel so the first comparison against first_block fails. */
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%u, elen=%u, first_block=%u\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* the scan itself happens in the loop condition */
	}

	if (first_block == eloc.logicalBlockNum) {
		/* Step back so we rewrite the extent we just read. */
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			/* Shrink the extent from the front. */
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			/* Whole extent consumed: remove it. */
			udf_delete_aext(table, epos);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
587 
/*
 * udf_table_new_block - allocate one block from a table partition
 * @sb:        superblock
 * @table:     inode of the unallocated-space table
 * @partition: partition number (for bounds check and accounting)
 * @goal:      preferred block number
 * @err:       output: 0 on success, -ENOSPC on failure
 *
 * Walks every extent of the table tracking the one whose blocks lie
 * closest to @goal ("spread" is the distance; spread 0 means @goal is
 * inside the extent and ends the search early).  The first block of
 * that extent is then taken, shrinking - or deleting, if emptied - the
 * extent.  Returns the block number, or 0 with *err set.
 */
static udf_pblk_t udf_table_new_block(struct super_block *sb,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	udf_pblk_t newblock = 0;
	uint32_t adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;	/* goal inside this extent */
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			/* New closest match: remember its table position. */
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			/* Point back at the descriptor we just read. */
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			/* etype lives in the top 2 bits of the length. */
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	/* spread unchanged means the table held no extents at all. */
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
680 
681 void udf_free_blocks(struct super_block *sb, struct inode *inode,
682 		     struct kernel_lb_addr *bloc, uint32_t offset,
683 		     uint32_t count)
684 {
685 	uint16_t partition = bloc->partitionReferenceNum;
686 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
687 
688 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
689 		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
690 				       bloc, offset, count);
691 	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
692 		udf_table_free_blocks(sb, map->s_uspace.s_table,
693 				      bloc, offset, count);
694 	}
695 
696 	if (inode) {
697 		inode_sub_bytes(inode,
698 				((sector_t)count) << sb->s_blocksize_bits);
699 	}
700 }
701 
702 inline int udf_prealloc_blocks(struct super_block *sb,
703 			       struct inode *inode,
704 			       uint16_t partition, uint32_t first_block,
705 			       uint32_t block_count)
706 {
707 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
708 	int allocated;
709 
710 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
711 		allocated = udf_bitmap_prealloc_blocks(sb,
712 						       map->s_uspace.s_bitmap,
713 						       partition, first_block,
714 						       block_count);
715 	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
716 		allocated = udf_table_prealloc_blocks(sb,
717 						      map->s_uspace.s_table,
718 						      partition, first_block,
719 						      block_count);
720 	else
721 		return 0;
722 
723 	if (inode && allocated > 0)
724 		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
725 	return allocated;
726 }
727 
728 inline udf_pblk_t udf_new_block(struct super_block *sb,
729 			 struct inode *inode,
730 			 uint16_t partition, uint32_t goal, int *err)
731 {
732 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
733 	udf_pblk_t block;
734 
735 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
736 		block = udf_bitmap_new_block(sb,
737 					     map->s_uspace.s_bitmap,
738 					     partition, goal, err);
739 	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
740 		block = udf_table_new_block(sb,
741 					    map->s_uspace.s_table,
742 					    partition, goal, err);
743 	else {
744 		*err = -EIO;
745 		return 0;
746 	}
747 	if (inode && block)
748 		inode_add_bytes(inode, sb->s_blocksize);
749 	return block;
750 }
751