xref: /openbmc/linux/fs/udf/balloc.c (revision 643d1f7f)
/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
	find_next_one_bit(addr, size, offset)
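
/*
 * Note: the ext2_* helpers aliased above are the kernel's generic
 * little-endian bitmap operations, matching the byte order of the
 * on-disk UDF space bitmap.
 */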

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
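
/*
 * The macro stack above builds names from BITS_PER_LONG by token pasting.
 * For example, on a 64-bit kernel:
 *
 *	uintBPL_t         expands to  __le64
 *	leBPL_to_cpup(x)  expands to  le64_to_cpup(x)
 *
 * so find_next_one_bit() below scans the bitmap one native word at a
 * time, converting each word from little-endian as it goes.
 */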

static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *)addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size) \
	find_next_one_bit((addr), (size), 0)
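
/*
 * Illustrative sketch (not part of the driver): visiting every set bit
 * in a little-endian bitmap with the helpers above.  'bitmap', 'nbits'
 * and handle_set_bit() are hypothetical.  Like the generic
 * find_next_bit(), the helpers return a value no smaller than the
 * buffer size once no further set bit exists, which ends the loop:
 *
 *	int bit = udf_find_first_one_bit(bitmap, nbits);
 *	while (bit < nbits) {
 *		handle_set_bit(bit);
 *		bit = udf_find_next_one_bit(bitmap, nbits, bit + 1);
 *	}
 */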

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);
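
	/*
	 * Worked example (illustrative numbers): with a 2048-byte block
	 * size each bitmap block covers 2048 * 8 = 16384 bits, so the two
	 * lines above compute block_group = block / 16384 and
	 * bit = block % 16384.  The (sizeof(struct spaceBitmapDesc) << 3)
	 * term added to 'block' skips the bits occupied by the space
	 * bitmap descriptor header at the start of the bitmap.
	 */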

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
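
	/*
	 * Example with illustrative numbers: freeing 100 bits starting at
	 * bit 16350 of a 16384-bit group gives overflow = 16350 + 100 -
	 * 16384 = 66; this pass frees bits 16350..16383 and the goto at
	 * the bottom comes back for the remaining 66 bits in the next
	 * group.
	 */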
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb)) {
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		     (sizeof(struct spaceBitmapDesc) << 3) +
		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
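	/*
	 * group_start above is nonzero only for group 0, the one bitmap
	 * block that carries the on-disk spaceBitmapDesc header; scans of
	 * that block must skip the header bytes.
	 */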

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data)) {
			goto out;
		} else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
			goto out;
		} else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

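		/*
		 * (bit + 63) & ~63 rounds the goal bit up to the next
		 * multiple of 64 (e.g. bit 100 gives end_goal 128), so the
		 * search below stays within a small window just past the
		 * goal before widening to the rest of the block.
		 */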
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i = 0; i < 7 && bit > (group_start << 3) &&
	     udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		; /* empty loop */

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

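	/*
	 * Invert the earlier mapping: strip the spaceBitmapDesc header bits
	 * and fold the group number back in to turn the bit number into a
	 * partition-relative block number.
	 */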
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that could
	   occur, but... oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

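	/*
	 * In the loop below the top two bits of an extent length hold the
	 * extent type and the low 30 bits hold the byte length, hence the
	 * (etype << 30) | elen packing and the 0x3FFFFFFF masks.  For
	 * example, a recorded-and-allocated extent (type 0) of eight
	 * 2048-byte blocks is stored as (0 << 30) | 16384.
	 */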
	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if ((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start) {
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
			adsize = sizeof(short_ad);
		} else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
			adsize = sizeof(long_ad);
		} else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					    udf_get_lb_pblock(sb, epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(table) + epos.offset - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				} else {
					sptr = UDF_I_DATA(table) + epos.offset;
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					    epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					    epos.block.logicalBlockNum, sizeof(tag));

			switch (UDF_I_ALLOCTYPE(table)) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
				alloc_count > block_count ? block_count : alloc_count)) {
			alloc_count = 0;
		} else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
				       (etype << 30) | elen, 1);
		} else {
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
		}
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal.  If we find an
	   exact hit, we stop.  Otherwise we keep going until we run out of
	   extents.  We store the buffer_head, bloc, and extoffset of the
	   current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting. */
	/* This works, but very poorly.... */
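	/*
	 * Example: with a free extent covering blocks 100..199, handing out
	 * block 150 would split it into 100..149 and 151..199 and require
	 * inserting a new extent; handing out block 100 merely shrinks the
	 * extent to 101..199, and an extent shrunk to zero length is simply
	 * deleted below.
	 */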

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					     bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					     bloc, offset, count);
	} else {
		return;
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
						 partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
						 partition, first_block, block_count);
	} else {
		return 0;
	}
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		ret = udf_bitmap_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					   partition, goal, err);
		return ret;
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					   partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					    partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					   partition, goal, err);
	} else {
		*err = -EIO;
		return 0;
	}
}