xref: /openbmc/linux/fs/udf/balloc.c (revision c21b37f6)
/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
	find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

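/*
 * The helpers above let the bitmap be scanned one native word at a
 * time: uintBPL_t expands to __le32 or __le64 depending on
 * BITS_PER_LONG (uint(64) -> xuint(64) -> __le64), and leBPL_to_cpup()
 * resolves to the matching le32_to_cpup()/le64_to_cpup(). UDF block
 * bitmaps use the same little-endian bit numbering as ext2, hence the
 * ext2_*_bit() wrappers.
 */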
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *)addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}
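
/*
 * Illustrative example: with BITS_PER_LONG == 32 and a bitmap whose
 * first word is cpu_to_le32(0x00000100), find_next_one_bit(addr, 32, 0)
 * returns 8 (the lowest set bit), and find_next_one_bit(addr, 32, 9)
 * returns 32, i.e. "size", meaning no further set bit was found.
 */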

#define find_first_one_bit(addr, size) \
	find_next_one_bit((addr), (size), 0)

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

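/*
 * read_block_bitmap() caches the buffer_head in s_block_bitmap[], even
 * when the read failed (bh == NULL). A NULL slot means "not loaded", so
 * a failed group is simply re-read on the next lookup, while
 * load_block_bitmap() reports -EIO to the current caller.
 */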
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

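/*
 * The on-disk bitmap begins with a spaceBitmapDesc header, so partition
 * block N corresponds to bit N + sizeof(struct spaceBitmapDesc) * 8.
 * Each bitmap block ("group") covers s_blocksize * 8 bits; with
 * 2048-byte blocks, for example, that is 16384 bits per group, so the
 * group is block >> (s_blocksize_bits + 3) and the bit within it is
 * block % (s_blocksize << 3) - the computation used below. A set bit
 * means the block is free.
 */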
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb)) {
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

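/*
 * udf_bitmap_prealloc_blocks() reserves up to block_count contiguous
 * blocks starting exactly at first_block by clearing their bits, stops
 * at the first block that is already in use, and returns the number of
 * blocks actually reserved.
 */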
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		     (sizeof(struct spaceBitmapDesc) << 3) +
		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data)) {
			goto out;
		} else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
			goto out;
		} else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

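/*
 * udf_bitmap_new_block() allocates a single block near "goal": it tries
 * the goal bit itself, then the rest of the goal's 64-bit word, then
 * scans (via memscan() for a 0xFF byte, i.e. eight free blocks) and bit
 * searches the goal group, and finally falls back to the other groups.
 * search_back: then walks at most 7 bits backwards so the allocation
 * starts at the beginning of a free run, keeping future allocations
 * contiguous.
 */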
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i = 0; i < 7 && bit > (group_start << 3) &&
	     udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		; /* empty loop */

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		/* Undo the quota charge taken above before retrying */
		if (inode)
			DQUOT_FREE_BLOCK(inode, 1);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}

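/*
 * The table-based variants below keep free space as extents recorded in
 * an unallocated-space table inode. An extent length field packs the
 * extent type into its top two bits and the byte length into the low 30
 * bits, so one extent can describe at most 0x3FFFFFFF bytes; the merge
 * logic caps a grown extent at 0x40000000 - blocksize and leaves the
 * remainder to be recorded as a separate extent.
 */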
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/*
	 * We update the quota and free-space accounting up front; the
	 * error paths below do not roll these updates back.
	 */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if ((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start) {
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we already hold
		 * s_alloc_mutex, very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
			adsize = sizeof(short_ad);
		} else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
			adsize = sizeof(long_ad);
		} else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				/* The descriptors live in the table inode, not in "inode" */
				sptr = UDF_I_DATA(table) + epos.offset -
					udf_file_entry_alloc_offset(table) +
					UDF_I_LENEATTR(table) - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh) {
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				} else {
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					    epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					    epos.block.logicalBlockNum, sizeof(tag));

			switch (UDF_I_ALLOCTYPE(table)) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		if (elen) { /* It's possible that stealing the block emptied the extent */
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}

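/*
 * udf_table_prealloc_blocks() succeeds only when a free extent starts
 * exactly at first_block; it then carves up to block_count blocks off
 * the front of that extent, shrinking it in place or deleting it when
 * the request consumes it entirely.
 */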
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
				alloc_count > block_count ? block_count : alloc_count)) {
			alloc_count = 0;
		} else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
		} else {
			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
		}
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

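/*
 * udf_table_new_block() walks every free extent and remembers the one
 * closest to "goal": "spread" is the distance in blocks, and 0 means
 * goal itself lies inside the extent. The winning extent then gives up
 * its first block and is shrunk (or deleted when it becomes empty).
 */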
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an
	   exact hit, we stop. Otherwise we keep going until we run out of
	   extents. We store the extent position (buffer, block, and offset)
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent. That way,
	   we only delete (empty) extents and never have to insert an
	   extent because of splitting. This works, but performs poorly. */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}

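/*
 * The exported entry points below dispatch on the partition map flags:
 * each partition tracks its free space either in a bitmap or in an
 * unallocated-space table, in either the "unallocated" or the "freed"
 * space set, and the matching helper above is called.
 */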
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					     bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					     bloc, offset, count);
	} else {
		return;
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
						 partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
						 partition, first_block, block_count);
	} else {
		return 0;
	}
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
					    UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					    partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					   partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					    partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					   partition, goal, err);
	} else {
		*err = -EIO;
		return 0;
	}
}