xref: /openbmc/linux/fs/udf/inode.c (revision 9f754758)
1 /*
2  * inode.c
3  *
4  * PURPOSE
5  *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *  This file is distributed under the terms of the GNU General Public
9  *  License (GPL). Copies of the GPL can be obtained from:
10  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *  Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1998 Dave Boynton
14  *  (C) 1998-2004 Ben Fennema
15  *  (C) 1999-2000 Stelias Computing Inc
16  *
17  * HISTORY
18  *
19  *  10/04/98 dgb  Added rudimentary directory functions
20  *  10/07/98      Fully working udf_block_map! It works!
21  *  11/25/98      bmap altered to better support extents
22  *  12/06/98 blf  partition support in udf_iget, udf_block_map
23  *                and udf_read_inode
24  *  12/12/98      rewrote udf_block_map to handle next extents and descs across
25  *                block boundaries (which is not actually allowed)
26  *  12/20/98      added support for strategy 4096
27  *  03/07/99      rewrote udf_block_map (again)
28  *                New funcs, inode_bmap, udf_next_aext
29  *  04/19/99      Support for writing device EA's for major/minor #
30  */
31 
32 #include "udfdecl.h"
33 #include <linux/mm.h>
34 #include <linux/smp_lock.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h>
37 #include <linux/buffer_head.h>
38 #include <linux/writeback.h>
39 #include <linux/slab.h>
40 #include <linux/crc-itu-t.h>
41 
42 #include "udf_i.h"
43 #include "udf_sb.h"
44 
45 MODULE_AUTHOR("Ben Fennema");
46 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
47 MODULE_LICENSE("GPL");
48 
49 #define EXTENT_MERGE_SIZE 5
50 
51 static mode_t udf_convert_permissions(struct fileEntry *);
52 static int udf_update_inode(struct inode *, int);
53 static void udf_fill_inode(struct inode *, struct buffer_head *);
54 static int udf_alloc_i_data(struct inode *inode, size_t size);
55 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
56 					sector_t *, int *);
57 static int8_t udf_insert_aext(struct inode *, struct extent_position,
58 			      struct kernel_lb_addr, uint32_t);
59 static void udf_split_extents(struct inode *, int *, int, int,
60 			      struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
61 static void udf_prealloc_extents(struct inode *, int, int,
62 				 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
63 static void udf_merge_extents(struct inode *,
64 			      struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
65 static void udf_update_extents(struct inode *,
66 			       struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
67 			       struct extent_position *);
68 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
69 
70 
71 void udf_delete_inode(struct inode *inode)
72 {
73 	truncate_inode_pages(&inode->i_data, 0);
74 
75 	if (is_bad_inode(inode))
76 		goto no_delete;
77 
78 	inode->i_size = 0;
79 	udf_truncate(inode);
80 	lock_kernel();
81 
82 	udf_update_inode(inode, IS_SYNC(inode));
83 	udf_free_inode(inode);
84 
85 	unlock_kernel();
86 	return;
87 
88 no_delete:
89 	clear_inode(inode);
90 }
91 
92 /*
93  * If we are going to release inode from memory, we truncate last inode extent
94  * to proper length. We could use drop_inode() but it's called under inode_lock
95  * and thus we cannot mark inode dirty there.  We use clear_inode() but we have
96  * to make sure to write inode as it's not written automatically.
97  */
98 void udf_clear_inode(struct inode *inode)
99 {
100 	struct udf_inode_info *iinfo = UDF_I(inode);
101 
102 	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
103 	    inode->i_size != iinfo->i_lenExtents) {
104 		printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
105 			"inode size %llu different from extent length %llu. "
106 			"Filesystem need not be standards compliant.\n",
107 			inode->i_sb->s_id, inode->i_ino, inode->i_mode,
108 			(unsigned long long)inode->i_size,
109 			(unsigned long long)iinfo->i_lenExtents);
110 	}
111 
112 	dquot_drop(inode);
113 	kfree(iinfo->i_ext.i_data);
114 	iinfo->i_ext.i_data = NULL;
115 }
116 
117 static int udf_writepage(struct page *page, struct writeback_control *wbc)
118 {
119 	return block_write_full_page(page, udf_get_block, wbc);
120 }
121 
122 static int udf_readpage(struct file *file, struct page *page)
123 {
124 	return block_read_full_page(page, udf_get_block);
125 }
126 
127 static int udf_write_begin(struct file *file, struct address_space *mapping,
128 			loff_t pos, unsigned len, unsigned flags,
129 			struct page **pagep, void **fsdata)
130 {
131 	*pagep = NULL;
132 	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
133 				udf_get_block);
134 }
135 
136 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
137 {
138 	return generic_block_bmap(mapping, block, udf_get_block);
139 }
140 
141 const struct address_space_operations udf_aops = {
142 	.readpage	= udf_readpage,
143 	.writepage	= udf_writepage,
144 	.sync_page	= block_sync_page,
145 	.write_begin		= udf_write_begin,
146 	.write_end		= generic_write_end,
147 	.bmap		= udf_bmap,
148 };
149 
150 void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
151 {
152 	struct page *page;
153 	char *kaddr;
154 	struct udf_inode_info *iinfo = UDF_I(inode);
155 	struct writeback_control udf_wbc = {
156 		.sync_mode = WB_SYNC_NONE,
157 		.nr_to_write = 1,
158 	};
159 
160 	/* from now on we have normal address_space methods */
161 	inode->i_data.a_ops = &udf_aops;
162 
163 	if (!iinfo->i_lenAlloc) {
164 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
165 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
166 		else
167 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
168 		mark_inode_dirty(inode);
169 		return;
170 	}
171 
172 	page = grab_cache_page(inode->i_mapping, 0);
173 	BUG_ON(!PageLocked(page));
174 
175 	if (!PageUptodate(page)) {
176 		kaddr = kmap(page);
177 		memset(kaddr + iinfo->i_lenAlloc, 0x00,
178 		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
179 		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
180 			iinfo->i_lenAlloc);
181 		flush_dcache_page(page);
182 		SetPageUptodate(page);
183 		kunmap(page);
184 	}
185 	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
186 	       iinfo->i_lenAlloc);
187 	iinfo->i_lenAlloc = 0;
188 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
189 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
190 	else
191 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
192 
193 	inode->i_data.a_ops->writepage(page, &udf_wbc);
194 	page_cache_release(page);
195 
196 	mark_inode_dirty(inode);
197 }
198 
199 struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
200 					   int *err)
201 {
202 	int newblock;
203 	struct buffer_head *dbh = NULL;
204 	struct kernel_lb_addr eloc;
205 	uint8_t alloctype;
206 	struct extent_position epos;
207 
208 	struct udf_fileident_bh sfibh, dfibh;
209 	loff_t f_pos = udf_ext0_offset(inode);
210 	int size = udf_ext0_offset(inode) + inode->i_size;
211 	struct fileIdentDesc cfi, *sfi, *dfi;
212 	struct udf_inode_info *iinfo = UDF_I(inode);
213 
214 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
215 		alloctype = ICBTAG_FLAG_AD_SHORT;
216 	else
217 		alloctype = ICBTAG_FLAG_AD_LONG;
218 
219 	if (!inode->i_size) {
220 		iinfo->i_alloc_type = alloctype;
221 		mark_inode_dirty(inode);
222 		return NULL;
223 	}
224 
225 	/* alloc block, and copy data to it */
226 	*block = udf_new_block(inode->i_sb, inode,
227 			       iinfo->i_location.partitionReferenceNum,
228 			       iinfo->i_location.logicalBlockNum, err);
229 	if (!(*block))
230 		return NULL;
231 	newblock = udf_get_pblock(inode->i_sb, *block,
232 				  iinfo->i_location.partitionReferenceNum,
233 				0);
234 	if (!newblock)
235 		return NULL;
236 	dbh = udf_tgetblk(inode->i_sb, newblock);
237 	if (!dbh)
238 		return NULL;
239 	lock_buffer(dbh);
240 	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
241 	set_buffer_uptodate(dbh);
242 	unlock_buffer(dbh);
243 	mark_buffer_dirty_inode(dbh, inode);
244 
245 	sfibh.soffset = sfibh.eoffset =
246 			f_pos & (inode->i_sb->s_blocksize - 1);
247 	sfibh.sbh = sfibh.ebh = NULL;
248 	dfibh.soffset = dfibh.eoffset = 0;
249 	dfibh.sbh = dfibh.ebh = dbh;
250 	while (f_pos < size) {
251 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
252 		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
253 					 NULL, NULL, NULL);
254 		if (!sfi) {
255 			brelse(dbh);
256 			return NULL;
257 		}
258 		iinfo->i_alloc_type = alloctype;
259 		sfi->descTag.tagLocation = cpu_to_le32(*block);
260 		dfibh.soffset = dfibh.eoffset;
261 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
262 		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
263 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
264 				 sfi->fileIdent +
265 					le16_to_cpu(sfi->lengthOfImpUse))) {
266 			iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
267 			brelse(dbh);
268 			return NULL;
269 		}
270 	}
271 	mark_buffer_dirty_inode(dbh, inode);
272 
273 	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
274 		iinfo->i_lenAlloc);
275 	iinfo->i_lenAlloc = 0;
276 	eloc.logicalBlockNum = *block;
277 	eloc.partitionReferenceNum =
278 				iinfo->i_location.partitionReferenceNum;
279 	iinfo->i_lenExtents = inode->i_size;
280 	epos.bh = NULL;
281 	epos.block = iinfo->i_location;
282 	epos.offset = udf_file_entry_alloc_offset(inode);
283 	udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
284 	/* UniqueID stuff */
285 
286 	brelse(epos.bh);
287 	mark_inode_dirty(inode);
288 	return dbh;
289 }
290 
291 static int udf_get_block(struct inode *inode, sector_t block,
292 			 struct buffer_head *bh_result, int create)
293 {
294 	int err, new;
295 	struct buffer_head *bh;
296 	sector_t phys = 0;
297 	struct udf_inode_info *iinfo;
298 
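	/*
	 * Lookup-only path: with create == 0 we never allocate.  Per the
	 * get_block convention the buffer is mapped if the block exists and
	 * we simply return 0 with the buffer left unmapped for a hole.
	 */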
299 	if (!create) {
300 		phys = udf_block_map(inode, block);
301 		if (phys)
302 			map_bh(bh_result, inode->i_sb, phys);
303 		return 0;
304 	}
305 
306 	err = -EIO;
307 	new = 0;
308 	bh = NULL;
309 
310 	lock_kernel();
311 
312 	iinfo = UDF_I(inode);
313 	if (block == iinfo->i_next_alloc_block + 1) {
314 		iinfo->i_next_alloc_block++;
315 		iinfo->i_next_alloc_goal++;
316 	}
317 
318 	err = 0;
319 
320 	bh = inode_getblk(inode, block, &err, &phys, &new);
321 	BUG_ON(bh);
322 	if (err)
323 		goto abort;
324 	BUG_ON(!phys);
325 
326 	if (new)
327 		set_buffer_new(bh_result);
328 	map_bh(bh_result, inode->i_sb, phys);
329 
330 abort:
331 	unlock_kernel();
332 	return err;
333 }
334 
335 static struct buffer_head *udf_getblk(struct inode *inode, long block,
336 				      int create, int *err)
337 {
338 	struct buffer_head *bh;
339 	struct buffer_head dummy;
340 
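	/*
	 * Use a throwaway buffer_head only to learn the physical block
	 * number from udf_get_block(); the real buffer is then taken from
	 * the block device cache.  Freshly allocated blocks are zeroed so
	 * stale disk contents never reach the caller.
	 */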
341 	dummy.b_state = 0;
342 	dummy.b_blocknr = -1000;
343 	*err = udf_get_block(inode, block, &dummy, create);
344 	if (!*err && buffer_mapped(&dummy)) {
345 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 		if (buffer_new(&dummy)) {
347 			lock_buffer(bh);
348 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
349 			set_buffer_uptodate(bh);
350 			unlock_buffer(bh);
351 			mark_buffer_dirty_inode(bh, inode);
352 		}
353 		return bh;
354 	}
355 
356 	return NULL;
357 }
358 
359 /* Extend the file by 'blocks' blocks, return the number of extents added */
360 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
361 		    struct kernel_long_ad *last_ext, sector_t blocks)
362 {
363 	sector_t add;
364 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
365 	struct super_block *sb = inode->i_sb;
366 	struct kernel_lb_addr prealloc_loc = {};
367 	int prealloc_len = 0;
368 	struct udf_inode_info *iinfo;
369 
370 	/* The previous extent is fake and we should not extend by anything
371 	 * - there's nothing to do... */
372 	if (!blocks && fake)
373 		return 0;
374 
375 	iinfo = UDF_I(inode);
376 	/* Round the last extent up to a multiple of block size */
377 	if (last_ext->extLength & (sb->s_blocksize - 1)) {
378 		last_ext->extLength =
379 			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
380 			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
381 			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
382 		iinfo->i_lenExtents =
383 			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
384 			~(sb->s_blocksize - 1);
385 	}
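	/*
	 * A worked example of the rounding above, assuming a 2048-byte
	 * block size: a last extent of 3000 bytes becomes
	 * (3000 + 2047) & ~2047 = 4096, i.e. two whole blocks, while the
	 * type bits kept in UDF_EXTENT_FLAG_MASK are left untouched.
	 */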
386 
387 	/* Is the last extent just preallocated blocks? */
388 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
389 						EXT_NOT_RECORDED_ALLOCATED) {
390 		/* Save the extent so that we can reattach it to the end */
391 		prealloc_loc = last_ext->extLocation;
392 		prealloc_len = last_ext->extLength;
393 		/* Mark the extent as a hole */
394 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
395 			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
396 		last_ext->extLocation.logicalBlockNum = 0;
397 		last_ext->extLocation.partitionReferenceNum = 0;
398 	}
399 
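	/*
	 * Extent lengths live in the low 30 bits of extLength, so a single
	 * descriptor can cover a bit less than 1 GiB.  The computation
	 * below works out how many whole blocks still fit into the current
	 * extent before that limit is reached.
	 */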
400 	/* Can we merge with the previous extent? */
401 	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
402 					EXT_NOT_RECORDED_NOT_ALLOCATED) {
403 		add = ((1 << 30) - sb->s_blocksize -
404 			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
405 			sb->s_blocksize_bits;
406 		if (add > blocks)
407 			add = blocks;
408 		blocks -= add;
409 		last_ext->extLength += add << sb->s_blocksize_bits;
410 	}
411 
412 	if (fake) {
413 		udf_add_aext(inode, last_pos, &last_ext->extLocation,
414 			     last_ext->extLength, 1);
415 		count++;
416 	} else
417 		udf_write_aext(inode, last_pos, &last_ext->extLocation,
418 				last_ext->extLength, 1);
419 
420 	/* Managed to do everything necessary? */
421 	if (!blocks)
422 		goto out;
423 
424 	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
425 	last_ext->extLocation.logicalBlockNum = 0;
426 	last_ext->extLocation.partitionReferenceNum = 0;
427 	add = (1 << (30-sb->s_blocksize_bits)) - 1;
428 	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
429 				(add << sb->s_blocksize_bits);
430 
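	/*
	 * With 2048-byte blocks, for example, this is 2^19 - 1 = 524287
	 * blocks, i.e. just under 1 GiB per descriptor, which is the most
	 * a 30-bit extent length can express without spilling into the
	 * type bits.
	 */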
431 	/* Create enough extents to cover the whole hole */
432 	while (blocks > add) {
433 		blocks -= add;
434 		if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
435 				 last_ext->extLength, 1) == -1)
436 			return -1;
437 		count++;
438 	}
439 	if (blocks) {
440 		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
441 			(blocks << sb->s_blocksize_bits);
442 		if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
443 				 last_ext->extLength, 1) == -1)
444 			return -1;
445 		count++;
446 	}
447 
448 out:
449 	/* Do we have some preallocated blocks saved? */
450 	if (prealloc_len) {
451 		if (udf_add_aext(inode, last_pos, &prealloc_loc,
452 				 prealloc_len, 1) == -1)
453 			return -1;
454 		last_ext->extLocation = prealloc_loc;
455 		last_ext->extLength = prealloc_len;
456 		count++;
457 	}
458 
459 	/* last_pos should point to the last written extent... */
460 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
461 		last_pos->offset -= sizeof(struct short_ad);
462 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
463 		last_pos->offset -= sizeof(struct long_ad);
464 	else
465 		return -1;
466 
467 	return count;
468 }
469 
470 static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
471 					int *err, sector_t *phys, int *new)
472 {
473 	static sector_t last_block;
474 	struct buffer_head *result = NULL;
475 	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
476 	struct extent_position prev_epos, cur_epos, next_epos;
477 	int count = 0, startnum = 0, endnum = 0;
478 	uint32_t elen = 0, tmpelen;
479 	struct kernel_lb_addr eloc, tmpeloc;
480 	int c = 1;
481 	loff_t lbcount = 0, b_off = 0;
482 	uint32_t newblocknum, newblock;
483 	sector_t offset = 0;
484 	int8_t etype;
485 	struct udf_inode_info *iinfo = UDF_I(inode);
486 	int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
487 	int lastblock = 0;
488 
489 	prev_epos.offset = udf_file_entry_alloc_offset(inode);
490 	prev_epos.block = iinfo->i_location;
491 	prev_epos.bh = NULL;
492 	cur_epos = next_epos = prev_epos;
493 	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
494 
495 	/* find the extent which contains the block we are looking for.
496 	   alternate between laarr[0] and laarr[1] for locations of the
497 	   current extent, and the previous extent */
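	/* lbcount accumulates the byte offset at which the extent read in
	   each iteration starts; the loop stops once that extent covers
	   b_off or the allocation chain runs out (etype == -1) */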
498 	do {
499 		if (prev_epos.bh != cur_epos.bh) {
500 			brelse(prev_epos.bh);
501 			get_bh(cur_epos.bh);
502 			prev_epos.bh = cur_epos.bh;
503 		}
504 		if (cur_epos.bh != next_epos.bh) {
505 			brelse(cur_epos.bh);
506 			get_bh(next_epos.bh);
507 			cur_epos.bh = next_epos.bh;
508 		}
509 
510 		lbcount += elen;
511 
512 		prev_epos.block = cur_epos.block;
513 		cur_epos.block = next_epos.block;
514 
515 		prev_epos.offset = cur_epos.offset;
516 		cur_epos.offset = next_epos.offset;
517 
518 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
519 		if (etype == -1)
520 			break;
521 
522 		c = !c;
523 
524 		laarr[c].extLength = (etype << 30) | elen;
525 		laarr[c].extLocation = eloc;
526 
527 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
528 			pgoal = eloc.logicalBlockNum +
529 				((elen + inode->i_sb->s_blocksize - 1) >>
530 				 inode->i_sb->s_blocksize_bits);
531 
532 		count++;
533 	} while (lbcount + elen <= b_off);
534 
535 	b_off -= lbcount;
536 	offset = b_off >> inode->i_sb->s_blocksize_bits;
537 	/*
538 	 * Move prev_epos and cur_epos into indirect extent if we are at
539 	 * the pointer to it
540 	 */
541 	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
542 	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
543 
544 	/* if the extent is allocated and recorded, return the block;
545 	   if the extent is not a multiple of the blocksize, round up */
546 
547 	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
548 		if (elen & (inode->i_sb->s_blocksize - 1)) {
549 			elen = EXT_RECORDED_ALLOCATED |
550 				((elen + inode->i_sb->s_blocksize - 1) &
551 				 ~(inode->i_sb->s_blocksize - 1));
552 			etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
553 		}
554 		brelse(prev_epos.bh);
555 		brelse(cur_epos.bh);
556 		brelse(next_epos.bh);
557 		newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
558 		*phys = newblock;
559 		return NULL;
560 	}
561 
562 	last_block = block;
563 	/* Are we beyond EOF? */
564 	if (etype == -1) {
565 		int ret;
566 
567 		if (count) {
568 			if (c)
569 				laarr[0] = laarr[1];
570 			startnum = 1;
571 		} else {
572 			/* Create a fake extent when there's not one */
573 			memset(&laarr[0].extLocation, 0x00,
574 				sizeof(struct kernel_lb_addr));
575 			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
576 			/* Will udf_extend_file() create real extent from
577 			   a fake one? */
578 			startnum = (offset > 0);
579 		}
580 		/* Create extents for the hole between EOF and offset */
581 		ret = udf_extend_file(inode, &prev_epos, laarr, offset);
582 		if (ret == -1) {
583 			brelse(prev_epos.bh);
584 			brelse(cur_epos.bh);
585 			brelse(next_epos.bh);
586 			/* We don't really know the error here so we just make
587 			 * something up */
588 			*err = -ENOSPC;
589 			return NULL;
590 		}
591 		c = 0;
592 		offset = 0;
593 		count += ret;
594 		/* We are not covered by a preallocated extent? */
595 		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
596 						EXT_NOT_RECORDED_ALLOCATED) {
597 			/* Is there any real extent? - otherwise we overwrite
598 			 * the fake one... */
599 			if (count)
600 				c = !c;
601 			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
602 				inode->i_sb->s_blocksize;
603 			memset(&laarr[c].extLocation, 0x00,
604 				sizeof(struct kernel_lb_addr));
605 			count++;
606 			endnum++;
607 		}
608 		endnum = c + 1;
609 		lastblock = 1;
610 	} else {
611 		endnum = startnum = ((count > 2) ? 2 : count);
612 
613 		/* if the current extent is in position 0,
614 		   swap it with the previous */
615 		if (!c && count != 1) {
616 			laarr[2] = laarr[0];
617 			laarr[0] = laarr[1];
618 			laarr[1] = laarr[2];
619 			c = 1;
620 		}
621 
622 		/* if the current block is located in an extent,
623 		   read the next extent */
624 		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
625 		if (etype != -1) {
626 			laarr[c + 1].extLength = (etype << 30) | elen;
627 			laarr[c + 1].extLocation = eloc;
628 			count++;
629 			startnum++;
630 			endnum++;
631 		} else
632 			lastblock = 1;
633 	}
634 
635 	/* if the current extent is not recorded but allocated, get the
636 	 * block in the extent corresponding to the requested block */
637 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
638 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
639 	else { /* otherwise, allocate a new block */
640 		if (iinfo->i_next_alloc_block == block)
641 			goal = iinfo->i_next_alloc_goal;
642 
643 		if (!goal) {
644 			if (!(goal = pgoal)) /* XXX: what was intended here? */
645 				goal = iinfo->i_location.logicalBlockNum + 1;
646 		}
647 
648 		newblocknum = udf_new_block(inode->i_sb, inode,
649 				iinfo->i_location.partitionReferenceNum,
650 				goal, err);
651 		if (!newblocknum) {
652 			brelse(prev_epos.bh);
653 			*err = -ENOSPC;
654 			return NULL;
655 		}
656 		iinfo->i_lenExtents += inode->i_sb->s_blocksize;
657 	}
658 
659 	/* if the extent the requested block is located in contains multiple
660 	 * blocks, split the extent into at most three extents. blocks prior
661 	 * to requested block, requested block, and blocks after requested
662 	 * block */
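	/* For example, splitting an 8-block unrecorded extent at offset 3
	 * yields blocks 0-2 (still unrecorded), block 3 rewritten as a
	 * recorded extent at newblocknum, and blocks 4-7 left as they were.
	 */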
663 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
664 
665 #ifdef UDF_PREALLOCATE
666 	/* We preallocate blocks only for regular files. It also makes sense
667 	 * for directories but there's a problem when to drop the
668 	 * preallocation. We might use some delayed work for that but I feel
669 	 * it's overengineering for a filesystem like UDF. */
670 	if (S_ISREG(inode->i_mode))
671 		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
672 #endif
673 
674 	/* merge any continuous blocks in laarr */
675 	udf_merge_extents(inode, laarr, &endnum);
676 
677 	/* write back the new extents, inserting new extents if the new number
678 	 * of extents is greater than the old number, and deleting extents if
679 	 * the new number of extents is less than the old number */
680 	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
681 
682 	brelse(prev_epos.bh);
683 
684 	newblock = udf_get_pblock(inode->i_sb, newblocknum,
685 				iinfo->i_location.partitionReferenceNum, 0);
686 	if (!newblock)
687 		return NULL;
688 	*phys = newblock;
689 	*err = 0;
690 	*new = 1;
691 	iinfo->i_next_alloc_block = block;
692 	iinfo->i_next_alloc_goal = newblocknum;
693 	inode->i_ctime = current_fs_time(inode->i_sb);
694 
695 	if (IS_SYNC(inode))
696 		udf_sync_inode(inode);
697 	else
698 		mark_inode_dirty(inode);
699 
700 	return result;
701 }
702 
703 static void udf_split_extents(struct inode *inode, int *c, int offset,
704 			      int newblocknum,
705 			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
706 			      int *endnum)
707 {
708 	unsigned long blocksize = inode->i_sb->s_blocksize;
709 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
710 
711 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
712 	    (laarr[*c].extLength >> 30) ==
713 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
714 		int curr = *c;
715 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
716 			    blocksize - 1) >> blocksize_bits;
717 		int8_t etype = (laarr[curr].extLength >> 30);
718 
719 		if (blen == 1)
720 			;
721 		else if (!offset || blen == offset + 1) {
722 			laarr[curr + 2] = laarr[curr + 1];
723 			laarr[curr + 1] = laarr[curr];
724 		} else {
725 			laarr[curr + 3] = laarr[curr + 1];
726 			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
727 		}
728 
729 		if (offset) {
730 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
731 				udf_free_blocks(inode->i_sb, inode,
732 						&laarr[curr].extLocation,
733 						0, offset);
734 				laarr[curr].extLength =
735 					EXT_NOT_RECORDED_NOT_ALLOCATED |
736 					(offset << blocksize_bits);
737 				laarr[curr].extLocation.logicalBlockNum = 0;
738 				laarr[curr].extLocation.
739 						partitionReferenceNum = 0;
740 			} else
741 				laarr[curr].extLength = (etype << 30) |
742 					(offset << blocksize_bits);
743 			curr++;
744 			(*c)++;
745 			(*endnum)++;
746 		}
747 
748 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
749 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
750 			laarr[curr].extLocation.partitionReferenceNum =
751 				UDF_I(inode)->i_location.partitionReferenceNum;
752 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
753 			blocksize;
754 		curr++;
755 
756 		if (blen != offset + 1) {
757 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
758 				laarr[curr].extLocation.logicalBlockNum +=
759 								offset + 1;
760 			laarr[curr].extLength = (etype << 30) |
761 				((blen - (offset + 1)) << blocksize_bits);
762 			curr++;
763 			(*endnum)++;
764 		}
765 	}
766 }
767 
768 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
769 				 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
770 				 int *endnum)
771 {
772 	int start, length = 0, currlength = 0, i;
773 
774 	if (*endnum >= (c + 1)) {
775 		if (!lastblock)
776 			return;
777 		else
778 			start = c;
779 	} else {
780 		if ((laarr[c + 1].extLength >> 30) ==
781 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
782 			start = c + 1;
783 			length = currlength =
784 				(((laarr[c + 1].extLength &
785 					UDF_EXTENT_LENGTH_MASK) +
786 				inode->i_sb->s_blocksize - 1) >>
787 				inode->i_sb->s_blocksize_bits);
788 		} else
789 			start = c;
790 	}
791 
792 	for (i = start + 1; i <= *endnum; i++) {
793 		if (i == *endnum) {
794 			if (lastblock)
795 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
796 		} else if ((laarr[i].extLength >> 30) ==
797 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
798 			length += (((laarr[i].extLength &
799 						UDF_EXTENT_LENGTH_MASK) +
800 				    inode->i_sb->s_blocksize - 1) >>
801 				    inode->i_sb->s_blocksize_bits);
802 		} else
803 			break;
804 	}
805 
806 	if (length) {
807 		int next = laarr[start].extLocation.logicalBlockNum +
808 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
809 			  inode->i_sb->s_blocksize - 1) >>
810 			  inode->i_sb->s_blocksize_bits);
811 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
812 				laarr[start].extLocation.partitionReferenceNum,
813 				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
814 				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
815 				currlength);
816 		if (numalloc) {
817 			if (start == (c + 1))
818 				laarr[start].extLength +=
819 					(numalloc <<
820 					 inode->i_sb->s_blocksize_bits);
821 			else {
822 				memmove(&laarr[c + 2], &laarr[c + 1],
823 					sizeof(struct long_ad) * (*endnum - (c + 1)));
824 				(*endnum)++;
825 				laarr[c + 1].extLocation.logicalBlockNum = next;
826 				laarr[c + 1].extLocation.partitionReferenceNum =
827 					laarr[c].extLocation.
828 							partitionReferenceNum;
829 				laarr[c + 1].extLength =
830 					EXT_NOT_RECORDED_ALLOCATED |
831 					(numalloc <<
832 					 inode->i_sb->s_blocksize_bits);
833 				start = c + 1;
834 			}
835 
836 			for (i = start + 1; numalloc && i < *endnum; i++) {
837 				int elen = ((laarr[i].extLength &
838 						UDF_EXTENT_LENGTH_MASK) +
839 					    inode->i_sb->s_blocksize - 1) >>
840 					    inode->i_sb->s_blocksize_bits;
841 
842 				if (elen > numalloc) {
843 					laarr[i].extLength -=
844 						(numalloc <<
845 						 inode->i_sb->s_blocksize_bits);
846 					numalloc = 0;
847 				} else {
848 					numalloc -= elen;
849 					if (*endnum > (i + 1))
850 						memmove(&laarr[i],
851 							&laarr[i + 1],
852 							sizeof(struct long_ad) *
853 							(*endnum - (i + 1)));
854 					i--;
855 					(*endnum)--;
856 				}
857 			}
858 			UDF_I(inode)->i_lenExtents +=
859 				numalloc << inode->i_sb->s_blocksize_bits;
860 		}
861 	}
862 }
863 
864 static void udf_merge_extents(struct inode *inode,
865 			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
866 			      int *endnum)
867 {
868 	int i;
869 	unsigned long blocksize = inode->i_sb->s_blocksize;
870 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
871 
872 	for (i = 0; i < (*endnum - 1); i++) {
873 		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
874 		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
875 
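		/*
		 * Two neighbouring descriptors can be folded together when
		 * they have the same type and are either both unrecorded
		 * and unallocated (location is meaningless) or physically
		 * contiguous on disk.  The inner test keeps the combined
		 * length from overflowing the 30-bit length field; if it
		 * would, the first descriptor is filled up to the maximum
		 * length and the remainder is left in the second one.
		 */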
876 		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
877 			(((li->extLength >> 30) ==
878 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
879 			((lip1->extLocation.logicalBlockNum -
880 			  li->extLocation.logicalBlockNum) ==
881 			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
882 			blocksize - 1) >> blocksize_bits)))) {
883 
884 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
885 				(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
886 				blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
887 				lip1->extLength = (lip1->extLength -
888 						  (li->extLength &
889 						   UDF_EXTENT_LENGTH_MASK) +
890 						   UDF_EXTENT_LENGTH_MASK) &
891 							~(blocksize - 1);
892 				li->extLength = (li->extLength &
893 						 UDF_EXTENT_FLAG_MASK) +
894 						(UDF_EXTENT_LENGTH_MASK + 1) -
895 						blocksize;
896 				lip1->extLocation.logicalBlockNum =
897 					li->extLocation.logicalBlockNum +
898 					((li->extLength &
899 						UDF_EXTENT_LENGTH_MASK) >>
900 						blocksize_bits);
901 			} else {
902 				li->extLength = lip1->extLength +
903 					(((li->extLength &
904 						UDF_EXTENT_LENGTH_MASK) +
905 					 blocksize - 1) & ~(blocksize - 1));
906 				if (*endnum > (i + 2))
907 					memmove(&laarr[i + 1], &laarr[i + 2],
908 						sizeof(struct long_ad) *
909 						(*endnum - (i + 2)));
910 				i--;
911 				(*endnum)--;
912 			}
913 		} else if (((li->extLength >> 30) ==
914 				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
915 			   ((lip1->extLength >> 30) ==
916 				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
917 			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
918 					((li->extLength &
919 					  UDF_EXTENT_LENGTH_MASK) +
920 					 blocksize - 1) >> blocksize_bits);
921 			li->extLocation.logicalBlockNum = 0;
922 			li->extLocation.partitionReferenceNum = 0;
923 
924 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
925 			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
926 			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
927 				lip1->extLength = (lip1->extLength -
928 						   (li->extLength &
929 						   UDF_EXTENT_LENGTH_MASK) +
930 						   UDF_EXTENT_LENGTH_MASK) &
931 						   ~(blocksize - 1);
932 				li->extLength = (li->extLength &
933 						 UDF_EXTENT_FLAG_MASK) +
934 						(UDF_EXTENT_LENGTH_MASK + 1) -
935 						blocksize;
936 			} else {
937 				li->extLength = lip1->extLength +
938 					(((li->extLength &
939 						UDF_EXTENT_LENGTH_MASK) +
940 					  blocksize - 1) & ~(blocksize - 1));
941 				if (*endnum > (i + 2))
942 					memmove(&laarr[i + 1], &laarr[i + 2],
943 						sizeof(struct long_ad) *
944 						(*endnum - (i + 2)));
945 				i--;
946 				(*endnum)--;
947 			}
948 		} else if ((li->extLength >> 30) ==
949 					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
950 			udf_free_blocks(inode->i_sb, inode,
951 					&li->extLocation, 0,
952 					((li->extLength &
953 						UDF_EXTENT_LENGTH_MASK) +
954 					 blocksize - 1) >> blocksize_bits);
955 			li->extLocation.logicalBlockNum = 0;
956 			li->extLocation.partitionReferenceNum = 0;
957 			li->extLength = (li->extLength &
958 						UDF_EXTENT_LENGTH_MASK) |
959 						EXT_NOT_RECORDED_NOT_ALLOCATED;
960 		}
961 	}
962 }
963 
964 static void udf_update_extents(struct inode *inode,
965 			       struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
966 			       int startnum, int endnum,
967 			       struct extent_position *epos)
968 {
969 	int start = 0, i;
970 	struct kernel_lb_addr tmploc;
971 	uint32_t tmplen;
972 
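	/*
	 * startnum is how many descriptors the mapped range used on disk
	 * before, endnum how many it needs now: surplus descriptors are
	 * deleted, missing ones inserted, and the remainder overwritten in
	 * place at epos.
	 */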
973 	if (startnum > endnum) {
974 		for (i = 0; i < (startnum - endnum); i++)
975 			udf_delete_aext(inode, *epos, laarr[i].extLocation,
976 					laarr[i].extLength);
977 	} else if (startnum < endnum) {
978 		for (i = 0; i < (endnum - startnum); i++) {
979 			udf_insert_aext(inode, *epos, laarr[i].extLocation,
980 					laarr[i].extLength);
981 			udf_next_aext(inode, epos, &laarr[i].extLocation,
982 				      &laarr[i].extLength, 1);
983 			start++;
984 		}
985 	}
986 
987 	for (i = start; i < endnum; i++) {
988 		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
989 		udf_write_aext(inode, epos, &laarr[i].extLocation,
990 			       laarr[i].extLength, 1);
991 	}
992 }
993 
994 struct buffer_head *udf_bread(struct inode *inode, int block,
995 			      int create, int *err)
996 {
997 	struct buffer_head *bh = NULL;
998 
999 	bh = udf_getblk(inode, block, create, err);
1000 	if (!bh)
1001 		return NULL;
1002 
1003 	if (buffer_uptodate(bh))
1004 		return bh;
1005 
1006 	ll_rw_block(READ, 1, &bh);
1007 
1008 	wait_on_buffer(bh);
1009 	if (buffer_uptodate(bh))
1010 		return bh;
1011 
1012 	brelse(bh);
1013 	*err = -EIO;
1014 	return NULL;
1015 }
1016 
1017 void udf_truncate(struct inode *inode)
1018 {
1019 	int offset;
1020 	int err;
1021 	struct udf_inode_info *iinfo;
1022 
1023 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1024 	      S_ISLNK(inode->i_mode)))
1025 		return;
1026 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1027 		return;
1028 
1029 	lock_kernel();
1030 	iinfo = UDF_I(inode);
1031 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1032 		if (inode->i_sb->s_blocksize <
1033 				(udf_file_entry_alloc_offset(inode) +
1034 				 inode->i_size)) {
1035 			udf_expand_file_adinicb(inode, inode->i_size, &err);
1036 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1037 				inode->i_size = iinfo->i_lenAlloc;
1038 				unlock_kernel();
1039 				return;
1040 			} else
1041 				udf_truncate_extents(inode);
1042 		} else {
1043 			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1044 			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
1045 				0x00, inode->i_sb->s_blocksize -
1046 				offset - udf_file_entry_alloc_offset(inode));
1047 			iinfo->i_lenAlloc = inode->i_size;
1048 		}
1049 	} else {
1050 		block_truncate_page(inode->i_mapping, inode->i_size,
1051 				    udf_get_block);
1052 		udf_truncate_extents(inode);
1053 	}
1054 
1055 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1056 	if (IS_SYNC(inode))
1057 		udf_sync_inode(inode);
1058 	else
1059 		mark_inode_dirty(inode);
1060 	unlock_kernel();
1061 }
1062 
1063 static void __udf_read_inode(struct inode *inode)
1064 {
1065 	struct buffer_head *bh = NULL;
1066 	struct fileEntry *fe;
1067 	uint16_t ident;
1068 	struct udf_inode_info *iinfo = UDF_I(inode);
1069 
1070 	/*
1071 	 * Set defaults, but the inode is still incomplete!
1072 	 * Note: get_new_inode() sets the following on a new inode:
1073 	 *      i_sb = sb
1074 	 *      i_no = ino
1075 	 *      i_flags = sb->s_flags
1076 	 *      i_state = 0
1077 	 * clean_inode(): zero fills and sets
1078 	 *      i_count = 1
1079 	 *      i_nlink = 1
1080 	 *      i_op = NULL;
1081 	 */
1082 	bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
1083 	if (!bh) {
1084 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1085 		       inode->i_ino);
1086 		make_bad_inode(inode);
1087 		return;
1088 	}
1089 
1090 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1091 	    ident != TAG_IDENT_USE) {
1092 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) "
1093 				"failed ident=%d\n", inode->i_ino, ident);
1094 		brelse(bh);
1095 		make_bad_inode(inode);
1096 		return;
1097 	}
1098 
1099 	fe = (struct fileEntry *)bh->b_data;
1100 
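	/*
	 * Strategy type 4096 chains ICBs: the direct entry may be followed
	 * by an Indirect Entry pointing at a newer copy of the file entry
	 * (typically on write-once media), in which case the read is
	 * restarted from that location.
	 */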
1101 	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1102 		struct buffer_head *ibh;
1103 
1104 		ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
1105 					&ident);
1106 		if (ident == TAG_IDENT_IE && ibh) {
1107 			struct buffer_head *nbh = NULL;
1108 			struct kernel_lb_addr loc;
1109 			struct indirectEntry *ie;
1110 
1111 			ie = (struct indirectEntry *)ibh->b_data;
1112 			loc = lelb_to_cpu(ie->indirectICB.extLocation);
1113 
1114 			if (ie->indirectICB.extLength &&
1115 				(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
1116 							&ident))) {
1117 				if (ident == TAG_IDENT_FE ||
1118 					ident == TAG_IDENT_EFE) {
1119 					memcpy(&iinfo->i_location,
1120 						&loc,
1121 						sizeof(struct kernel_lb_addr));
1122 					brelse(bh);
1123 					brelse(ibh);
1124 					brelse(nbh);
1125 					__udf_read_inode(inode);
1126 					return;
1127 				}
1128 				brelse(nbh);
1129 			}
1130 		}
1131 		brelse(ibh);
1132 	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1133 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1134 		       le16_to_cpu(fe->icbTag.strategyType));
1135 		brelse(bh);
1136 		make_bad_inode(inode);
1137 		return;
1138 	}
1139 	udf_fill_inode(inode, bh);
1140 
1141 	brelse(bh);
1142 }
1143 
1144 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1145 {
1146 	struct fileEntry *fe;
1147 	struct extendedFileEntry *efe;
1148 	int offset;
1149 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1150 	struct udf_inode_info *iinfo = UDF_I(inode);
1151 
1152 	fe = (struct fileEntry *)bh->b_data;
1153 	efe = (struct extendedFileEntry *)bh->b_data;
1154 
1155 	if (fe->icbTag.strategyType == cpu_to_le16(4))
1156 		iinfo->i_strat4096 = 0;
1157 	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1158 		iinfo->i_strat4096 = 1;
1159 
1160 	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1161 							ICBTAG_FLAG_AD_MASK;
1162 	iinfo->i_unique = 0;
1163 	iinfo->i_lenEAttr = 0;
1164 	iinfo->i_lenExtents = 0;
1165 	iinfo->i_lenAlloc = 0;
1166 	iinfo->i_next_alloc_block = 0;
1167 	iinfo->i_next_alloc_goal = 0;
1168 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1169 		iinfo->i_efe = 1;
1170 		iinfo->i_use = 0;
1171 		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1172 					sizeof(struct extendedFileEntry))) {
1173 			make_bad_inode(inode);
1174 			return;
1175 		}
1176 		memcpy(iinfo->i_ext.i_data,
1177 		       bh->b_data + sizeof(struct extendedFileEntry),
1178 		       inode->i_sb->s_blocksize -
1179 					sizeof(struct extendedFileEntry));
1180 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1181 		iinfo->i_efe = 0;
1182 		iinfo->i_use = 0;
1183 		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1184 						sizeof(struct fileEntry))) {
1185 			make_bad_inode(inode);
1186 			return;
1187 		}
1188 		memcpy(iinfo->i_ext.i_data,
1189 		       bh->b_data + sizeof(struct fileEntry),
1190 		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1191 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1192 		iinfo->i_efe = 0;
1193 		iinfo->i_use = 1;
1194 		iinfo->i_lenAlloc = le32_to_cpu(
1195 				((struct unallocSpaceEntry *)bh->b_data)->
1196 				 lengthAllocDescs);
1197 		if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1198 					sizeof(struct unallocSpaceEntry))) {
1199 			make_bad_inode(inode);
1200 			return;
1201 		}
1202 		memcpy(iinfo->i_ext.i_data,
1203 		       bh->b_data + sizeof(struct unallocSpaceEntry),
1204 		       inode->i_sb->s_blocksize -
1205 					sizeof(struct unallocSpaceEntry));
1206 		return;
1207 	}
1208 
1209 	inode->i_uid = le32_to_cpu(fe->uid);
1210 	if (inode->i_uid == -1 ||
1211 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
1212 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1213 		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1214 
1215 	inode->i_gid = le32_to_cpu(fe->gid);
1216 	if (inode->i_gid == -1 ||
1217 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
1218 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1219 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1220 
1221 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1222 	if (!inode->i_nlink)
1223 		inode->i_nlink = 1;
1224 
1225 	inode->i_size = le64_to_cpu(fe->informationLength);
1226 	iinfo->i_lenExtents = inode->i_size;
1227 
1228 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1229 			sbi->s_fmode != UDF_INVALID_MODE)
1230 		inode->i_mode = sbi->s_fmode;
1231 	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1232 			sbi->s_dmode != UDF_INVALID_MODE)
1233 		inode->i_mode = sbi->s_dmode;
1234 	else
1235 		inode->i_mode = udf_convert_permissions(fe);
1236 	inode->i_mode &= ~sbi->s_umask;
1237 
1238 	if (iinfo->i_efe == 0) {
1239 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1240 			(inode->i_sb->s_blocksize_bits - 9);
1241 
1242 		if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1243 			inode->i_atime = sbi->s_record_time;
1244 
1245 		if (!udf_disk_stamp_to_time(&inode->i_mtime,
1246 					    fe->modificationTime))
1247 			inode->i_mtime = sbi->s_record_time;
1248 
1249 		if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1250 			inode->i_ctime = sbi->s_record_time;
1251 
1252 		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1253 		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1254 		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1255 		offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
1256 	} else {
1257 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1258 		    (inode->i_sb->s_blocksize_bits - 9);
1259 
1260 		if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1261 			inode->i_atime = sbi->s_record_time;
1262 
1263 		if (!udf_disk_stamp_to_time(&inode->i_mtime,
1264 					    efe->modificationTime))
1265 			inode->i_mtime = sbi->s_record_time;
1266 
1267 		if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1268 			iinfo->i_crtime = sbi->s_record_time;
1269 
1270 		if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1271 			inode->i_ctime = sbi->s_record_time;
1272 
1273 		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1274 		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1275 		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1276 		offset = sizeof(struct extendedFileEntry) +
1277 							iinfo->i_lenEAttr;
1278 	}
1279 
1280 	switch (fe->icbTag.fileType) {
1281 	case ICBTAG_FILE_TYPE_DIRECTORY:
1282 		inode->i_op = &udf_dir_inode_operations;
1283 		inode->i_fop = &udf_dir_operations;
1284 		inode->i_mode |= S_IFDIR;
1285 		inc_nlink(inode);
1286 		break;
1287 	case ICBTAG_FILE_TYPE_REALTIME:
1288 	case ICBTAG_FILE_TYPE_REGULAR:
1289 	case ICBTAG_FILE_TYPE_UNDEF:
1290 	case ICBTAG_FILE_TYPE_VAT20:
1291 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1292 			inode->i_data.a_ops = &udf_adinicb_aops;
1293 		else
1294 			inode->i_data.a_ops = &udf_aops;
1295 		inode->i_op = &udf_file_inode_operations;
1296 		inode->i_fop = &udf_file_operations;
1297 		inode->i_mode |= S_IFREG;
1298 		break;
1299 	case ICBTAG_FILE_TYPE_BLOCK:
1300 		inode->i_mode |= S_IFBLK;
1301 		break;
1302 	case ICBTAG_FILE_TYPE_CHAR:
1303 		inode->i_mode |= S_IFCHR;
1304 		break;
1305 	case ICBTAG_FILE_TYPE_FIFO:
1306 		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1307 		break;
1308 	case ICBTAG_FILE_TYPE_SOCKET:
1309 		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1310 		break;
1311 	case ICBTAG_FILE_TYPE_SYMLINK:
1312 		inode->i_data.a_ops = &udf_symlink_aops;
1313 		inode->i_op = &page_symlink_inode_operations;
1314 		inode->i_mode = S_IFLNK | S_IRWXUGO;
1315 		break;
1316 	case ICBTAG_FILE_TYPE_MAIN:
1317 		udf_debug("METADATA FILE-----\n");
1318 		break;
1319 	case ICBTAG_FILE_TYPE_MIRROR:
1320 		udf_debug("METADATA MIRROR FILE-----\n");
1321 		break;
1322 	case ICBTAG_FILE_TYPE_BITMAP:
1323 		udf_debug("METADATA BITMAP FILE-----\n");
1324 		break;
1325 	default:
1326 		printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1327 				"file type=%d\n", inode->i_ino,
1328 				fe->icbTag.fileType);
1329 		make_bad_inode(inode);
1330 		return;
1331 	}
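	/* Character and block devices keep their major/minor numbers in a
	 * Device Specification extended attribute (attribute type 12). */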
1332 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1333 		struct deviceSpec *dsea =
1334 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1335 		if (dsea) {
1336 			init_special_inode(inode, inode->i_mode,
1337 				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1338 				      le32_to_cpu(dsea->minorDeviceIdent)));
1339 			/* Developer ID ??? */
1340 		} else
1341 			make_bad_inode(inode);
1342 	}
1343 }
1344 
1345 static int udf_alloc_i_data(struct inode *inode, size_t size)
1346 {
1347 	struct udf_inode_info *iinfo = UDF_I(inode);
1348 	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1349 
1350 	if (!iinfo->i_ext.i_data) {
1351 		printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) "
1352 				"no free memory\n", inode->i_ino);
1353 		return -ENOMEM;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 static mode_t udf_convert_permissions(struct fileEntry *fe)
1360 {
1361 	mode_t mode;
1362 	uint32_t permissions;
1363 	uint32_t flags;
1364 
1365 	permissions = le32_to_cpu(fe->permissions);
1366 	flags = le16_to_cpu(fe->icbTag.flags);
1367 
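	/*
	 * UDF keeps five permission bits per class (execute, write, read,
	 * change-attribute, delete), lowest bits for "other", then group,
	 * then owner.  The shifts by 2 and 4 below drop the chattr/delete
	 * bits of the lower classes so the three rwx bits land in the Unix
	 * S_IRWXO/S_IRWXG/S_IRWXU positions.
	 */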
1368 	mode =	((permissions) & S_IRWXO) |
1369 		((permissions >> 2) & S_IRWXG) |
1370 		((permissions >> 4) & S_IRWXU) |
1371 		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1372 		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1373 		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1374 
1375 	return mode;
1376 }
1377 
1378 int udf_write_inode(struct inode *inode, int sync)
1379 {
1380 	int ret;
1381 
1382 	lock_kernel();
1383 	ret = udf_update_inode(inode, sync);
1384 	unlock_kernel();
1385 
1386 	return ret;
1387 }
1388 
1389 int udf_sync_inode(struct inode *inode)
1390 {
1391 	return udf_update_inode(inode, 1);
1392 }
1393 
1394 static int udf_update_inode(struct inode *inode, int do_sync)
1395 {
1396 	struct buffer_head *bh = NULL;
1397 	struct fileEntry *fe;
1398 	struct extendedFileEntry *efe;
1399 	uint32_t udfperms;
1400 	uint16_t icbflags;
1401 	uint16_t crclen;
1402 	int err = 0;
1403 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1404 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1405 	struct udf_inode_info *iinfo = UDF_I(inode);
1406 
1407 	bh = udf_tread(inode->i_sb,
1408 			udf_get_lb_pblock(inode->i_sb,
1409 					  &iinfo->i_location, 0));
1410 	if (!bh) {
1411 		udf_debug("bread failure\n");
1412 		return -EIO;
1413 	}
1414 
1415 	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1416 
1417 	fe = (struct fileEntry *)bh->b_data;
1418 	efe = (struct extendedFileEntry *)bh->b_data;
1419 
1420 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1421 		struct unallocSpaceEntry *use =
1422 			(struct unallocSpaceEntry *)bh->b_data;
1423 
1424 		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1425 		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1426 		       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1427 					sizeof(struct unallocSpaceEntry));
1428 		crclen = sizeof(struct unallocSpaceEntry) +
1429 				iinfo->i_lenAlloc - sizeof(struct tag);
1430 		use->descTag.tagLocation = cpu_to_le32(
1431 						iinfo->i_location.
1432 							logicalBlockNum);
1433 		use->descTag.descCRCLength = cpu_to_le16(crclen);
1434 		use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1435 							   sizeof(struct tag),
1436 							   crclen));
1437 		use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1438 
1439 		mark_buffer_dirty(bh);
1440 		brelse(bh);
1441 		return err;
1442 	}
1443 
1444 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1445 		fe->uid = cpu_to_le32(-1);
1446 	else
1447 		fe->uid = cpu_to_le32(inode->i_uid);
1448 
1449 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1450 		fe->gid = cpu_to_le32(-1);
1451 	else
1452 		fe->gid = cpu_to_le32(inode->i_gid);
1453 
1454 	udfperms = ((inode->i_mode & S_IRWXO)) |
1455 		   ((inode->i_mode & S_IRWXG) << 2) |
1456 		   ((inode->i_mode & S_IRWXU) << 4);
1457 
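	/* Re-add the delete/chattr bits read from disk: they have no Unix
	 * mode equivalent, so the conversion above would otherwise lose
	 * them on every write-out. */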
1458 	udfperms |= (le32_to_cpu(fe->permissions) &
1459 		    (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1460 		     FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1461 		     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1462 	fe->permissions = cpu_to_le32(udfperms);
1463 
1464 	if (S_ISDIR(inode->i_mode))
1465 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1466 	else
1467 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1468 
1469 	fe->informationLength = cpu_to_le64(inode->i_size);
1470 
1471 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1472 		struct regid *eid;
1473 		struct deviceSpec *dsea =
1474 			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1475 		if (!dsea) {
1476 			dsea = (struct deviceSpec *)
1477 				udf_add_extendedattr(inode,
1478 						     sizeof(struct deviceSpec) +
1479 						     sizeof(struct regid), 12, 0x3);
1480 			dsea->attrType = cpu_to_le32(12);
1481 			dsea->attrSubtype = 1;
1482 			dsea->attrLength = cpu_to_le32(
1483 						sizeof(struct deviceSpec) +
1484 						sizeof(struct regid));
1485 			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1486 		}
1487 		eid = (struct regid *)dsea->impUse;
1488 		memset(eid, 0, sizeof(struct regid));
1489 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1490 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1491 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1492 		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1493 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1494 	}
1495 
1496 	if (iinfo->i_efe == 0) {
1497 		memcpy(bh->b_data + sizeof(struct fileEntry),
1498 		       iinfo->i_ext.i_data,
1499 		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1500 		fe->logicalBlocksRecorded = cpu_to_le64(
1501 			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1502 			(blocksize_bits - 9));
1503 
1504 		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1505 		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1506 		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1507 		memset(&(fe->impIdent), 0, sizeof(struct regid));
1508 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1509 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1510 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1511 		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1512 		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1513 		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1514 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1515 		crclen = sizeof(struct fileEntry);
1516 	} else {
1517 		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1518 		       iinfo->i_ext.i_data,
1519 		       inode->i_sb->s_blocksize -
1520 					sizeof(struct extendedFileEntry));
1521 		efe->objectSize = cpu_to_le64(inode->i_size);
1522 		efe->logicalBlocksRecorded = cpu_to_le64(
1523 			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1524 			(blocksize_bits - 9));
1525 
1526 		if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1527 		    (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1528 		     iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1529 			iinfo->i_crtime = inode->i_atime;
1530 
1531 		if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1532 		    (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1533 		     iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1534 			iinfo->i_crtime = inode->i_mtime;
1535 
1536 		if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1537 		    (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1538 		     iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1539 			iinfo->i_crtime = inode->i_ctime;
1540 
1541 		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1542 		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1543 		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1544 		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1545 
1546 		memset(&(efe->impIdent), 0, sizeof(struct regid));
1547 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1548 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1549 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1550 		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1551 		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1552 		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1553 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1554 		crclen = sizeof(struct extendedFileEntry);
1555 	}
1556 	if (iinfo->i_strat4096) {
1557 		fe->icbTag.strategyType = cpu_to_le16(4096);
1558 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1559 		fe->icbTag.numEntries = cpu_to_le16(2);
1560 	} else {
1561 		fe->icbTag.strategyType = cpu_to_le16(4);
1562 		fe->icbTag.numEntries = cpu_to_le16(1);
1563 	}
1564 
1565 	if (S_ISDIR(inode->i_mode))
1566 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1567 	else if (S_ISREG(inode->i_mode))
1568 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1569 	else if (S_ISLNK(inode->i_mode))
1570 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1571 	else if (S_ISBLK(inode->i_mode))
1572 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1573 	else if (S_ISCHR(inode->i_mode))
1574 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1575 	else if (S_ISFIFO(inode->i_mode))
1576 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1577 	else if (S_ISSOCK(inode->i_mode))
1578 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1579 
1580 	icbflags =	iinfo->i_alloc_type |
1581 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1582 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1583 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1584 			(le16_to_cpu(fe->icbTag.flags) &
1585 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1586 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1587 
1588 	fe->icbTag.flags = cpu_to_le16(icbflags);
1589 	if (sbi->s_udfrev >= 0x0200)
1590 		fe->descTag.descVersion = cpu_to_le16(3);
1591 	else
1592 		fe->descTag.descVersion = cpu_to_le16(2);
1593 	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1594 	fe->descTag.tagLocation = cpu_to_le32(
1595 					iinfo->i_location.logicalBlockNum);
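	/*
	 * The descriptor CRC covers everything after the descriptor tag
	 * (sizeof(struct tag) bytes): the fixed (extended) file entry
	 * fields plus the extended attributes and allocation descriptors
	 * copied in above, hence the extra i_lenEAttr and i_lenAlloc added
	 * to crclen.
	 */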
1596 	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
1597 								sizeof(struct tag);
1598 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1599 	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1600 						  crclen));
1601 	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1602 
1603 	/* write the data blocks */
1604 	mark_buffer_dirty(bh);
1605 	if (do_sync) {
1606 		sync_dirty_buffer(bh);
1607 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
1608 			printk(KERN_WARNING "IO error syncing udf inode "
1609 				"[%s:%08lx]\n", inode->i_sb->s_id,
1610 				inode->i_ino);
1611 			err = -EIO;
1612 		}
1613 	}
1614 	brelse(bh);
1615 
1616 	return err;
1617 }
1618 
1619 struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
1620 {
1621 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1622 	struct inode *inode = iget_locked(sb, block);
1623 
1624 	if (!inode)
1625 		return NULL;
1626 
1627 	if (inode->i_state & I_NEW) {
1628 		memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1629 		__udf_read_inode(inode);
1630 		unlock_new_inode(inode);
1631 	}
1632 
1633 	if (is_bad_inode(inode))
1634 		goto out_iput;
1635 
1636 	if (ino->logicalBlockNum >= UDF_SB(sb)->
1637 			s_partmaps[ino->partitionReferenceNum].s_partition_len) {
1638 		udf_debug("block=%d, partition=%d out of range\n",
1639 			  ino->logicalBlockNum, ino->partitionReferenceNum);
1640 		make_bad_inode(inode);
1641 		goto out_iput;
1642 	}
1643 
1644 	return inode;
1645 
1646  out_iput:
1647 	iput(inode);
1648 	return NULL;
1649 }
1650 
1651 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1652 		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1653 {
1654 	int adsize;
1655 	struct short_ad *sad = NULL;
1656 	struct long_ad *lad = NULL;
1657 	struct allocExtDesc *aed;
1658 	int8_t etype;
1659 	uint8_t *ptr;
1660 	struct udf_inode_info *iinfo = UDF_I(inode);
1661 
1662 	if (!epos->bh)
1663 		ptr = iinfo->i_ext.i_data + epos->offset -
1664 			udf_file_entry_alloc_offset(inode) +
1665 			iinfo->i_lenEAttr;
1666 	else
1667 		ptr = epos->bh->b_data + epos->offset;
1668 
1669 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1670 		adsize = sizeof(struct short_ad);
1671 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1672 		adsize = sizeof(struct long_ad);
1673 	else
1674 		return -1;
1675 
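	/*
	 * If one more descriptor plus the mandatory continuation slot
	 * would overflow the current block, chain in a fresh Allocation
	 * Extent Descriptor block below and point to it with an
	 * EXT_NEXT_EXTENT_ALLOCDECS pseudo-extent.
	 */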
1676 	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
1677 		char *sptr, *dptr;
1678 		struct buffer_head *nbh;
1679 		int err, loffset;
1680 		struct kernel_lb_addr obloc = epos->block;
1681 
1682 		epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1683 						obloc.partitionReferenceNum,
1684 						obloc.logicalBlockNum, &err);
1685 		if (!epos->block.logicalBlockNum)
1686 			return -1;
1687 		nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1688 								 &epos->block,
1689 								 0));
1690 		if (!nbh)
1691 			return -1;
1692 		lock_buffer(nbh);
1693 		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1694 		set_buffer_uptodate(nbh);
1695 		unlock_buffer(nbh);
1696 		mark_buffer_dirty_inode(nbh, inode);
1697 
1698 		aed = (struct allocExtDesc *)(nbh->b_data);
1699 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1700 			aed->previousAllocExtLocation =
1701 					cpu_to_le32(obloc.logicalBlockNum);
1702 		if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1703 			loffset = epos->offset;
1704 			aed->lengthAllocDescs = cpu_to_le32(adsize);
1705 			sptr = ptr - adsize;
1706 			dptr = nbh->b_data + sizeof(struct allocExtDesc);
1707 			memcpy(dptr, sptr, adsize);
1708 			epos->offset = sizeof(struct allocExtDesc) + adsize;
1709 		} else {
1710 			loffset = epos->offset + adsize;
1711 			aed->lengthAllocDescs = cpu_to_le32(0);
1712 			sptr = ptr;
1713 			epos->offset = sizeof(struct allocExtDesc);
1714 
1715 			if (epos->bh) {
1716 				aed = (struct allocExtDesc *)epos->bh->b_data;
1717 				le32_add_cpu(&aed->lengthAllocDescs, adsize);
1718 			} else {
1719 				iinfo->i_lenAlloc += adsize;
1720 				mark_inode_dirty(inode);
1721 			}
1722 		}
1723 		if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
1724 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1725 				    epos->block.logicalBlockNum, sizeof(struct tag));
1726 		else
1727 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1728 				    epos->block.logicalBlockNum, sizeof(struct tag));
1729 		switch (iinfo->i_alloc_type) {
1730 		case ICBTAG_FLAG_AD_SHORT:
1731 			sad = (struct short_ad *)sptr;
1732 			sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1733 						     inode->i_sb->s_blocksize);
1734 			sad->extPosition =
1735 				cpu_to_le32(epos->block.logicalBlockNum);
1736 			break;
1737 		case ICBTAG_FLAG_AD_LONG:
1738 			lad = (struct long_ad *)sptr;
1739 			lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1740 						     inode->i_sb->s_blocksize);
1741 			lad->extLocation = cpu_to_lelb(epos->block);
1742 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1743 			break;
1744 		}
1745 		if (epos->bh) {
1746 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1747 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1748 				udf_update_tag(epos->bh->b_data, loffset);
1749 			else
1750 				udf_update_tag(epos->bh->b_data,
1751 						sizeof(struct allocExtDesc));
1752 			mark_buffer_dirty_inode(epos->bh, inode);
1753 			brelse(epos->bh);
1754 		} else {
1755 			mark_inode_dirty(inode);
1756 		}
1757 		epos->bh = nbh;
1758 	}
1759 
1760 	etype = udf_write_aext(inode, epos, eloc, elen, inc);
1761 
1762 	if (!epos->bh) {
1763 		iinfo->i_lenAlloc += adsize;
1764 		mark_inode_dirty(inode);
1765 	} else {
1766 		aed = (struct allocExtDesc *)epos->bh->b_data;
1767 		le32_add_cpu(&aed->lengthAllocDescs, adsize);
1768 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1769 				UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1770 			udf_update_tag(epos->bh->b_data,
1771 					epos->offset + (inc ? 0 : adsize));
1772 		else
1773 			udf_update_tag(epos->bh->b_data,
1774 					sizeof(struct allocExtDesc));
1775 		mark_buffer_dirty_inode(epos->bh, inode);
1776 	}
1777 
1778 	return etype;
1779 }
1780 
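/*
 * Write one short or long allocation descriptor at @epos and, if @inc
 * is set, advance the position past it.  Returns the extent type, i.e.
 * the top two bits of @elen.
 */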
1781 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1782 		      struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1783 {
1784 	int adsize;
1785 	uint8_t *ptr;
1786 	struct short_ad *sad;
1787 	struct long_ad *lad;
1788 	struct udf_inode_info *iinfo = UDF_I(inode);
1789 
1790 	if (!epos->bh)
1791 		ptr = iinfo->i_ext.i_data + epos->offset -
1792 			udf_file_entry_alloc_offset(inode) +
1793 			iinfo->i_lenEAttr;
1794 	else
1795 		ptr = epos->bh->b_data + epos->offset;
1796 
1797 	switch (iinfo->i_alloc_type) {
1798 	case ICBTAG_FLAG_AD_SHORT:
1799 		sad = (struct short_ad *)ptr;
1800 		sad->extLength = cpu_to_le32(elen);
1801 		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
1802 		adsize = sizeof(struct short_ad);
1803 		break;
1804 	case ICBTAG_FLAG_AD_LONG:
1805 		lad = (struct long_ad *)ptr;
1806 		lad->extLength = cpu_to_le32(elen);
1807 		lad->extLocation = cpu_to_lelb(*eloc);
1808 		memset(lad->impUse, 0x00, sizeof(lad->impUse));
1809 		adsize = sizeof(struct long_ad);
1810 		break;
1811 	default:
1812 		return -1;
1813 	}
1814 
1815 	if (epos->bh) {
1816 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1817 		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
1818 			struct allocExtDesc *aed =
1819 				(struct allocExtDesc *)epos->bh->b_data;
1820 			udf_update_tag(epos->bh->b_data,
1821 				       le32_to_cpu(aed->lengthAllocDescs) +
1822 				       sizeof(struct allocExtDesc));
1823 		}
1824 		mark_buffer_dirty_inode(epos->bh, inode);
1825 	} else {
1826 		mark_inode_dirty(inode);
1827 	}
1828 
1829 	if (inc)
1830 		epos->offset += adsize;
1831 
1832 	return (elen >> 30);
1833 }
1834 
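/*
 * Return the next extent at @epos, transparently following chained
 * allocation extent blocks.
 */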
1835 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1836 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1837 {
1838 	int8_t etype;
1839 
1840 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1841 	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
1842 		int block;
1843 		epos->block = *eloc;
1844 		epos->offset = sizeof(struct allocExtDesc);
1845 		brelse(epos->bh);
1846 		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
1847 		epos->bh = udf_tread(inode->i_sb, block);
1848 		if (!epos->bh) {
1849 			udf_debug("reading block %d failed!\n", block);
1850 			return -1;
1851 		}
1852 	}
1853 
1854 	return etype;
1855 }
1856 
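/*
 * Decode the allocation descriptor at @epos (in the ICB itself or in an
 * allocation extent block) into @eloc and @elen.
 */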
1857 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1858 			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1859 {
1860 	int alen;
1861 	int8_t etype;
1862 	uint8_t *ptr;
1863 	struct short_ad *sad;
1864 	struct long_ad *lad;
1865 	struct udf_inode_info *iinfo = UDF_I(inode);
1866 
1867 	if (!epos->bh) {
1868 		if (!epos->offset)
1869 			epos->offset = udf_file_entry_alloc_offset(inode);
1870 		ptr = iinfo->i_ext.i_data + epos->offset -
1871 			udf_file_entry_alloc_offset(inode) +
1872 			iinfo->i_lenEAttr;
1873 		alen = udf_file_entry_alloc_offset(inode) +
1874 							iinfo->i_lenAlloc;
1875 	} else {
1876 		if (!epos->offset)
1877 			epos->offset = sizeof(struct allocExtDesc);
1878 		ptr = epos->bh->b_data + epos->offset;
1879 		alen = sizeof(struct allocExtDesc) +
1880 			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1881 							lengthAllocDescs);
1882 	}
1883 
1884 	switch (iinfo->i_alloc_type) {
1885 	case ICBTAG_FLAG_AD_SHORT:
1886 		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
1887 		if (!sad)
1888 			return -1;
1889 		etype = le32_to_cpu(sad->extLength) >> 30;
1890 		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1891 		eloc->partitionReferenceNum =
1892 				iinfo->i_location.partitionReferenceNum;
1893 		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1894 		break;
1895 	case ICBTAG_FLAG_AD_LONG:
1896 		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
1897 		if (!lad)
1898 			return -1;
1899 		etype = le32_to_cpu(lad->extLength) >> 30;
1900 		*eloc = lelb_to_cpu(lad->extLocation);
1901 		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1902 		break;
1903 	default:
1904 		udf_debug("alloc_type = %d unsupported\n",
1905 				iinfo->i_alloc_type);
1906 		return -1;
1907 	}
1908 
1909 	return etype;
1910 }
1911 
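/*
 * Insert a new extent at @epos by shifting every following descriptor
 * one slot towards the end of the chain.
 */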
1912 static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
1913 			      struct kernel_lb_addr neloc, uint32_t nelen)
1914 {
1915 	struct kernel_lb_addr oeloc;
1916 	uint32_t oelen;
1917 	int8_t etype;
1918 
1919 	if (epos.bh)
1920 		get_bh(epos.bh);
1921 
1922 	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
1923 		udf_write_aext(inode, &epos, &neloc, nelen, 1);
1924 		neloc = oeloc;
1925 		nelen = (etype << 30) | oelen;
1926 	}
1927 	udf_add_aext(inode, &epos, &neloc, nelen, 1);
1928 	brelse(epos.bh);
1929 
1930 	return (nelen >> 30);
1931 }
1932 
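/*
 * Delete the extent at @epos by copying each following descriptor back
 * one slot and shrinking the recorded descriptor length accordingly.
 */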
1933 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1934 		       struct kernel_lb_addr eloc, uint32_t elen)
1935 {
1936 	struct extent_position oepos;
1937 	int adsize;
1938 	int8_t etype;
1939 	struct allocExtDesc *aed;
1940 	struct udf_inode_info *iinfo;
1941 
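	/*
	 * Take two references: both epos.bh and its copy in oepos below
	 * are released with brelse() before this function returns.
	 */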
1942 	if (epos.bh) {
1943 		get_bh(epos.bh);
1944 		get_bh(epos.bh);
1945 	}
1946 
1947 	iinfo = UDF_I(inode);
1948 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1949 		adsize = sizeof(struct short_ad);
1950 	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1951 		adsize = sizeof(struct long_ad);
1952 	else
1953 		adsize = 0;
1954 
1955 	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1) {
		/* drop the references taken above before bailing out */
		brelse(epos.bh);
		brelse(oepos.bh);
		return -1;
	}
1958 
1959 	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
1960 		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
1961 		if (oepos.bh != epos.bh) {
1962 			oepos.block = epos.block;
1963 			brelse(oepos.bh);
1964 			get_bh(epos.bh);
1965 			oepos.bh = epos.bh;
1966 			oepos.offset = epos.offset - adsize;
1967 		}
1968 	}
1969 	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
1970 	elen = 0;
1971 
1972 	if (epos.bh != oepos.bh) {
1973 		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
1974 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
1975 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
1976 		if (!oepos.bh) {
1977 			iinfo->i_lenAlloc -= (adsize * 2);
1978 			mark_inode_dirty(inode);
1979 		} else {
1980 			aed = (struct allocExtDesc *)oepos.bh->b_data;
1981 			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
1982 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1983 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1984 				udf_update_tag(oepos.bh->b_data,
1985 						oepos.offset - (2 * adsize));
1986 			else
1987 				udf_update_tag(oepos.bh->b_data,
1988 						sizeof(struct allocExtDesc));
1989 			mark_buffer_dirty_inode(oepos.bh, inode);
1990 		}
1991 	} else {
1992 		udf_write_aext(inode, &oepos, &eloc, elen, 1);
1993 		if (!oepos.bh) {
1994 			iinfo->i_lenAlloc -= adsize;
1995 			mark_inode_dirty(inode);
1996 		} else {
1997 			aed = (struct allocExtDesc *)oepos.bh->b_data;
1998 			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
1999 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2000 			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2001 				udf_update_tag(oepos.bh->b_data,
2002 						epos.offset - adsize);
2003 			else
2004 				udf_update_tag(oepos.bh->b_data,
2005 						sizeof(struct allocExtDesc));
2006 			mark_buffer_dirty_inode(oepos.bh, inode);
2007 		}
2008 	}
2009 
2010 	brelse(epos.bh);
2011 	brelse(oepos.bh);
2012 
2013 	return (elen >> 30);
2014 }
2015 
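/*
 * Map a file block to the extent containing it.  Returns the extent
 * type and fills in the extent location and length together with the
 * block's offset within that extent.
 */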
2016 int8_t inode_bmap(struct inode *inode, sector_t block,
2017 		  struct extent_position *pos, struct kernel_lb_addr *eloc,
2018 		  uint32_t *elen, sector_t *offset)
2019 {
2020 	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2021 	loff_t lbcount = 0, bcount =
2022 	    (loff_t) block << blocksize_bits;
2023 	int8_t etype;
2024 	struct udf_inode_info *iinfo;
2025 
2026 	iinfo = UDF_I(inode);
2027 	pos->offset = 0;
2028 	pos->block = iinfo->i_location;
2029 	pos->bh = NULL;
2030 	*elen = 0;
2031 
2032 	do {
2033 		etype = udf_next_aext(inode, pos, eloc, elen, 1);
2034 		if (etype == -1) {
2035 			*offset = (bcount - lbcount) >> blocksize_bits;
2036 			iinfo->i_lenExtents = lbcount;
2037 			return -1;
2038 		}
2039 		lbcount += *elen;
2040 	} while (lbcount <= bcount);
2041 
2042 	*offset = (bcount + *elen - lbcount) >> blocksize_bits;
2043 
2044 	return etype;
2045 }
2046 
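/*
 * Return the disc block backing the given file block, or 0 if that
 * block is not recorded and allocated.
 */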
2047 long udf_block_map(struct inode *inode, sector_t block)
2048 {
2049 	struct kernel_lb_addr eloc;
2050 	uint32_t elen;
2051 	sector_t offset;
2052 	struct extent_position epos = {};
2053 	int ret;
2054 
2055 	lock_kernel();
2056 
2057 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2058 						(EXT_RECORDED_ALLOCATED >> 30))
2059 		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
2060 	else
2061 		ret = 0;
2062 
2063 	unlock_kernel();
2064 	brelse(epos.bh);
2065 
2066 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2067 		return udf_fixed_to_variable(ret);
2068 	else
2069 		return ret;
2070 }
2071